hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
82f7783dad1700fc9171224edb0d35019fd8a3cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <cstring>
#include <stdint.h>
#include <iostream>
#include "ssdOpt.h"
#include "ssdOptMacros.h"
//#include "ssd_internal.h"
namespace nvinfer1
{
namespace plugin
{
////////////////////////////////////////////////////////////////////////////////////////////////////
static inline int clz(int x) {
for( int i = 31; i >= 0; --i ) {
if( (1 << i) & x ) {
return 31 - i;
}
}
return 32;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static int find_log_2(int x, bool round_up = false) {
int a = 31 - clz(x);
if( round_up ) {
a += (x & (x-1)) ? 1 : 0;
}
return a;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static void find_divisor(uint32_t &mul, uint32_t &shr, int x) {
assert(x != 0);
if( x == 1 ) {
// If dividing by 1, reduced math doesn't work because mul_coeff would need to be 2^32,
// which doesn't fit into unsigned int. the div() routine handles this special case
// separately.
mul = 0;
shr = 0;
} else {
// To express the division N/D in terms of a multiplication, what we first
// imagine is simply N*(1/D). However, 1/D will always evaluate to 0 (for D>1),
// so we need another way. There's nothing that says we have to use exactly
// the fraction 1/D; instead it could be any X/Y that reduces to 1/D (i.e.,
// Y=X*D), or at least to "close enough" to it. If we pick Y that is a power
// of two, then the N*(X/Y) can be N*X followed by a right-shift by some amount.
// The power of two we should pick should be at least 2^32, because in the
// div() routine we'll use umulhi(), which returns only the upper 32 bits --
// this being equivalent to a right-shift by 32. But we might want a higher
// power of two for better accuracy depending on the magnitude of the denominator.
// Once we've picked Y, then X [our mul_coeff value] is simply Y/D, rounding up,
// and we save shift_coeff as whatever further shift we have to do beyond
// what the umulhi() implies.
uint32_t p = 31 + find_log_2(x, true);
uint32_t m = ((1ull << p) + (uint32_t) x - 1) / (uint32_t) x;
mul = m;
shr = p - 32;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static inline __device__
void fast_divmod(int &div, int &mod, int x, int y, uint32_t mul, uint32_t shr) {
#if 0
if( y == 1 ) {
div = x;
mod = 0;
} else {
div = __umulhi((uint32_t) x, mul) >> shr;
mod = x - div*y;
}
#elif 1
div = x;
mod = 0;
if (y != 1) {
div = __umulhi((uint32_t) x, mul) >> shr;
mod = x - div*y;
}
#else
div = (y != 1) ? __umulhi((uint32_t) x, mul) >> shr : x;
mod = (y != 1) ? (x - div*y) : 0;
#endif
}
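// Worked example (added for illustration, not part of the original file): for a divisor y = 6,
// find_divisor() gives p = 31 + find_log_2(6, true) = 34, mul = ceil(2^34 / 6) = 2863311531 and
// shr = p - 32 = 2. Then for x = 100, __umulhi(100, 2863311531) = 66 and 66 >> 2 = 16 = 100 / 6,
// so fast_divmod() yields div = 16, mod = 100 - 16 * 6 = 4.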
template <typename Dtype, int NUM_LAYERS>
struct PermuteConfData {
const Dtype * conf_data[NUM_LAYERS];
int feature_size[NUM_LAYERS];
int num_anchors[NUM_LAYERS];
int end_layer_prior[NUM_LAYERS];
int box_channels[NUM_LAYERS];
bool packed32_nchw;
};
/* This function maps the input index to the corresponding conf_data offset.
The input "conf_data" is composed of "num_layers" conf tensors from the CONV
layers in SSD. These tensors are in NCHW layout.
The input index is broken down to 4 components: i, c, d, n
i - box coordinate (max 4)
c - class (max num_classes)
d - prior (max num_priors)
n - batch size
The transformed conf_data is generated by:
conf_data[id_layer](CHW)->permute(1,2,0)(HWC)->reshape(H*W*C/num_classes/num_dims, num_classes, num_dims)
->concat(axis=1, num_layers)(num_priors, num_classes, num_dims)[->flatten(num_priors * num_classes * num_dims, 1, 1)]->permute(num_classes, num_priors, num_dims)
Correspondingly, the mapping process will first locate id_layer according to prior and then transform
the index based on (num_classes, num_priors, num_dims) back to CHW.
C = num_anchors_layer * num_classes * num_dims
HW = num_priors_layer / num_anchors_layer
*/
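// Illustrative example (values assumed, not taken from this file): with num_dim = 4,
// num_priors = 8732 and num_classes = 81 (typical SSD300/COCO settings), the flat index
// decomposes as i = index % 4, d = (index / 4) % 8732, c = (index / 4 / 8732) % 81 and
// n = index / 4 / 8732 / 81, which is exactly the fast_divmod chain at the top of the kernel below.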
template <typename Dtype, unsigned nthds_per_cta, int NUM_LAYERS>
//__launch_bounds__(nthds_per_cta)
__launch_bounds__(128)
__global__ void permuteConfData_kernel(
const int nthreads,
const int num_classes, int num_classes_mul, int num_classes_shr,
const int num_priors, int num_priors_mul, int num_priors_shr,
const int num_dim, int num_dim_mul, int num_dim_shr,
int fast_divmod3_mul, int fast_divmod3_shr,
int fast_divmod6_mul, int fast_divmod6_shr,
int fast_divmod4_mul, int fast_divmod4_shr,
bool confSigmoid,
Dtype* new_data,
int *active_counts_per_class,
const PermuteConfData<Dtype, NUM_LAYERS> permute_conf_data)
{
int feature_size[NUM_LAYERS];
int all_num_anchors[NUM_LAYERS];
const Dtype *conf_data[NUM_LAYERS];
#pragma unroll
for (int layer = 0;layer < NUM_LAYERS;++layer) {
feature_size[layer] = permute_conf_data.feature_size[layer];
all_num_anchors[layer] = permute_conf_data.num_anchors[layer];
conf_data[layer] = permute_conf_data.conf_data[layer];
}
const bool packed32_nchw = permute_conf_data.packed32_nchw;
int index = blockIdx.x * nthds_per_cta + threadIdx.x;
if (index < nthreads)
{
int i, i_div, d, d_div, c, n;
fast_divmod(i_div, i, index, num_dim, num_dim_mul, num_dim_shr);
fast_divmod(d_div, d, i_div, num_priors, num_priors_mul, num_priors_shr);
fast_divmod(n, c, d_div, num_classes, num_classes_mul, num_classes_shr);
if (n == 0) {
active_counts_per_class[n] = 0;
}
//find layer_id
int start_layer_prior = 0, end_layer_prior = 0;
int prior_in_layer = 0;
const Dtype *conf_data_layer;
int num_hw;
int layer;
int num_anchors;
int box_channel;
#pragma unroll
for(layer = 0; layer < NUM_LAYERS; layer++) {
end_layer_prior = permute_conf_data.end_layer_prior[layer];
if(d < end_layer_prior) {
conf_data_layer = conf_data[layer];
num_hw = feature_size[layer];
num_anchors = all_num_anchors[layer];
box_channel = permute_conf_data.box_channels[layer];
prior_in_layer = d - start_layer_prior;
d = INT_MAX;
}
start_layer_prior = end_layer_prior;
}
int mappedIndex;
int hw = prior_in_layer % num_hw;
int anchor = prior_in_layer / num_hw;
// in merged tensor, we prepend box_channel before conf_channels
int num_ch = box_channel + num_dim * num_classes * num_anchors;
int ch = box_channel + (i*num_classes + c)*num_anchors + anchor;
if(packed32_nchw) {
int packed_num_ch = (num_ch+31)/32;
int packed_ch = ch >> 5; // ch/32;
int packed_ch_offset = ch & 31; // ch%32;
mappedIndex = ((n * packed_num_ch + packed_ch)*num_hw + hw)*32 + packed_ch_offset;
}
else {
mappedIndex = (n * num_ch + ch)*num_hw + hw;
}
float result = conf_data_layer[mappedIndex];
if (confSigmoid)
result = __expf(result) / (1 + __expf(result));
new_data[index] = result;
}
}
template <typename Dtype>
ssdStatus_t permuteConfData_gpu(
hipStream_t stream,
const int nthreads,
const int num_classes,
const int num_priors,
const int num_dim,
bool confSigmoid,
const void* const* conf_data,
void* new_data,
void* active_count_per_class,
const int num_layers,
const int* feature_size,
const int* num_anchors,
const int* box_channels,
const bool packed32_nchw)
{
const int BS = 128;
const int GS = (nthreads + BS - 1) / BS;
if(num_layers == 6) { // handle a special case
PermuteConfData<Dtype, 6> permute_conf_data;
// precompute pow2(feature_size) and end_prior_layer for each loop iteration.
int start_layer_prior = 0;
for (int i = 0;i < num_layers;++i) {
permute_conf_data.feature_size[i] = feature_size[i] * feature_size[i];
permute_conf_data.num_anchors[i] = num_anchors[i];
permute_conf_data.box_channels[i] = box_channels[i];
int layer_prior_size = num_anchors[i] * permute_conf_data.feature_size[i];
int end_layer_prior = start_layer_prior + layer_prior_size;
permute_conf_data.end_layer_prior[i] = end_layer_prior;
start_layer_prior = end_layer_prior;
}
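// Illustrative example (assumed SSD300-style layout, not taken from this file): with
// feature_size = {38,19,10,5,3,1} and num_anchors = {4,6,6,6,4,4}, the squared sizes are
// {1444,361,100,25,9,1} and end_layer_prior accumulates to {5776,7942,8542,8692,8728,8732},
// so the last entry equals num_priors = 8732.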
permute_conf_data.packed32_nchw = packed32_nchw;
// determine constants for efficient integer division
uint32_t num_classes_mul, num_classes_shr;
uint32_t num_priors_mul, num_priors_shr;
uint32_t num_dim_mul, num_dim_shr;
find_divisor(num_classes_mul, num_classes_shr, num_classes);
find_divisor(num_priors_mul, num_priors_shr, num_priors);
find_divisor(num_dim_mul, num_dim_shr, num_dim);
uint32_t fast_divmod_3_mul, fast_divmod_3_shr;
uint32_t fast_divmod_6_mul, fast_divmod_6_shr;
uint32_t fast_divmod_4_mul, fast_divmod_4_shr;
find_divisor(fast_divmod_3_mul, fast_divmod_3_shr, 3);
find_divisor(fast_divmod_6_mul, fast_divmod_6_shr, 6);
find_divisor(fast_divmod_4_mul, fast_divmod_4_shr, 4);
std::memcpy(permute_conf_data.conf_data, conf_data, 6 * sizeof(void*));
hipLaunchKernelGGL(( permuteConfData_kernel<Dtype, BS, 6>), dim3(GS), dim3(BS), 0, stream, nthreads,
num_classes, num_classes_mul, num_classes_shr,
num_priors, num_priors_mul, num_priors_shr,
num_dim, num_dim_mul, num_dim_shr,
fast_divmod_3_mul, fast_divmod_3_shr,
fast_divmod_6_mul, fast_divmod_6_shr,
fast_divmod_4_mul, fast_divmod_4_shr,
confSigmoid,
(Dtype*) new_data, (int*) active_count_per_class, permute_conf_data);
}
else{
std::cerr<< "Only support numLayers == 6" << std::endl;
return STATUS_FAILURE;
}
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// permuteConfData LAUNCH CONFIG {{{
typedef ssdStatus_t (*pdFunc)(hipStream_t,
const int,
const int,
const int,
const int,
bool,
const void* const*,
void*,
void*,
const int,
const int*,
const int*,
const int*,
const bool);
struct pdLaunchConfig
{
DType_t t_data;
pdFunc function;
pdLaunchConfig(DType_t t_data)
: t_data(t_data)
{
}
pdLaunchConfig(DType_t t_data, pdFunc function)
: t_data(t_data)
, function(function)
{
}
bool operator==(const pdLaunchConfig& other)
{
return t_data == other.t_data;
}
};
static std::vector<pdLaunchConfig> pdFuncVec;
bool permuteConfDataInit()
{
pdFuncVec.push_back(pdLaunchConfig(DataType::kFLOAT,
permuteConfData_gpu<float>));
return true;
}
static bool initialized = permuteConfDataInit();
//}}}
ssdStatus_t permuteConfData(hipStream_t stream,
const int nthreads,
const int num_classes,
const int num_priors,
const int num_dim,
const DType_t DT_DATA,
bool confSigmoid,
const void* const* conf_data,
void* new_data,
void* active_classes_per_batch,
const int num_layers,
const int * feature_size,
const int * num_anchors,
const int * box_channels,
const bool packed32_nchw)
{
pdLaunchConfig lc = pdLaunchConfig(DT_DATA);
for (unsigned i = 0; i < pdFuncVec.size(); ++i)
{
if (lc == pdFuncVec[i])
{
DEBUG_PRINTF("permuteConfData kernel %d\n", i);
return pdFuncVec[i].function(stream,
nthreads,
num_classes,
num_priors,
num_dim,
confSigmoid,
conf_data,
new_data,
active_classes_per_batch,
num_layers,
feature_size,
num_anchors,
box_channels,
packed32_nchw);
}
}
return STATUS_BAD_PARAM;
}
} // namespace plugin
} // namespace nvinfer1
| 82f7783dad1700fc9171224edb0d35019fd8a3cd.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <cstring>
#include <stdint.h>
#include <iostream>
#include "ssdOpt.h"
#include "ssdOptMacros.h"
//#include "ssd_internal.h"
namespace nvinfer1
{
namespace plugin
{
////////////////////////////////////////////////////////////////////////////////////////////////////
static inline int clz(int x) {
for( int i = 31; i >= 0; --i ) {
if( (1 << i) & x ) {
return 31 - i;
}
}
return 32;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static int find_log_2(int x, bool round_up = false) {
int a = 31 - clz(x);
if( round_up ) {
a += (x & (x-1)) ? 1 : 0;
}
return a;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static void find_divisor(uint32_t &mul, uint32_t &shr, int x) {
assert(x != 0);
if( x == 1 ) {
// If dividing by 1, reduced math doesn't work because mul_coeff would need to be 2^32,
// which doesn't fit into unsigned int. the div() routine handles this special case
// separately.
mul = 0;
shr = 0;
} else {
// To express the division N/D in terms of a multiplication, what we first
// imagine is simply N*(1/D). However, 1/D will always evaluate to 0 (for D>1),
// so we need another way. There's nothing that says we have to use exactly
// the fraction 1/D; instead it could be any X/Y that reduces to 1/D (i.e.,
// Y=X*D), or at least to "close enough" to it. If we pick Y that is a power
// of two, then the N*(X/Y) can be N*X followed by a right-shift by some amount.
// The power of two we should pick should be at least 2^32, because in the
// div() routine we'll use umulhi(), which returns only the upper 32 bits --
// this being equivalent to a right-shift by 32. But we might want a higher
// power of two for better accuracy depending on the magnitude of the denominator.
// Once we've picked Y, then X [our mul_coeff value] is simply Y/D, rounding up,
// and we save shift_coeff as whatever further shift we have to do beyond
// what the umulhi() implies.
uint32_t p = 31 + find_log_2(x, true);
uint32_t m = ((1ull << p) + (uint32_t) x - 1) / (uint32_t) x;
mul = m;
shr = p - 32;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static inline __device__
void fast_divmod(int &div, int &mod, int x, int y, uint32_t mul, uint32_t shr) {
#if 0
if( y == 1 ) {
div = x;
mod = 0;
} else {
div = __umulhi((uint32_t) x, mul) >> shr;
mod = x - div*y;
}
#elif 1
div = x;
mod = 0;
if (y != 1) {
div = __umulhi((uint32_t) x, mul) >> shr;
mod = x - div*y;
}
#else
div = (y != 1) ? __umulhi((uint32_t) x, mul) >> shr : x;
mod = (y != 1) ? (x - div*y) : 0;
#endif
}
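// Worked example (added for illustration, not part of the original file): for a divisor y = 6,
// find_divisor() gives p = 31 + find_log_2(6, true) = 34, mul = ceil(2^34 / 6) = 2863311531 and
// shr = p - 32 = 2. Then for x = 100, __umulhi(100, 2863311531) = 66 and 66 >> 2 = 16 = 100 / 6,
// so fast_divmod() yields div = 16, mod = 100 - 16 * 6 = 4.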
template <typename Dtype, int NUM_LAYERS>
struct PermuteConfData {
const Dtype * conf_data[NUM_LAYERS];
int feature_size[NUM_LAYERS];
int num_anchors[NUM_LAYERS];
int end_layer_prior[NUM_LAYERS];
int box_channels[NUM_LAYERS];
bool packed32_nchw;
};
/* This function maps the input index to the corresponding conf_data offset.
The input "conf_data" is composed of "num_layers" conf tensors from the CONV
layers in SSD. These tensors are in NCHW layout.
The input index is broken down to 4 components: i, c, d, n
i - box coordinate (max 4)
c - class (max num_classes)
d - prior (max num_priors)
n - batch size
The transformed conf_data is generated by:
conf_data[id_layer](CHW)->permute(1,2,0)(HWC)->reshape(H*W*C/num_classes/num_dims, num_classes, num_dims)
->concat(axis=1, num_layers)(num_priors, num_classes, num_dims)[->flatten(num_priors * num_classes * num_dims, 1, 1)]->permute(num_classes, num_priors, num_dims)
Correspondingly, the mapping process will first locate id_layer according to prior and then transform
the index based on (num_classes, num_priors, num_dims) back to CHW.
C = num_anchors_layer * num_classes * num_dims
HW = num_priors_layer / num_anchors_layer
*/
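// Illustrative example (values assumed, not taken from this file): with num_dim = 4,
// num_priors = 8732 and num_classes = 81 (typical SSD300/COCO settings), the flat index
// decomposes as i = index % 4, d = (index / 4) % 8732, c = (index / 4 / 8732) % 81 and
// n = index / 4 / 8732 / 81, which is exactly the fast_divmod chain at the top of the kernel below.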
template <typename Dtype, unsigned nthds_per_cta, int NUM_LAYERS>
//__launch_bounds__(nthds_per_cta)
__launch_bounds__(128)
__global__ void permuteConfData_kernel(
const int nthreads,
const int num_classes, int num_classes_mul, int num_classes_shr,
const int num_priors, int num_priors_mul, int num_priors_shr,
const int num_dim, int num_dim_mul, int num_dim_shr,
int fast_divmod3_mul, int fast_divmod3_shr,
int fast_divmod6_mul, int fast_divmod6_shr,
int fast_divmod4_mul, int fast_divmod4_shr,
bool confSigmoid,
Dtype* new_data,
int *active_counts_per_class,
const PermuteConfData<Dtype, NUM_LAYERS> permute_conf_data)
{
int feature_size[NUM_LAYERS];
int all_num_anchors[NUM_LAYERS];
const Dtype *conf_data[NUM_LAYERS];
#pragma unroll
for (int layer = 0;layer < NUM_LAYERS;++layer) {
feature_size[layer] = permute_conf_data.feature_size[layer];
all_num_anchors[layer] = permute_conf_data.num_anchors[layer];
conf_data[layer] = permute_conf_data.conf_data[layer];
}
const bool packed32_nchw = permute_conf_data.packed32_nchw;
int index = blockIdx.x * nthds_per_cta + threadIdx.x;
if (index < nthreads)
{
int i, i_div, d, d_div, c, n;
fast_divmod(i_div, i, index, num_dim, num_dim_mul, num_dim_shr);
fast_divmod(d_div, d, i_div, num_priors, num_priors_mul, num_priors_shr);
fast_divmod(n, c, d_div, num_classes, num_classes_mul, num_classes_shr);
if (n == 0) {
active_counts_per_class[n] = 0;
}
//find layer_id
int start_layer_prior = 0, end_layer_prior = 0;
int prior_in_layer = 0;
const Dtype *conf_data_layer;
int num_hw;
int layer;
int num_anchors;
int box_channel;
#pragma unroll
for(layer = 0; layer < NUM_LAYERS; layer++) {
end_layer_prior = permute_conf_data.end_layer_prior[layer];
if(d < end_layer_prior) {
conf_data_layer = conf_data[layer];
num_hw = feature_size[layer];
num_anchors = all_num_anchors[layer];
box_channel = permute_conf_data.box_channels[layer];
prior_in_layer = d - start_layer_prior;
d = INT_MAX;
}
start_layer_prior = end_layer_prior;
}
int mappedIndex;
int hw = prior_in_layer % num_hw;
int anchor = prior_in_layer / num_hw;
// in merged tensor, we prepend box_channel before conf_channels
int num_ch = box_channel + num_dim * num_classes * num_anchors;
int ch = box_channel + (i*num_classes + c)*num_anchors + anchor;
if(packed32_nchw) {
int packed_num_ch = (num_ch+31)/32;
int packed_ch = ch >> 5; // ch/32;
int packed_ch_offset = ch & 31; // ch%32;
mappedIndex = ((n * packed_num_ch + packed_ch)*num_hw + hw)*32 + packed_ch_offset;
}
else {
mappedIndex = (n * num_ch + ch)*num_hw + hw;
}
float result = conf_data_layer[mappedIndex];
if (confSigmoid)
result = __expf(result) / (1 + __expf(result));
new_data[index] = result;
}
}
template <typename Dtype>
ssdStatus_t permuteConfData_gpu(
cudaStream_t stream,
const int nthreads,
const int num_classes,
const int num_priors,
const int num_dim,
bool confSigmoid,
const void* const* conf_data,
void* new_data,
void* active_count_per_class,
const int num_layers,
const int* feature_size,
const int* num_anchors,
const int* box_channels,
const bool packed32_nchw)
{
const int BS = 128;
const int GS = (nthreads + BS - 1) / BS;
if(num_layers == 6) { // handle a special case
PermuteConfData<Dtype, 6> permute_conf_data;
// precompute pow2(feature_size) and end_prior_layer for each loop iteration.
int start_layer_prior = 0;
for (int i = 0;i < num_layers;++i) {
permute_conf_data.feature_size[i] = feature_size[i] * feature_size[i];
permute_conf_data.num_anchors[i] = num_anchors[i];
permute_conf_data.box_channels[i] = box_channels[i];
int layer_prior_size = num_anchors[i] * permute_conf_data.feature_size[i];
int end_layer_prior = start_layer_prior + layer_prior_size;
permute_conf_data.end_layer_prior[i] = end_layer_prior;
start_layer_prior = end_layer_prior;
}
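// Illustrative example (assumed SSD300-style layout, not taken from this file): with
// feature_size = {38,19,10,5,3,1} and num_anchors = {4,6,6,6,4,4}, the squared sizes are
// {1444,361,100,25,9,1} and end_layer_prior accumulates to {5776,7942,8542,8692,8728,8732},
// so the last entry equals num_priors = 8732.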
permute_conf_data.packed32_nchw = packed32_nchw;
// determine constants for efficient integer division
uint32_t num_classes_mul, num_classes_shr;
uint32_t num_priors_mul, num_priors_shr;
uint32_t num_dim_mul, num_dim_shr;
find_divisor(num_classes_mul, num_classes_shr, num_classes);
find_divisor(num_priors_mul, num_priors_shr, num_priors);
find_divisor(num_dim_mul, num_dim_shr, num_dim);
uint32_t fast_divmod_3_mul, fast_divmod_3_shr;
uint32_t fast_divmod_6_mul, fast_divmod_6_shr;
uint32_t fast_divmod_4_mul, fast_divmod_4_shr;
find_divisor(fast_divmod_3_mul, fast_divmod_3_shr, 3);
find_divisor(fast_divmod_6_mul, fast_divmod_6_shr, 6);
find_divisor(fast_divmod_4_mul, fast_divmod_4_shr, 4);
std::memcpy(permute_conf_data.conf_data, conf_data, 6 * sizeof(void*));
permuteConfData_kernel<Dtype, BS, 6><<<GS, BS, 0, stream>>>(nthreads,
num_classes, num_classes_mul, num_classes_shr,
num_priors, num_priors_mul, num_priors_shr,
num_dim, num_dim_mul, num_dim_shr,
fast_divmod_3_mul, fast_divmod_3_shr,
fast_divmod_6_mul, fast_divmod_6_shr,
fast_divmod_4_mul, fast_divmod_4_shr,
confSigmoid,
(Dtype*) new_data, (int*) active_count_per_class, permute_conf_data);
}
else{
std::cerr<< "Only support numLayers == 6" << std::endl;
return STATUS_FAILURE;
}
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// permuteConfData LAUNCH CONFIG {{{
typedef ssdStatus_t (*pdFunc)(cudaStream_t,
const int,
const int,
const int,
const int,
bool,
const void* const*,
void*,
void*,
const int,
const int*,
const int*,
const int*,
const bool);
struct pdLaunchConfig
{
DType_t t_data;
pdFunc function;
pdLaunchConfig(DType_t t_data)
: t_data(t_data)
{
}
pdLaunchConfig(DType_t t_data, pdFunc function)
: t_data(t_data)
, function(function)
{
}
bool operator==(const pdLaunchConfig& other)
{
return t_data == other.t_data;
}
};
static std::vector<pdLaunchConfig> pdFuncVec;
bool permuteConfDataInit()
{
pdFuncVec.push_back(pdLaunchConfig(DataType::kFLOAT,
permuteConfData_gpu<float>));
return true;
}
static bool initialized = permuteConfDataInit();
//}}}
ssdStatus_t permuteConfData(cudaStream_t stream,
const int nthreads,
const int num_classes,
const int num_priors,
const int num_dim,
const DType_t DT_DATA,
bool confSigmoid,
const void* const* conf_data,
void* new_data,
void* active_classes_per_batch,
const int num_layers,
const int * feature_size,
const int * num_anchors,
const int * box_channels,
const bool packed32_nchw)
{
pdLaunchConfig lc = pdLaunchConfig(DT_DATA);
for (unsigned i = 0; i < pdFuncVec.size(); ++i)
{
if (lc == pdFuncVec[i])
{
DEBUG_PRINTF("permuteConfData kernel %d\n", i);
return pdFuncVec[i].function(stream,
nthreads,
num_classes,
num_priors,
num_dim,
confSigmoid,
conf_data,
new_data,
active_classes_per_batch,
num_layers,
feature_size,
num_anchors,
box_channels,
packed32_nchw);
}
}
return STATUS_BAD_PARAM;
}
} // namespace plugin
} // namespace nvinfer1
|
75ec68e450138ab767915d926e8481c9ce480fb3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "right_looking.cu"
int main()
{
int n,N;
printf("Enter dimension (N) : ");
scanf("%d",&n);
if((n%TILE_SIZE)==0)
N = n;
else
N = (((int) (n/TILE_SIZE)) + 1)*TILE_SIZE;
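// Example (illustrative): with TILE_SIZE = 16 and n = 20, N is rounded up to 32 so the
// matrix covers a whole number of tiles; the extra rows/columns are padded with 1 below.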
size_t size = N*N*sizeof(float);
float *M = (float *)malloc(size);
if(M == NULL)
{
fprintf(stderr,"Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
int i,j;
printf("Enter input matrix: \n");
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
if(i>=n || j>=n)
M[i*N + j] = 1; //Padding the matrix with 1
else
scanf("%f",&M[i*N + j]);
}
}
hipError_t err = hipSuccess;
float *read_data = NULL;
err = hipMalloc((void **)&read_data,N*N*sizeof(float));
if(err != hipSuccess)
{
fprintf(stderr,"Failed to allocate matrix on the CUDA device! (error code %s)\n",hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Coping the matrix from host memory to device memory\n");
err = hipMemcpy(read_data,M,size,hipMemcpyHostToDevice);
if(err != hipSuccess)
{
fprintf(stderr,"Failed to copy matrix from host to device (error code %s)\n",hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Testing for matrix M [%dx%d]\n",N,N);
dim3 grid(1,1,1);
dim3 block(TILE_SIZE,TILE_SIZE,1);
hipLaunchKernelGGL(( right_looking_launch_kernel), dim3(grid),dim3(block), 0, 0, read_data,N);
err = hipMemcpy(M,read_data,size,hipMemcpyDeviceToHost);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy the output matrix M from device to Host (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Printing output matrix\n");
for(i=0;i<n;i++)
{
for(j=0;j<n;j++)
{
if(j<=i)
printf("%f\t",M[i*N + j]);
else
printf("%f\t",0.0);
}
printf("\n");
}
err = hipFree(read_data);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to free device matrix M (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipDeviceReset();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the CUDA device (error code %s)\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
free(M);
printf("DONE!\n");
return 0;
} | 75ec68e450138ab767915d926e8481c9ce480fb3.cu | #include "right_looking.cu"
int main()
{
int n,N;
printf("Enter dimension (N) : ");
scanf("%d",&n);
if((n%TILE_SIZE)==0)
N = n;
else
N = (((int) (n/TILE_SIZE)) + 1)*TILE_SIZE;
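// Example (illustrative): with TILE_SIZE = 16 and n = 20, N is rounded up to 32 so the
// matrix covers a whole number of tiles; the extra rows/columns are padded with 1 below.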
size_t size = N*N*sizeof(float);
float *M = (float *)malloc(size);
if(M == NULL)
{
fprintf(stderr,"Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
int i,j;
printf("Enter input matrix: \n");
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
if(i>=n || j>=n)
M[i*N + j] = 1; //Padding the matrix with 1
else
scanf("%f",&M[i*N + j]);
}
}
cudaError_t err = cudaSuccess;
float *read_data = NULL;
err = cudaMalloc((void **)&read_data,N*N*sizeof(float));
if(err != cudaSuccess)
{
fprintf(stderr,"Failed to allocate matrix on the CUDA device! (error code %s)\n",cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Coping the matrix from host memory to device memory\n");
err = cudaMemcpy(read_data,M,size,cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
fprintf(stderr,"Failed to copy matrix from host to device (error code %s)\n",cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Testing for matrix M [%dx%d]\n",N,N);
dim3 grid(1,1,1);
dim3 block(TILE_SIZE,TILE_SIZE,1);
right_looking_launch_kernel<<<grid,block>>>(read_data,N);
err = cudaMemcpy(M,read_data,size,cudaMemcpyDeviceToHost);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy the output matrix M from device to Host (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Printing output matrix\n");
for(i=0;i<n;i++)
{
for(j=0;j<n;j++)
{
if(j<=i)
printf("%f\t",M[i*N + j]);
else
printf("%f\t",0.0);
}
printf("\n");
}
err = cudaFree(read_data);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to free device matrix M (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaDeviceReset();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the CUDA device (error code %s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
free(M);
printf("DONE!\n");
return 0;
} |
3bdcc5fbb62e00a7d7780fa44a78efc456e35243.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <vector>
using std::cout;
using std::generate;
using std::vector;
__global__ void matrixMul(const int *a, const int *b, int *c, int N) {
// Compute each thread's global row and column index
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Guard against threads outside the matrix when N is not a multiple of the block size
if (row >= N || col >= N) return;
// Iterate over row, and down column
c[row * N + col] = 0;
for (int k = 0; k < N; k++) {
// Accumulate results for a single element
c[row * N + col] += a[row * N + k] * b[k * N + col];
}
}
// Check result on the CPU
void verify_result(vector<int> &a, vector<int> &b, vector<int> &c, int N) {
// For every row...
for (int i = 0; i < N; i++) {
// For every column...
for (int j = 0; j < N; j++) {
// For every element in the row-column pair
int tmp = 0;
for (int k = 0; k < N; k++) {
// Accumulate the partial results
tmp += a[i * N + k] * b[k * N + j];
}
// Check against the CPU result
assert(tmp == c[i * N + j]);
}
}
}
int main() {
// Square matrix dimension (N x N)
int N = 100;
// Size (in bytes) of matrix
size_t bytes = N * N * sizeof(int);
// Host vectors
vector<int> h_a(N * N);
vector<int> h_b(N * N);
vector<int> h_c(N * N);
// Initialize matrices
generate(h_a.begin(), h_a.end(), []() { return rand() % 100; });
generate(h_b.begin(), h_b.end(), []() { return rand() % 100; });
// Allocate device memory
int *d_a, *d_b, *d_c;
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
// Copy data to the device
hipMemcpy(d_a, h_a.data(), bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b.data(), bytes, hipMemcpyHostToDevice);
// Threads per CTA dimension
int THREADS = 32;
// Blocks per grid dimension, rounded up so the grid covers all of N
int BLOCKS = (N + THREADS - 1) / THREADS;
// Use dim3 structs for block and grid dimensions
dim3 threads(THREADS, THREADS);
dim3 blocks(BLOCKS, BLOCKS);
// Launch kernel
matrixMul << <blocks, threads >> > (d_a, d_b, d_c, N);
// Copy back to the host
hipMemcpy(h_c.data(), d_c, bytes, hipMemcpyDeviceToHost);
// Check result
verify_result(h_a, h_b, h_c, N);
cout << "COMPLETED SUCCESSFULLY\n";
// Free memory on device
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| 3bdcc5fbb62e00a7d7780fa44a78efc456e35243.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <vector>
using std::cout;
using std::generate;
using std::vector;
__global__ void matrixMul(const int *a, const int *b, int *c, int N) {
// Compute each thread's global row and column index
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Guard against threads outside the matrix when N is not a multiple of the block size
if (row >= N || col >= N) return;
// Iterate over row, and down column
c[row * N + col] = 0;
for (int k = 0; k < N; k++) {
// Accumulate results for a single element
c[row * N + col] += a[row * N + k] * b[k * N + col];
}
}
// Check result on the CPU
void verify_result(vector<int> &a, vector<int> &b, vector<int> &c, int N) {
// For every row...
for (int i = 0; i < N; i++) {
// For every column...
for (int j = 0; j < N; j++) {
// For every element in the row-column pair
int tmp = 0;
for (int k = 0; k < N; k++) {
// Accumulate the partial results
tmp += a[i * N + k] * b[k * N + j];
}
// Check against the CPU result
assert(tmp == c[i * N + j]);
}
}
}
int main() {
// Matrix size of 1024 x 1024;
int N = 100;
// Size (in bytes) of matrix
size_t bytes = N * N * sizeof(int);
// Host vectors
vector<int> h_a(N * N);
vector<int> h_b(N * N);
vector<int> h_c(N * N);
// Initialize matrices
generate(h_a.begin(), h_a.end(), []() { return rand() % 100; });
generate(h_b.begin(), h_b.end(), []() { return rand() % 100; });
// Allocate device memory
int *d_a, *d_b, *d_c;
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
// Copy data to the device
cudaMemcpy(d_a, h_a.data(), bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b.data(), bytes, cudaMemcpyHostToDevice);
// Threads per CTA dimension
int THREADS = 32;
// Blocks per grid dimension, rounded up so the grid covers all of N
int BLOCKS = (N + THREADS - 1) / THREADS;
// Use dim3 structs for block and grid dimensions
dim3 threads(THREADS, THREADS);
dim3 blocks(BLOCKS, BLOCKS);
// Launch kernel
matrixMul << <blocks, threads >> > (d_a, d_b, d_c, N);
// Copy back to the host
cudaMemcpy(h_c.data(), d_c, bytes, cudaMemcpyDeviceToHost);
// Check result
verify_result(h_a, h_b, h_c, N);
cout << "COMPLETED SUCCESSFULLY\n";
// Free memory on device
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
ace69f069245796af8551290942a9bed99437624.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <quda_internal.h>
#include <color_spinor_field.h>
#include <blas_quda.h>
#include <test_util.h>
#include <face_quda.h>
#include <misc.h>
// include because of nasty globals used in the tests
#include <dslash_util.h>
#include <dirac_quda.h>
#include <algorithm>
extern QudaDslashType dslash_type;
extern QudaInverterType inv_type;
extern int nvec;
extern int device;
extern int xdim;
extern int ydim;
extern int zdim;
extern int tdim;
extern int gridsize_from_cmdline[];
extern int niter;
extern int Nsrc; // number of spinors to apply to simultaneously
extern bool verify_results;
extern int test_type;
extern QudaPrecision prec;
extern void usage(char** );
using namespace quda;
ColorSpinorField *xH, *yH;
ColorSpinorField *xD, *yD;
cpuGaugeField *Y_h, *X_h, *Xinv_h, *Yhat_h;
cudaGaugeField *Y_d, *X_d, *Xinv_d, *Yhat_d;
int Nspin;
int Ncolor;
void
display_test_info()
{
printfQuda("running the following test:\n");
printfQuda("S_dimension T_dimension Nspin Ncolor\n");
printfQuda("%3d /%3d / %3d %3d %d %d\n", xdim, ydim, zdim, tdim, Nspin, Ncolor);
printfQuda("Grid partition info: X Y Z T\n");
printfQuda(" %d %d %d %d\n",
dimPartitioned(0),
dimPartitioned(1),
dimPartitioned(2),
dimPartitioned(3));
return;
}
void initFields(QudaPrecision prec)
{
ColorSpinorParam param;
param.nColor = Ncolor;
param.nSpin = Nspin;
param.nDim = 5; // number of spacetime dimensions
param.pad = 0; // padding must be zero for cpu fields
param.siteSubset = QUDA_FULL_SITE_SUBSET;
param.x[0] = xdim;
param.x[1] = ydim;
param.x[2] = zdim;
param.x[3] = tdim;
param.x[4] = Nsrc;
param.PCtype = QUDA_4D_PC;
param.siteOrder = QUDA_EVEN_ODD_SITE_ORDER;
param.gammaBasis = QUDA_DEGRAND_ROSSI_GAMMA_BASIS;
param.precision = QUDA_DOUBLE_PRECISION;
param.fieldOrder = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER;
param.create = QUDA_ZERO_FIELD_CREATE;
xH = new cpuColorSpinorField(param);
yH = new cpuColorSpinorField(param);
//static_cast<cpuColorSpinorField*>(xH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
//static_cast<cpuColorSpinorField*>(yH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
// Now set the parameters for the cuda fields
//param.pad = xdim*ydim*zdim/2;
if (param.nSpin == 4) param.gammaBasis = QUDA_UKQCD_GAMMA_BASIS;
param.create = QUDA_ZERO_FIELD_CREATE;
param.precision = prec;
param.fieldOrder = QUDA_FLOAT2_FIELD_ORDER;
xD = new cudaColorSpinorField(param);
yD = new cudaColorSpinorField(param);
// check for successful allocation
checkCudaError();
//*xD = *xH;
//*yD = *yH;
GaugeFieldParam gParam;
gParam.x[0] = xdim;
gParam.x[1] = ydim;
gParam.x[2] = zdim;
gParam.x[3] = tdim;
gParam.nColor = param.nColor*param.nSpin;
gParam.reconstruct = QUDA_RECONSTRUCT_NO;
gParam.order = QUDA_QDP_GAUGE_ORDER;
gParam.link_type = QUDA_COARSE_LINKS;
gParam.t_boundary = QUDA_PERIODIC_T;
gParam.create = QUDA_ZERO_FIELD_CREATE;
gParam.precision = param.precision;
gParam.nDim = 4;
gParam.siteSubset = QUDA_FULL_SITE_SUBSET;
gParam.ghostExchange = QUDA_GHOST_EXCHANGE_PAD;
gParam.nFace = 1;
gParam.geometry = QUDA_COARSE_GEOMETRY;
Y_h = new cpuGaugeField(gParam);
Yhat_h = new cpuGaugeField(gParam);
gParam.geometry = QUDA_SCALAR_GEOMETRY;
X_h = new cpuGaugeField(gParam);
Xinv_h = new cpuGaugeField(gParam);
gParam.order = QUDA_FLOAT2_GAUGE_ORDER;
gParam.geometry = QUDA_COARSE_GEOMETRY;
int pad = ::max( { (gParam.x[0]*gParam.x[1]*gParam.x[2])/2,
(gParam.x[1]*gParam.x[2]*gParam.x[3])/2,
(gParam.x[0]*gParam.x[2]*gParam.x[3])/2,
(gParam.x[0]*gParam.x[1]*gParam.x[3])/2 } );
gParam.pad = gParam.nFace * pad;
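// Illustrative sizing (values assumed): for a 16^4 local volume each half face has
// 16*16*16/2 = 2048 sites, so with nFace = 1 the pad is 2048 elements.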
Y_d = new cudaGaugeField(gParam);
Yhat_d = new cudaGaugeField(gParam);
Y_d->copy(*Y_h);
Yhat_d->copy(*Yhat_h);
gParam.geometry = QUDA_SCALAR_GEOMETRY;
gParam.ghostExchange = QUDA_GHOST_EXCHANGE_NO;
X_d = new cudaGaugeField(gParam);
Xinv_d = new cudaGaugeField(gParam);
X_d->copy(*X_h);
Xinv_d->copy(*Xinv_h);
}
void freeFields()
{
delete xD;
delete yD;
delete xH;
delete yH;
delete Y_h;
delete X_h;
delete Xinv_h;
delete Yhat_h;
delete Y_d;
delete X_d;
delete Xinv_d;
delete Yhat_d;
}
DiracCoarse *dirac;
double benchmark(int test, const int niter) {
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
switch(test) {
case 0:
for (int i=0; i < niter; ++i) dirac->Dslash(xD->Even(), yD->Odd(), QUDA_EVEN_PARITY);
break;
case 1:
for (int i=0; i < niter; ++i) dirac->M(*xD, *yD);
break;
case 2:
for (int i=0; i < niter; ++i) dirac->Clover(xD->Even(), yD->Even(), QUDA_EVEN_PARITY);
break;
default:
errorQuda("Undefined test %d", test);
}
hipEventRecord(end, 0);
hipEventSynchronize(end);
float runTime;
hipEventElapsedTime(&runTime, start, end);
hipEventDestroy(start);
hipEventDestroy(end);
double secs = runTime / 1000;
return secs;
}
const char *names[] = {
"Dslash",
"Mat",
"Clover"
};
int main(int argc, char** argv)
{
for (int i = 1; i < argc; i++){
if(process_command_line_option(argc, argv, &i) == 0){
continue;
}
printfQuda("ERROR: Invalid option:%s\n", argv[i]);
usage(argv);
}
initComms(argc, argv, gridsize_from_cmdline);
display_test_info();
initQuda(device);
// enable the tuning
setVerbosity(QUDA_SUMMARIZE);
Nspin = 2;
printfQuda("\nBenchmarking %s precision with %d iterations...\n\n", get_prec_str(prec), niter);
for (int c=4; c<=32; c+=4) {
Ncolor = c;
initFields(prec);
DiracParam param;
dirac = new DiracCoarse(param, Y_h, X_h, Xinv_h, Yhat_h, Y_d, X_d, Xinv_d, Yhat_d);
// do the initial tune
benchmark(test_type, 1);
// now rerun with more iterations to get accurate speed measurements
dirac->Flops(); // reset flops counter
double secs = benchmark(test_type, niter);
double gflops = (dirac->Flops()*1e-9)/(secs);
printfQuda("Ncolor = %2d, %-31s: Gflop/s = %6.1f\n", Ncolor, names[test_type], gflops);
delete dirac;
freeFields();
}
// clear the error state
hipGetLastError();
endQuda();
finalizeComms();
}
| ace69f069245796af8551290942a9bed99437624.cu | #include <stdio.h>
#include <stdlib.h>
#include <quda_internal.h>
#include <color_spinor_field.h>
#include <blas_quda.h>
#include <test_util.h>
#include <face_quda.h>
#include <misc.h>
// include because of nasty globals used in the tests
#include <dslash_util.h>
#include <dirac_quda.h>
#include <algorithm>
extern QudaDslashType dslash_type;
extern QudaInverterType inv_type;
extern int nvec;
extern int device;
extern int xdim;
extern int ydim;
extern int zdim;
extern int tdim;
extern int gridsize_from_cmdline[];
extern int niter;
extern int Nsrc; // number of spinors to apply to simultaneously
extern bool verify_results;
extern int test_type;
extern QudaPrecision prec;
extern void usage(char** );
using namespace quda;
ColorSpinorField *xH, *yH;
ColorSpinorField *xD, *yD;
cpuGaugeField *Y_h, *X_h, *Xinv_h, *Yhat_h;
cudaGaugeField *Y_d, *X_d, *Xinv_d, *Yhat_d;
int Nspin;
int Ncolor;
void
display_test_info()
{
printfQuda("running the following test:\n");
printfQuda("S_dimension T_dimension Nspin Ncolor\n");
printfQuda("%3d /%3d / %3d %3d %d %d\n", xdim, ydim, zdim, tdim, Nspin, Ncolor);
printfQuda("Grid partition info: X Y Z T\n");
printfQuda(" %d %d %d %d\n",
dimPartitioned(0),
dimPartitioned(1),
dimPartitioned(2),
dimPartitioned(3));
return;
}
void initFields(QudaPrecision prec)
{
ColorSpinorParam param;
param.nColor = Ncolor;
param.nSpin = Nspin;
param.nDim = 5; // number of spacetime dimensions
param.pad = 0; // padding must be zero for cpu fields
param.siteSubset = QUDA_FULL_SITE_SUBSET;
param.x[0] = xdim;
param.x[1] = ydim;
param.x[2] = zdim;
param.x[3] = tdim;
param.x[4] = Nsrc;
param.PCtype = QUDA_4D_PC;
param.siteOrder = QUDA_EVEN_ODD_SITE_ORDER;
param.gammaBasis = QUDA_DEGRAND_ROSSI_GAMMA_BASIS;
param.precision = QUDA_DOUBLE_PRECISION;
param.fieldOrder = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER;
param.create = QUDA_ZERO_FIELD_CREATE;
xH = new cpuColorSpinorField(param);
yH = new cpuColorSpinorField(param);
//static_cast<cpuColorSpinorField*>(xH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
//static_cast<cpuColorSpinorField*>(yH)->Source(QUDA_RANDOM_SOURCE, 0, 0, 0);
// Now set the parameters for the cuda fields
//param.pad = xdim*ydim*zdim/2;
if (param.nSpin == 4) param.gammaBasis = QUDA_UKQCD_GAMMA_BASIS;
param.create = QUDA_ZERO_FIELD_CREATE;
param.precision = prec;
param.fieldOrder = QUDA_FLOAT2_FIELD_ORDER;
xD = new cudaColorSpinorField(param);
yD = new cudaColorSpinorField(param);
// check for successful allocation
checkCudaError();
//*xD = *xH;
//*yD = *yH;
GaugeFieldParam gParam;
gParam.x[0] = xdim;
gParam.x[1] = ydim;
gParam.x[2] = zdim;
gParam.x[3] = tdim;
gParam.nColor = param.nColor*param.nSpin;
gParam.reconstruct = QUDA_RECONSTRUCT_NO;
gParam.order = QUDA_QDP_GAUGE_ORDER;
gParam.link_type = QUDA_COARSE_LINKS;
gParam.t_boundary = QUDA_PERIODIC_T;
gParam.create = QUDA_ZERO_FIELD_CREATE;
gParam.precision = param.precision;
gParam.nDim = 4;
gParam.siteSubset = QUDA_FULL_SITE_SUBSET;
gParam.ghostExchange = QUDA_GHOST_EXCHANGE_PAD;
gParam.nFace = 1;
gParam.geometry = QUDA_COARSE_GEOMETRY;
Y_h = new cpuGaugeField(gParam);
Yhat_h = new cpuGaugeField(gParam);
gParam.geometry = QUDA_SCALAR_GEOMETRY;
X_h = new cpuGaugeField(gParam);
Xinv_h = new cpuGaugeField(gParam);
gParam.order = QUDA_FLOAT2_GAUGE_ORDER;
gParam.geometry = QUDA_COARSE_GEOMETRY;
int pad = std::max( { (gParam.x[0]*gParam.x[1]*gParam.x[2])/2,
(gParam.x[1]*gParam.x[2]*gParam.x[3])/2,
(gParam.x[0]*gParam.x[2]*gParam.x[3])/2,
(gParam.x[0]*gParam.x[1]*gParam.x[3])/2 } );
gParam.pad = gParam.nFace * pad;
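// Illustrative sizing (values assumed): for a 16^4 local volume each half face has
// 16*16*16/2 = 2048 sites, so with nFace = 1 the pad is 2048 elements.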
Y_d = new cudaGaugeField(gParam);
Yhat_d = new cudaGaugeField(gParam);
Y_d->copy(*Y_h);
Yhat_d->copy(*Yhat_h);
gParam.geometry = QUDA_SCALAR_GEOMETRY;
gParam.ghostExchange = QUDA_GHOST_EXCHANGE_NO;
X_d = new cudaGaugeField(gParam);
Xinv_d = new cudaGaugeField(gParam);
X_d->copy(*X_h);
Xinv_d->copy(*Xinv_h);
}
void freeFields()
{
delete xD;
delete yD;
delete xH;
delete yH;
delete Y_h;
delete X_h;
delete Xinv_h;
delete Yhat_h;
delete Y_d;
delete X_d;
delete Xinv_d;
delete Yhat_d;
}
DiracCoarse *dirac;
double benchmark(int test, const int niter) {
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
switch(test) {
case 0:
for (int i=0; i < niter; ++i) dirac->Dslash(xD->Even(), yD->Odd(), QUDA_EVEN_PARITY);
break;
case 1:
for (int i=0; i < niter; ++i) dirac->M(*xD, *yD);
break;
case 2:
for (int i=0; i < niter; ++i) dirac->Clover(xD->Even(), yD->Even(), QUDA_EVEN_PARITY);
break;
default:
errorQuda("Undefined test %d", test);
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float runTime;
cudaEventElapsedTime(&runTime, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
double secs = runTime / 1000;
return secs;
}
const char *names[] = {
"Dslash",
"Mat",
"Clover"
};
int main(int argc, char** argv)
{
for (int i = 1; i < argc; i++){
if(process_command_line_option(argc, argv, &i) == 0){
continue;
}
printfQuda("ERROR: Invalid option:%s\n", argv[i]);
usage(argv);
}
initComms(argc, argv, gridsize_from_cmdline);
display_test_info();
initQuda(device);
// enable the tuning
setVerbosity(QUDA_SUMMARIZE);
Nspin = 2;
printfQuda("\nBenchmarking %s precision with %d iterations...\n\n", get_prec_str(prec), niter);
for (int c=4; c<=32; c+=4) {
Ncolor = c;
initFields(prec);
DiracParam param;
dirac = new DiracCoarse(param, Y_h, X_h, Xinv_h, Yhat_h, Y_d, X_d, Xinv_d, Yhat_d);
// do the initial tune
benchmark(test_type, 1);
// now rerun with more iterations to get accurate speed measurements
dirac->Flops(); // reset flops counter
double secs = benchmark(test_type, niter);
double gflops = (dirac->Flops()*1e-9)/(secs);
printfQuda("Ncolor = %2d, %-31s: Gflop/s = %6.1f\n", Ncolor, names[test_type], gflops);
delete dirac;
freeFields();
}
// clear the error state
cudaGetLastError();
endQuda();
finalizeComms();
}
|
151e376d589bd8ee9ff49bcf248895191441a1b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <aggregation/coarseAgenerators/low_deg_coarse_A_generator.h>
#include <thrust/system/detail/generic/reduce_by_key.h>
#include <thrust/scan.h>
#include <thrust/remove.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust_wrapper.h>
#include <error.h>
#include <cutil.h>
#include <util.h>
#include <types.h>
#include <misc.h>
#include <logger.h>
#include <hash_workspace.h>
#include <matrix_io.h>
#include <device_properties.h>
#include <amgx_types/util.h>
namespace amgx
{
namespace aggregation
{
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <sm_utils.inl>
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
#include <hash_containers_sm70.inl> // Included inside the namespace to solve name collisions.
static __device__ __forceinline__ int get_work( int *queue, int warp_id, int count = 1 )
{
int offset = -1;
if ( utils::lane_id() == 0 )
{
offset = atomicAdd( queue, count );
}
return utils::shfl( offset, 0 );
}
#else
#include <hash_containers_sm35.inl> // Included inside the namespace to solve name collisions.
static __device__ __forceinline__ int get_work( int *queue, int warp_id, int count = 1 )
{
int offset = -1;
if ( utils::lane_id() == 0 )
{
offset = atomicAdd( queue, count );
}
return utils::shfl( offset, 0 );
}
#endif
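// In both variants above, lane 0 of each warp performs a single atomicAdd on the shared work
// queue and the resulting offset is broadcast to the whole warp via utils::shfl, so warps
// dynamically claim the next row (or batch of rows) to process.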
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< int NUM_THREADS_PER_ROW, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG, bool COUNT_ONLY >
__global__ __launch_bounds__( CTA_SIZE )
void
compute_sparsity_kernel( const int R_num_rows, // same as num_aggregates.
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *aggregates,
int *Ac_rows,
int *Ac_cols,
int *Ac_pos,
const int gmem_size,
int *g_keys,
int *wk_work_queue,
int *wk_status )
{
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
const int NUM_LOADED_ROWS = WARP_SIZE / NUM_THREADS_PER_ROW;
// The hash keys stored in shared memory.
__shared__ int s_keys[NUM_WARPS * SMEM_SIZE];
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW;
const int lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW;
// First threads load the row IDs of A needed by the CTA...
int r_row_id = blockIdx.x * NUM_WARPS + warp_id;
// Create local storage for the set.
Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size );
// Loop over rows of R.
for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) )
{
// Make sure we have to proceed.
if ( COUNT_ONLY )
{
volatile int *status = reinterpret_cast<volatile int *>( wk_status );
if ( set.has_failed() || *status != 0 )
{
return;
}
}
// Clear the set.
set.clear();
// Load the range of the row.
int r_col_it = R_rows[r_row_id + 0];
int r_col_end = R_rows[r_row_id + 1];
// Iterate over the columns of R.
for ( r_col_it += lane_id ; utils::any(r_col_it < r_col_end) ; r_col_it += WARP_SIZE )
{
// Is it an active thread.
const bool is_active = r_col_it < r_col_end;
// Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID.
int a_row_id = -1;
if ( is_active )
{
a_row_id = R_cols[r_col_it];
}
const int num_rows = __popc( utils::ballot(is_active) );
// Uniform loop: threads collaborate to load other elements.
for ( int k = 0 ; k < num_rows ; k += NUM_LOADED_ROWS )
{
int local_k = k + lane_id_div_num_threads;
// Is it an active thread.
bool is_active_k = local_k < num_rows;
// Threads in the warp proceeds columns of B in the range [bColIt, bColEnd).
const int uniform_a_row_id = utils::shfl( a_row_id, local_k );
// Load the range of the row of B.
int a_col_it = 0, a_col_end = 0;
if ( is_active_k )
{
a_col_it = A_rows[uniform_a_row_id + 0];
a_col_end = A_rows[uniform_a_row_id + 1];
}
// Iterate over the range of columns of B.
for ( a_col_it += lane_id_mod_num_threads ; utils::any(a_col_it < a_col_end) ; a_col_it += NUM_THREADS_PER_ROW )
{
int a_col_id = -1, a_agg_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
a_agg_id = aggregates[a_col_id];
}
if ( HAS_DIAG && a_agg_id == r_row_id )
{
a_agg_id = -1;
}
set.insert( a_agg_id, COUNT_ONLY ? wk_status : NULL );
}
}
}
// Store the results.
if ( COUNT_ONLY )
{
int count = set.compute_size_with_duplicates();
if ( lane_id == 0 )
{
Ac_rows[r_row_id] = count;
}
}
else
{
int ac_col_it = Ac_rows[r_row_id];
set.store_with_positions( &Ac_cols[ac_col_it], &Ac_pos[ac_col_it] );
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Value_type, int NUM_THREADS_PER_ROW, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 8 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 8 )
#endif
void fill_A_kernel_1x1( const int R_num_rows,
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *A_diag,
const Value_type *A_vals,
const int *aggregates,
const int *Ac_rows,
const int *Ac_cols,
const int *Ac_pos,
const int *Ac_diag,
Value_type *Ac_vals,
int gmem_size,
int *g_keys,
Value_type *g_vals,
int *wk_work_queue )
{
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
const int NUM_LOADED_ROWS = WARP_SIZE / NUM_THREADS_PER_ROW;
// The hash keys stored in shared memory.
__shared__ volatile int s_keys[NUM_WARPS * SMEM_SIZE];
// The hash values stored in shared memory.
__shared__ volatile Word s_vote[NUM_WARPS * SMEM_SIZE / 4];
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW;
const int lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW;
// First threads load the row IDs of A needed by the CTA...
int r_row_id = blockIdx.x * NUM_WARPS + warp_id;
// Create local storage for the set.
Hash_map<int, Value_type, SMEM_SIZE, 4, WARP_SIZE> map( &s_keys[warp_id * SMEM_SIZE ],
&g_keys[r_row_id * gmem_size ],
&s_vote[warp_id * SMEM_SIZE / 4],
&g_vals[r_row_id * gmem_size ], gmem_size );
// Loop over rows of A.
for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) )
{
// The indices of the output row.
int ac_col_it = Ac_rows[r_row_id + 0];
int ac_col_end = Ac_rows[r_row_id + 1];
// Clear the set first. TODO: Make sure it's needed. I don't think it is!!!!
map.clear();
// Populate the map.
map.load( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it] );
int r_col_it = R_rows[r_row_id + 0];
int r_col_end = R_rows[r_row_id + 1];
// The diagonal.
Value_type r_diag(types::util<Value_type>::get_zero());
// _iterate over the columns of A to build C_hat.
for ( r_col_it += lane_id ; utils::any(r_col_it < r_col_end) ; r_col_it += WARP_SIZE )
{
// Is it an active thread.
const bool is_active = r_col_it < r_col_end;
// Columns of A maps to rows of B. Each thread of the warp loads its A-col/B-row ID.
int a_row_id = -1;
if ( is_active )
{
a_row_id = R_cols[r_col_it];
}
// Update the diagonal (if needed).
if ( HAS_DIAG && is_active )
{
r_diag = r_diag + A_vals[A_diag[a_row_id]];
}
const int num_rows = __popc( utils::ballot(is_active) );
// Uniform loop: threads collaborate to load other elements.
for ( int k = 0 ; k < num_rows ; k += NUM_LOADED_ROWS )
{
int local_k = k + lane_id_div_num_threads;
// Threads in the warp proceeds columns of B in the range [bColIt, bColEnd).
const int uniform_a_row_id = utils::shfl( a_row_id, local_k );
// The range of the row of B.
int a_col_it = 0, a_col_end = 0;
if ( local_k < num_rows )
{
a_col_it = utils::Ld<utils::LD_CG>::load( &A_rows[uniform_a_row_id + 0] );
a_col_end = utils::Ld<utils::LD_CG>::load( &A_rows[uniform_a_row_id + 1] );
}
// Iterate over the range of columns of B.
for ( a_col_it += lane_id_mod_num_threads ; utils::any(a_col_it < a_col_end) ; a_col_it += NUM_THREADS_PER_ROW )
{
// Load columns and values.
int a_col_id = -1;
Value_type a_value(types::util<Value_type>::get_zero());
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
a_value = A_vals[a_col_it];
}
// Find the aggregate.
int a_agg_id = -1;
if ( a_col_it < a_col_end )
{
a_agg_id = aggregates[a_col_id];
}
// Update the diag/hash map.
if ( HAS_DIAG && a_agg_id == r_row_id )
{
r_diag = r_diag + a_value;
a_agg_id = -1;
}
map.insert_with_duplicates( a_agg_id, a_value, NULL ); // It won't insert. Only update.
}
}
}
// Update the diagonal.
if ( HAS_DIAG )
{
r_diag = utils::warp_reduce<1, utils::Add>( r_diag );
if ( lane_id == 0 )
{
Ac_vals[Ac_diag[r_row_id]] = r_diag;
}
}
// Store the results.
int count = ac_col_end - ac_col_it;
if ( count == 0 )
{
continue;
}
map.store( count, &Ac_vals[ac_col_it] );
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Value_type, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG >
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__global__ __launch_bounds__( CTA_SIZE, 8 )
#else
__global__ __launch_bounds__( CTA_SIZE, 8 )
#endif
void fill_A_kernel_4x4( const int R_num_rows, // same as num_aggregates.
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *A_diag,
const Value_type *A_vals,
const int *aggregates,
const int *Ac_rows,
const int *Ac_cols,
const int *Ac_pos,
const int *Ac_diag,
Value_type *Ac_vals,
const int gmem_size,
int *g_keys,
int *g_idx,
int *wk_work_queue )
{
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
// The hash keys stored in shared memory.
__shared__ volatile int s_keys[NUM_WARPS * SMEM_SIZE];
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id( );
const int lane_id = utils::lane_id( );
// Constants.
const int lane_id_div_16 = lane_id / 16;
const int lane_id_mod_16 = lane_id % 16;
const int warp_offset = 16 * lane_id_div_16;
// First threads load the row IDs of A needed by the CTA...
int r_row_id = blockIdx.x * NUM_WARPS + warp_id;
// My index.
Hash_index<int, SMEM_SIZE, WARP_SIZE> index( &g_idx[r_row_id * gmem_size] );
// Create local storage for the set.
Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size );
// Loop over rows of R.
for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) )
{
// The indices of the row.
int ac_col_it = Ac_rows[r_row_id + 0];
int ac_col_end = Ac_rows[r_row_id + 1];
// Clear the set first.
set.clear(true);
// Populate the index.
set.load_index( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it], index, false );
// Load the range of the row.
int r_col_it = R_rows[r_row_id + 0];
int r_col_end = R_rows[r_row_id + 1];
// Diagonal value (each half warp stores a diagonal element).
Value_type ac_diag(types::util<Value_type>::get_zero());
// Iterate over the columns of R.
for ( r_col_it += lane_id_div_16 ; utils::any(r_col_it < r_col_end) ; r_col_it += 2 )
{
// Is it an active thread.
const bool is_active = r_col_it < r_col_end;
// Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID.
int a_row_id = -1;
if ( is_active )
{
a_row_id = R_cols[r_col_it];
}
// Update the diagonal if needed.
if ( HAS_DIAG && is_active )
{
ac_diag = ac_diag + A_vals[16 * A_diag[a_row_id] + lane_id_mod_16];
}
// Load the range of the row of A.
int a_col_begin = 0, a_col_end = 0;
if ( is_active )
{
a_col_begin = A_rows[a_row_id + 0];
a_col_end = A_rows[a_row_id + 1];
}
// Iterate over the range of columns of B.
for ( ; utils::any(a_col_begin < a_col_end) ; a_col_begin += 16 )
{
int a_col_it = a_col_begin + lane_id_mod_16;
// Each thread loads a column-ID and an aggregate.
int a_col_id = -1, ac_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols [a_col_it];
ac_col_id = aggregates[a_col_id];
}
// Each thread uses the hashed index to find the position associated with the aggregate.
int key = ac_col_id;
if ( HAS_DIAG && ac_col_id == r_row_id )
{
key = -1;
}
int ac_idx = ac_col_it + set.find_index( key, index, false );
// Iterate over the 16 items.
for ( int k = 0 ; k < 16 ; ++k )
{
int uniform_ac_col = utils::shfl( ac_col_id, warp_offset + k );
int uniform_ac_idx = utils::shfl( ac_idx, warp_offset + k );
// Early loop exit.
if ( utils::all( uniform_ac_col == -1 ) )
{
break;
}
// The index of the item.
const int uniform_a_col_it = a_col_begin + k;
// Load the value if possible.
Value_type a_value(types::util<Value_type>::get_zero());
if ( uniform_a_col_it < a_col_end )
{
a_value = A_vals[16 * uniform_a_col_it + lane_id_mod_16];
}
// Update the diagonal if it is a diagonal term.
if ( HAS_DIAG && uniform_ac_col == r_row_id )
{
ac_diag = ac_diag + a_value;
uniform_ac_col = -1;
}
// Get the id of the column computed by the other half warp.
int other_ac_col = utils::shfl_xor( uniform_ac_col, 16 );
// If both half warps want to write to the same location, we have a conflict!!!
int are_fighting = uniform_ac_col == other_ac_col;
// Reduce the two values to a single one.
if ( uniform_ac_col != -1 && are_fighting )
{
a_value = a_value + utils::shfl_xor( a_value, 16 );
}
// If the two half warps fight, only one can be the winner... It's the 1st half!!!
int is_winner = !are_fighting || lane_id_div_16 == 0;
// Update the value.
if ( uniform_ac_col != -1 && is_winner )
{
Ac_vals[16 * uniform_ac_idx + lane_id_mod_16] = Ac_vals[16 * uniform_ac_idx + lane_id_mod_16] + a_value;
}
}
}
}
if ( HAS_DIAG )
{
ac_diag = ac_diag + utils::shfl_xor( ac_diag, 16 );
if ( lane_id_div_16 == 0 )
{
Ac_vals[16 * Ac_diag[r_row_id] + lane_id_mod_16] = ac_diag;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
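// Generic NxN fill kernel for block sizes where N*N fits within a warp. Each warp owns one row
// of Ac and processes WARP_SIZE / (N*N) source blocks per iteration (a single block per iteration
// when FORCE_DETERMINISM is set, which fixes the floating-point accumulation order).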
template< typename Value_type, int N, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG, bool FORCE_DETERMINISM >
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__global__ __launch_bounds__( CTA_SIZE, 8 )
#else
__global__ __launch_bounds__( CTA_SIZE, 8 )
#endif
void fill_A_kernel_NxN( const int R_num_rows, // same as num_aggregates.
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *A_diag,
const Value_type *A_vals,
const int *aggregates,
const int *Ac_rows,
const int *Ac_cols,
const int *Ac_pos,
const int *Ac_diag,
Value_type *Ac_vals,
const int gmem_size,
int *g_keys,
int *g_idx,
int *wk_work_queue )
{
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp.
const int T_WARP = FORCE_DETERMINISM ? 1 : WARP_SIZE / NxN;
const int NUM_ITEMS_PER_WARP = T_WARP == 0 ? 1 : T_WARP;
// The hash keys stored in shared memory.
__shared__ volatile int s_keys[NUM_WARPS * SMEM_SIZE];
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id( );
const int lane_id = utils::lane_id( );
// Constants.
const int lane_id_div_NxN = lane_id / NxN;
const int lane_id_mod_NxN = lane_id % NxN;
const int warp_offset = NxN * lane_id_div_NxN;
// First threads load the row IDs of A needed by the CTA...
int r_row_id = blockIdx.x * NUM_WARPS + warp_id;
// My index.
Hash_index<int, SMEM_SIZE, WARP_SIZE> index( &g_idx[r_row_id * gmem_size] );
// Create local storage for the set.
Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size );
// Loop over rows of R.
for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) )
{
// The indices of the row.
int ac_col_it = Ac_rows[r_row_id + 0];
int ac_col_end = Ac_rows[r_row_id + 1];
// Clear the set first.
set.clear(true);
// Populate the index.
set.load_index( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it], index, false );
// Load the range of the row.
int r_col_it = R_rows[r_row_id + 0];
int r_col_end = R_rows[r_row_id + 1];
// Diagonal value (each half warp stores a diagonal element).
Value_type ac_diag(types::util<Value_type>::get_zero());
// Iterate over the columns of R.
for ( r_col_it += lane_id_div_NxN ; utils::any(r_col_it < r_col_end) ; r_col_it += NUM_ITEMS_PER_WARP )
{
// Is it an active thread.
const bool is_active = r_col_it < r_col_end && lane_id_div_NxN < NUM_ITEMS_PER_WARP;
// Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID.
int a_row_id = -1;
if ( is_active )
{
a_row_id = R_cols[r_col_it];
}
// Update the diagonal if needed.
if ( HAS_DIAG && is_active )
{
ac_diag = ac_diag + A_vals[NxN * A_diag[a_row_id] + lane_id_mod_NxN];
}
// Load the range of the row of A.
int a_col_begin = 0, a_col_end = 0;
if ( is_active )
{
a_col_begin = A_rows[a_row_id + 0];
a_col_end = A_rows[a_row_id + 1];
}
// Iterate over the range of columns of A.
for ( ; utils::any(a_col_begin < a_col_end) ; a_col_begin += NxN )
{
int a_col_it = a_col_begin + lane_id_mod_NxN;
// Is it active.
const bool is_active_k = a_col_it < a_col_end && lane_id_div_NxN < NUM_ITEMS_PER_WARP;
// Each thread loads a column-ID and an aggregate.
int a_col_id = -1, ac_col_id = -1;
if ( is_active_k )
{
a_col_id = A_cols [a_col_it];
ac_col_id = aggregates[a_col_id];
}
// Each thread uses the hashed index to find the position associated with the aggregate.
int key = ac_col_id;
if ( HAS_DIAG && ac_col_id == r_row_id )
{
key = -1;
}
int ac_idx = ac_col_it + set.find_index( key, index, false );
// Iterate over the NxN items.
for ( int k = 0 ; k < NxN ; ++k )
{
int uniform_ac_col = utils::shfl( ac_col_id, warp_offset + k );
int uniform_ac_idx = utils::shfl( ac_idx, warp_offset + k );
if ( lane_id_div_NxN >= NUM_ITEMS_PER_WARP )
{
uniform_ac_col = -1;
uniform_ac_idx = -1;
}
// Early loop exit.
if ( utils::all( uniform_ac_col == -1 ) )
{
break;
}
// The index of the item.
const int uniform_a_col_it = a_col_begin + k;
// Load the value if possible.
Value_type a_value(types::util<Value_type>::get_zero());
if ( uniform_a_col_it < a_col_end && lane_id_div_NxN < NUM_ITEMS_PER_WARP )
{
a_value = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN];
}
// Update the diagonal if it is a diagonal term.
if ( HAS_DIAG && uniform_ac_col == r_row_id )
{
ac_diag = ac_diag + a_value;
uniform_ac_col = -1;
}
// Update the value.
if ( uniform_ac_col != -1 )
{
utils::atomic_add( &Ac_vals[NxN * uniform_ac_idx + lane_id_mod_NxN], a_value );
}
}
}
}
if ( HAS_DIAG )
{
if ( !FORCE_DETERMINISM )
{
ac_diag = utils::warp_reduce<NxN, utils::Add>( ac_diag );
}
if ( lane_id_div_NxN == 0 )
{
Ac_vals[NxN * Ac_diag[r_row_id] + lane_id_mod_NxN] = ac_diag;
}
}
}
}
// when blocksize is larger than warp size
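// Each warp still owns one row of Ac, but a single N*N block no longer fits in a warp, so the
// values of every block are swept in NUM_BLOCK_ITERS_PER_WARP chunks of WARP_SIZE lanes.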
template< typename Value_type, int N, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG, bool FORCE_DETERMINISM, int NUM_BLOCK_ITERS_PER_WARP>
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__global__ __launch_bounds__( CTA_SIZE, 8 )
#else
__global__ __launch_bounds__( CTA_SIZE, 8 )
#endif
void fill_A_kernel_NxN_large( const int R_num_rows, // same as num_aggregates.
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *A_diag,
const Value_type *A_vals,
const int *aggregates,
const int *Ac_rows,
const int *Ac_cols,
const int *Ac_pos,
const int *Ac_diag,
Value_type *Ac_vals,
const int gmem_size,
int *g_keys,
int *g_idx,
int *wk_work_queue )
{
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp. Let's be chill here and take 1 per warp for large blocks
const int NUM_ITEMS_PER_WARP = 1;
// The hash keys stored in shared memory.
__shared__ volatile int s_keys[NUM_WARPS * SMEM_SIZE];
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id( );
const int lane_id = utils::lane_id( );
// First threads load the row IDs of A needed by the CTA...
int r_row_id = blockIdx.x * NUM_WARPS + warp_id;
// My index.
Hash_index<int, SMEM_SIZE, WARP_SIZE> index( &g_idx[r_row_id * gmem_size] );
// Create local storage for the set.
Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size );
// Loop over rows of R.
for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) )
{
// The indices of the row.
int ac_col_it = Ac_rows[r_row_id + 0];
int ac_col_end = Ac_rows[r_row_id + 1];
// Clear the set first.
set.clear(true);
// Populate the index.
set.load_index( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it], index, false );
// Load the range of the row.
int r_col_it = R_rows[r_row_id + 0];
int r_col_end = R_rows[r_row_id + 1];
// Diagonal value (each half warp stores a diagonal element).
Value_type ac_diag(types::util<Value_type>::get_zero());
// Iterate over the columns of R.
for ( ; utils::any(r_col_it < r_col_end) ; r_col_it += NUM_ITEMS_PER_WARP )
{
// Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID.
int a_row_id = R_cols[r_col_it];
// Update the diagonal if needed.
if ( HAS_DIAG )
{
ac_diag = ac_diag + A_vals[NxN * A_diag[a_row_id] + lane_id];
}
// Load the range of the row of A.
int a_col_begin = A_rows[a_row_id + 0];
int a_col_end = A_rows[a_row_id + 1];
// Iterate over the range of columns of A.
for ( ; utils::any(a_col_begin < a_col_end) ; a_col_begin += NxN )
{
int a_col_it = a_col_begin + lane_id;
// Is it active.
const bool is_active_k = a_col_it < a_col_end;
// Each thread loads a column-ID and an aggregate.
int a_col_id = -1, ac_col_id = -1;
if ( is_active_k )
{
a_col_id = A_cols [a_col_it];
ac_col_id = aggregates[a_col_id];
}
// Each thread uses the hashed index to find the position associated with the aggregate.
int key = ac_col_id;
if ( HAS_DIAG && ac_col_id == r_row_id )
{
key = -1;
}
int ac_idx = ac_col_it + set.find_index( key, index, false );
// Iterate over the NxN items.
for ( int k = 0 ; k < NxN ; ++k )
{
int uniform_ac_col = utils::shfl( ac_col_id, k );
int uniform_ac_idx = utils::shfl( ac_idx, k );
// Early loop exit.
if ( utils::all( uniform_ac_col == -1 ) )
{
break;
}
// The index of the item.
const int uniform_a_col_it = a_col_begin + k;
// iterate through the block
#pragma unroll
for (int i = 0; i < NUM_BLOCK_ITERS_PER_WARP; i++)
{
// Load the value if possible.
Value_type a_value(types::util<Value_type>::get_zero());
if ( uniform_a_col_it < a_col_end && (WARP_SIZE * i + lane_id) < NxN )
{
a_value = A_vals[NxN * uniform_a_col_it + WARP_SIZE * i + lane_id];
}
// Update the diagonal if it is a diagonal term.
if ( HAS_DIAG && uniform_ac_col == r_row_id )
{
ac_diag = ac_diag + a_value;
uniform_ac_col = -1;
}
// Update the value.
if ( uniform_ac_col != -1 && (WARP_SIZE * i + lane_id) < NxN)
{
utils::atomic_add( &Ac_vals[NxN * uniform_ac_idx + WARP_SIZE * i + lane_id], a_value );
}
}
}
}
}
if ( HAS_DIAG )
{
if ( !FORCE_DETERMINISM )
{
ac_diag = utils::warp_reduce<NxN, utils::Add>( ac_diag );
}
Ac_vals[NxN * Ac_diag[r_row_id] + lane_id] = ac_diag;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
enum { WARP_SIZE = 32, SMEM_SIZE = 128 };
template< int CTA_SIZE, bool HAS_DIAG, bool COUNT_ONLY, typename Workspace >
static
void compute_sparsity_dispatch( Workspace &hash_wk,
const int R_num_rows,
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *aggregates,
int *Ac_rows,
int *Ac_cols,
int *Ac_pos )
{
hipDeviceProp_t props = getDeviceProperties();
int GRID_SIZE = (props.major >= 7) ? 1024 : 128;
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
int *h_status;
thrust::global_thread_handle::hipHostMalloc((void **) &h_status, sizeof(int));
int *h_work_offset;
thrust::global_thread_handle::hipHostMalloc((void **) &h_work_offset, sizeof(int));
int attempt = 0;
bool warning_printed = false;
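// Retry loop: if any warp overflows its hash container the kernel reports a non-zero status,
// so the global-memory part of the workspace is doubled and the pass is re-run (up to 10 attempts).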
for ( bool done = false ; !done && attempt < 10 ; ++attempt )
{
// Double the amount of GMEM (if needed).
if ( attempt > 0 )
{
if (!warning_printed)
{
amgx_printf("WARNING: Used settings might result in degraded performance for the MG coarsener for this matrix.\n");
amgx_printf("WARNING: You might want to try different selector or MG algorithm for better performance.\n");
warning_printed = true;
}
hash_wk.expand();
}
// Reset the status.
int *p_status = h_status;
*p_status = 0;
hipMemcpyAsync( hash_wk.get_status(), p_status, sizeof(int), hipMemcpyHostToDevice, thrust::global_thread_handle::get_stream() );
cudaCheckError();
// Reset the work queue.
int *p_work_offset = h_work_offset;
*p_work_offset = GRID_SIZE * NUM_WARPS;
hipMemcpyAsync( hash_wk.get_work_queue(), p_work_offset, sizeof(int), hipMemcpyHostToDevice, thrust::global_thread_handle::get_stream() );
cudaCheckError();
// Launch the kernel.
hipLaunchKernelGGL(( compute_sparsity_kernel<8, CTA_SIZE, SMEM_SIZE, WARP_SIZE, HAS_DIAG, COUNT_ONLY>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, thrust::global_thread_handle::get_stream(),
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
hash_wk.get_work_queue(),
hash_wk.get_status() );
cudaCheckError();
// Read the result from count_non_zeroes.
hipMemcpyAsync( p_status, hash_wk.get_status(), sizeof(int), hipMemcpyDeviceToHost, thrust::global_thread_handle::get_stream() );
hipStreamSynchronize(thrust::global_thread_handle::get_stream());
done = (*p_status == 0);
cudaCheckError();
}
thrust::global_thread_handle::hipHostFree(h_status);
thrust::global_thread_handle::hipHostFree(h_work_offset);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< int CTA_SIZE, bool HAS_DIAG, typename Workspace, typename Value_type >
static
void fill_A_dispatch( Workspace &hash_wk,
const int block_size,
const int R_num_rows, // same as num_aggregates.
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *A_diag,
const Value_type *A_vals,
const int *aggregates,
const int *Ac_rows,
const int *Ac_cols,
const int *Ac_pos,
const int *Ac_diag,
Value_type *Ac_vals,
bool force_determinism )
{
hipDeviceProp_t props = getDeviceProperties();
int GRID_SIZE = (props.major >= 7) ? 1024 : 128;
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
int work_offset = GRID_SIZE * NUM_WARPS;
hipMemcpyAsync( hash_wk.get_work_queue(), &work_offset, sizeof(int), hipMemcpyHostToDevice, thrust::global_thread_handle::get_stream() );
cudaCheckError();
// Launch the kernel.
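// Dispatch on the block dimension: each case instantiates a kernel specialized at compile time;
// block sizes without a specialization fall through to the FatalError below.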
switch ( block_size )
{
case 1:
hipLaunchKernelGGL(( fill_A_kernel_1x1<Value_type, 8, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
hash_wk.get_vals(),
hash_wk.get_work_queue() );
break;
case 2:
hipLaunchKernelGGL(( fill_A_kernel_NxN<Value_type, 2, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
break;
case 3:
hipLaunchKernelGGL(( fill_A_kernel_NxN<Value_type, 3, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
break;
case 4:
if ( force_determinism )
hipLaunchKernelGGL(( fill_A_kernel_NxN<Value_type, 4, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, true>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
else
hipLaunchKernelGGL(( fill_A_kernel_4x4<Value_type, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, thrust::global_thread_handle::get_stream(),
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
break;
case 5:
hipLaunchKernelGGL(( fill_A_kernel_NxN<Value_type, 5, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
break;
case 8:
hipLaunchKernelGGL(( fill_A_kernel_NxN_large<Value_type, 8, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false, 2>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
break;
case 10:
hipLaunchKernelGGL(( fill_A_kernel_NxN_large<Value_type, 10, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false, 4>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
break;
default:
FatalError( "LOW_DEG not implemented for this block size", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE );
}
cudaCheckError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void
LowDegCoarseAGenerator<TemplateConfig<AMGX_device, V, M, I> >::computeAOperator( const Matrix_d &A,
Matrix_d &Ac,
const IVector &aggregates,
const IVector &R_row_offsets,
const IVector &R_column_indices,
const int num_aggregates )
{
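// Overall flow: (1) count the non-zero blocks per row of Ac (COUNT_ONLY sparsity pass),
// (2) exclusive-scan the counts into row offsets, (3) re-run the sparsity pass to record column
// indices and hash positions, (4) fill the values with fill_A_dispatch, (5) finalize the diagonal.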
if ( A.get_block_dimx() != A.get_block_dimy() )
{
FatalError( "LowDegCoarseAGenerator implemented for squared blocks", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE );
}
// The matrix Ac will be modified.
Ac.set_initialized(0);
// Is the diagonal stored separately?
const int diag_prop = A.hasProps(DIAG);
// Allocate a workspace for hashing.
typedef TemplateConfig<AMGX_device, V, M, I> TConfig_d;
hipDeviceProp_t props = getDeviceProperties();
int grid_size = (props.major >= 7) ? 1024 : 128;
Hash_Workspace<TConfig_d, int> hash_wk(true, grid_size);
// Compute row offsets of Ac.
Ac.addProps(CSR);
Ac.set_num_rows( num_aggregates );
Ac.set_num_cols( num_aggregates );
Ac.row_offsets.resize( num_aggregates + 1 );
// Compute the number of non-zero elements per row of Ac.
const int CTA_SIZE = 128;
if ( diag_prop )
compute_sparsity_dispatch<CTA_SIZE, true, true>(
hash_wk,
num_aggregates,
R_row_offsets.raw(),
R_column_indices.raw(),
A.row_offsets.raw(),
A.col_indices.raw(),
aggregates.raw(),
Ac.row_offsets.raw(),
NULL,
NULL );
else
compute_sparsity_dispatch<CTA_SIZE, false, true>(
hash_wk,
num_aggregates,
R_row_offsets.raw(),
R_column_indices.raw(),
A.row_offsets.raw(),
A.col_indices.raw(),
aggregates.raw(),
Ac.row_offsets.raw(),
NULL,
NULL );
cudaCheckError();
// Compute the number of non-zeroes.
thrust_wrapper::exclusive_scan( Ac.row_offsets.begin(), Ac.row_offsets.end(), Ac.row_offsets.begin() );
cudaCheckError();
int nonzero_blocks = Ac.row_offsets[num_aggregates];
if ( diag_prop )
{
Ac.addProps(DIAG);
}
if ( A.is_matrix_singleGPU() )
{
Ac.resize( num_aggregates, num_aggregates, nonzero_blocks, A.get_block_dimy(), A.get_block_dimx(), !diag_prop );
}
else
{
// Have 3% more nz for spare storage.
Ac.resize_spare( num_aggregates, num_aggregates, nonzero_blocks, A.get_block_dimy(), A.get_block_dimx(), 1.0 );
if ( diag_prop )
{
Ac.computeDiagonal();
}
}
// Vector to store the positions in the hash table.
device_vector_alloc<int> Ac_pos(nonzero_blocks);
// Compute the sparsity pattern of the rows of Ac.
if ( diag_prop )
compute_sparsity_dispatch<CTA_SIZE, true, false>(
hash_wk,
num_aggregates,
R_row_offsets.raw(),
R_column_indices.raw(),
A.row_offsets.raw(),
A.col_indices.raw(),
aggregates.raw(),
Ac.row_offsets.raw(),
Ac.col_indices.raw(),
thrust::raw_pointer_cast( &Ac_pos.front() ));
else
compute_sparsity_dispatch<CTA_SIZE, false, false>(
hash_wk,
num_aggregates,
R_row_offsets.raw(),
R_column_indices.raw(),
A.row_offsets.raw(),
A.col_indices.raw(),
aggregates.raw(),
Ac.row_offsets.raw(),
Ac.col_indices.raw(),
thrust::raw_pointer_cast( &Ac_pos.front() ));
cudaCheckError();
// Reset values if needed.
if ( A.get_block_dimy() != 1 )
{
thrust::fill( Ac.values.begin(), Ac.values.end(), types::util<ValueType>::get_zero() );
cudaCheckError();
}
// Compute values.
if ( diag_prop )
{
fill_A_dispatch<CTA_SIZE, true>(
hash_wk,
A.get_block_dimy(),
num_aggregates,
R_row_offsets.raw(),
R_column_indices.raw(),
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
aggregates.raw(),
Ac.row_offsets.raw(),
Ac.col_indices.raw(),
thrust::raw_pointer_cast( &Ac_pos.front() ),
Ac.diag.raw(),
Ac.values.raw(),
this->m_force_determinism );
}
else
{
fill_A_dispatch<CTA_SIZE, false>(
hash_wk,
A.get_block_dimy(),
num_aggregates,
R_row_offsets.raw(),
R_column_indices.raw(),
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
aggregates.raw(),
Ac.row_offsets.raw(),
Ac.col_indices.raw(),
thrust::raw_pointer_cast( &Ac_pos.front() ),
Ac.diag.raw(),
Ac.values.raw(),
this->m_force_determinism );
}
cudaCheckError();
// Update the diagonal if needed.
if ( Ac.is_matrix_singleGPU() )
{
Ac.computeDiagonal();
}
cudaCheckError();
// Finalize the modification.
Ac.set_initialized(1);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void
LowDegCoarseAGenerator<TemplateConfig<AMGX_host, V, M, I> >::computeAOperator( const Matrix_h &h_A,
Matrix_h &h_Ac,
const IVector &h_aggregates,
const IVector &h_R_row_offsets,
const IVector &h_R_column_indices,
const int num_aggregates )
{
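// Host reference path: a direct triple loop over (row, col) pairs of Ac that accumulates the
// Galerkin product block by block; diagonal blocks are collected separately and appended at the
// end of the value array when the matrix stores an external diagonal.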
h_Ac.set_initialized(0);
IVector rows;
IVector inds;
typename Matrix_h::MVector vals;
typename Matrix_h::MVector diag;
int num_nnz = 0;
int diag_prop = h_A.hasProps(DIAG);
for ( int row = 0; row < num_aggregates; row++ )
{
for ( int col = 0; col < num_aggregates; col++ )
{
int fill = 0;
typename Matrix_h::MVector cur(h_A.get_block_size(), types::util<typename Matrix_h::value_type>::get_zero());
for ( int rc = h_R_row_offsets[row]; rc < h_R_row_offsets[row + 1]; rc++ )
{
int j = h_R_column_indices[rc];
for ( int ac = h_A.row_offsets[j]; ac < h_A.row_offsets[j + 1] + diag_prop; ac++ )
{
int k = (ac == h_A.row_offsets[j + 1]) ? j : h_A.col_indices[ac];
for ( int q = h_R_row_offsets[col]; q < h_R_row_offsets[col + 1]; q++ )
if ( k == h_R_column_indices[q] )
{
fill = 1;
int val_idx = (ac == h_A.row_offsets[j + 1]) ? h_A.get_num_nz() + j : ac;
for ( int v = 0; v < h_A.get_block_size(); v++)
{
cur[v] = cur[v] + h_A.values[val_idx * h_A.get_block_size() + v];
}
}
}
}
if ( fill )
{
if ( row != col || !diag_prop )
{
inds.push_back(col);
rows.push_back(row);
num_nnz++;
for ( int v = 0; v < h_A.get_block_size(); v++ )
{
vals.push_back(cur[v]);
}
}
else
{
for ( int v = 0; v < h_A.get_block_size(); v++ )
{
diag.push_back(cur[v]);
}
}
}
}
}
rows.push_back(-1);
// add diagonal to the end
if ( diag_prop )
{
for ( int v = 0; v < num_aggregates * h_A.get_block_size(); v++ )
{
vals.push_back(diag[v]);
}
}
else
{
// Add a zero at the end
for (int v = 0; v < h_A.get_block_size(); v++)
{
vals.push_back(types::util<typename Matrix_h::value_type>::get_zero());
}
}
h_Ac.resize(num_aggregates, num_aggregates, num_nnz, h_A.get_block_dimx(), h_A.get_block_dimy(), 1);
h_Ac.row_indices = rows;
h_Ac.col_indices = inds;
h_Ac.values = vals;
h_Ac.addProps( CSR | ( diag_prop ? DIAG : 0 ) );
h_Ac.computeDiagonal();
h_Ac.set_initialized(1);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define AMGX_CASE_LINE(CASE) template class LowDegCoarseAGenerator<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace aggregation
} // namespace amgx
| 151e376d589bd8ee9ff49bcf248895191441a1b0.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <aggregation/coarseAgenerators/low_deg_coarse_A_generator.h>
#include <thrust/system/detail/generic/reduce_by_key.h>
#include <thrust/scan.h>
#include <thrust/remove.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust_wrapper.h>
#include <error.h>
#include <cutil.h>
#include <util.h>
#include <types.h>
#include <misc.h>
#include <logger.h>
#include <hash_workspace.h>
#include <matrix_io.h>
#include <device_properties.h>
#include <amgx_types/util.h>
namespace amgx
{
namespace aggregation
{
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <sm_utils.inl>
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
#include <hash_containers_sm70.inl> // Included inside the namespace to solve name collisions.
static __device__ __forceinline__ int get_work( int *queue, int warp_id, int count = 1 )
{
int offset = -1;
if ( utils::lane_id() == 0 )
{
offset = atomicAdd( queue, count );
}
return utils::shfl( offset, 0 );
}
#else
#include <hash_containers_sm35.inl> // Included inside the namespace to solve name collisions.
static __device__ __forceinline__ int get_work( int *queue, int warp_id, int count = 1 )
{
int offset = -1;
if ( utils::lane_id() == 0 )
{
offset = atomicAdd( queue, count );
}
return utils::shfl( offset, 0 );
}
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
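// Computes the sparsity pattern of Ac. One warp handles one row of R: for every fine row of A
// reached through R, the aggregate IDs of its columns are inserted into a per-warp hash set.
// The COUNT_ONLY pass only counts the unique aggregates of each row; the second pass stores the
// column indices together with their positions for the fill kernels.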
template< int NUM_THREADS_PER_ROW, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG, bool COUNT_ONLY >
__global__ __launch_bounds__( CTA_SIZE )
void
compute_sparsity_kernel( const int R_num_rows, // same as num_aggregates.
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *aggregates,
int *Ac_rows,
int *Ac_cols,
int *Ac_pos,
const int gmem_size,
int *g_keys,
int *wk_work_queue,
int *wk_status )
{
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
const int NUM_LOADED_ROWS = WARP_SIZE / NUM_THREADS_PER_ROW;
// The hash keys stored in shared memory.
__shared__ int s_keys[NUM_WARPS * SMEM_SIZE];
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW;
const int lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW;
// First threads load the row IDs of A needed by the CTA...
int r_row_id = blockIdx.x * NUM_WARPS + warp_id;
// Create local storage for the set.
Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size );
// Loop over rows of R.
for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) )
{
// Make sure we have to proceed.
if ( COUNT_ONLY )
{
volatile int *status = reinterpret_cast<volatile int *>( wk_status );
if ( set.has_failed() || *status != 0 )
{
return;
}
}
// Clear the set.
set.clear();
// Load the range of the row.
int r_col_it = R_rows[r_row_id + 0];
int r_col_end = R_rows[r_row_id + 1];
// Iterate over the columns of R.
for ( r_col_it += lane_id ; utils::any(r_col_it < r_col_end) ; r_col_it += WARP_SIZE )
{
// Is it an active thread.
const bool is_active = r_col_it < r_col_end;
// Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID.
int a_row_id = -1;
if ( is_active )
{
a_row_id = R_cols[r_col_it];
}
const int num_rows = __popc( utils::ballot(is_active) );
// Uniform loop: threads collaborate to load other elements.
for ( int k = 0 ; k < num_rows ; k += NUM_LOADED_ROWS )
{
int local_k = k + lane_id_div_num_threads;
// Is it an active thread.
bool is_active_k = local_k < num_rows;
// Threads in the warp process the columns of A in the range [a_col_it, a_col_end).
const int uniform_a_row_id = utils::shfl( a_row_id, local_k );
// Load the range of the row of A.
int a_col_it = 0, a_col_end = 0;
if ( is_active_k )
{
a_col_it = A_rows[uniform_a_row_id + 0];
a_col_end = A_rows[uniform_a_row_id + 1];
}
// Iterate over the range of columns of A.
for ( a_col_it += lane_id_mod_num_threads ; utils::any(a_col_it < a_col_end) ; a_col_it += NUM_THREADS_PER_ROW )
{
int a_col_id = -1, a_agg_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
a_agg_id = aggregates[a_col_id];
}
if ( HAS_DIAG && a_agg_id == r_row_id )
{
a_agg_id = -1;
}
set.insert( a_agg_id, COUNT_ONLY ? wk_status : NULL );
}
}
}
// Store the results.
if ( COUNT_ONLY )
{
int count = set.compute_size_with_duplicates();
if ( lane_id == 0 )
{
Ac_rows[r_row_id] = count;
}
}
else
{
int ac_col_it = Ac_rows[r_row_id];
set.store_with_positions( &Ac_cols[ac_col_it], &Ac_pos[ac_col_it] );
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
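// Fills the values of Ac for scalar (1x1) blocks. Each warp owns one row of Ac and uses a hash
// map keyed by aggregate ID to accumulate the scalar contributions; the diagonal is reduced
// across the warp and written separately when HAS_DIAG is set.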
template< typename Value_type, int NUM_THREADS_PER_ROW, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 8 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 8 )
#endif
void fill_A_kernel_1x1( const int R_num_rows,
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *A_diag,
const Value_type *A_vals,
const int *aggregates,
const int *Ac_rows,
const int *Ac_cols,
const int *Ac_pos,
const int *Ac_diag,
Value_type *Ac_vals,
int gmem_size,
int *g_keys,
Value_type *g_vals,
int *wk_work_queue )
{
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
const int NUM_LOADED_ROWS = WARP_SIZE / NUM_THREADS_PER_ROW;
// The hash keys stored in shared memory.
__shared__ volatile int s_keys[NUM_WARPS * SMEM_SIZE];
// The hash values stored in shared memory.
__shared__ volatile Word s_vote[NUM_WARPS * SMEM_SIZE / 4];
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW;
const int lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW;
// First threads load the row IDs of A needed by the CTA...
int r_row_id = blockIdx.x * NUM_WARPS + warp_id;
// Create local storage for the set.
Hash_map<int, Value_type, SMEM_SIZE, 4, WARP_SIZE> map( &s_keys[warp_id * SMEM_SIZE ],
&g_keys[r_row_id * gmem_size ],
&s_vote[warp_id * SMEM_SIZE / 4],
&g_vals[r_row_id * gmem_size ], gmem_size );
// Loop over rows of A.
for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) )
{
// The indices of the output row.
int ac_col_it = Ac_rows[r_row_id + 0];
int ac_col_end = Ac_rows[r_row_id + 1];
// Clear the map first. TODO: check whether this is actually needed.
map.clear();
// Populate the map.
map.load( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it] );
int r_col_it = R_rows[r_row_id + 0];
int r_col_end = R_rows[r_row_id + 1];
// The diagonal.
Value_type r_diag(types::util<Value_type>::get_zero());
// Iterate over the columns of R.
for ( r_col_it += lane_id ; utils::any(r_col_it < r_col_end) ; r_col_it += WARP_SIZE )
{
// Is it an active thread.
const bool is_active = r_col_it < r_col_end;
// Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID.
int a_row_id = -1;
if ( is_active )
{
a_row_id = R_cols[r_col_it];
}
// Update the diagonal (if needed).
if ( HAS_DIAG && is_active )
{
r_diag = r_diag + A_vals[A_diag[a_row_id]];
}
const int num_rows = __popc( utils::ballot(is_active) );
// Uniform loop: threads collaborate to load other elements.
for ( int k = 0 ; k < num_rows ; k += NUM_LOADED_ROWS )
{
int local_k = k + lane_id_div_num_threads;
// Threads in the warp process the columns of A in the range [a_col_it, a_col_end).
const int uniform_a_row_id = utils::shfl( a_row_id, local_k );
// The range of the row of A.
int a_col_it = 0, a_col_end = 0;
if ( local_k < num_rows )
{
a_col_it = utils::Ld<utils::LD_CG>::load( &A_rows[uniform_a_row_id + 0] );
a_col_end = utils::Ld<utils::LD_CG>::load( &A_rows[uniform_a_row_id + 1] );
}
// Iterate over the range of columns of A.
for ( a_col_it += lane_id_mod_num_threads ; utils::any(a_col_it < a_col_end) ; a_col_it += NUM_THREADS_PER_ROW )
{
// Load columns and values.
int a_col_id = -1;
Value_type a_value(types::util<Value_type>::get_zero());
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
a_value = A_vals[a_col_it];
}
// Find the aggregate.
int a_agg_id = -1;
if ( a_col_it < a_col_end )
{
a_agg_id = aggregates[a_col_id];
}
// Update the diag/hash map.
if ( HAS_DIAG && a_agg_id == r_row_id )
{
r_diag = r_diag + a_value;
a_agg_id = -1;
}
map.insert_with_duplicates( a_agg_id, a_value, NULL ); // It won't insert. Only update.
}
}
}
// Update the diagonal.
if ( HAS_DIAG )
{
r_diag = utils::warp_reduce<1, utils::Add>( r_diag );
if ( lane_id == 0 )
{
Ac_vals[Ac_diag[r_row_id]] = r_diag;
}
}
// Store the results.
int count = ac_col_end - ac_col_it;
if ( count == 0 )
{
continue;
}
map.store( count, &Ac_vals[ac_col_it] );
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
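// Fills the values of Ac for 4x4 blocks. Each warp owns one row of R (one aggregate) and splits
// into two half-warps of 16 lanes, each handling the 16 values of one block. A hash set maps
// aggregate IDs to positions inside the output row; when both half-warps target the same output
// block their contributions are reduced with a shuffle and only the first half-warp writes.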
template< typename Value_type, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG >
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__global__ __launch_bounds__( CTA_SIZE, 8 )
#else
__global__ __launch_bounds__( CTA_SIZE, 8 )
#endif
void fill_A_kernel_4x4( const int R_num_rows, // same as num_aggregates.
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *A_diag,
const Value_type *A_vals,
const int *aggregates,
const int *Ac_rows,
const int *Ac_cols,
const int *Ac_pos,
const int *Ac_diag,
Value_type *Ac_vals,
const int gmem_size,
int *g_keys,
int *g_idx,
int *wk_work_queue )
{
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
// The hash keys stored in shared memory.
__shared__ volatile int s_keys[NUM_WARPS * SMEM_SIZE];
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id( );
const int lane_id = utils::lane_id( );
// Constants.
const int lane_id_div_16 = lane_id / 16;
const int lane_id_mod_16 = lane_id % 16;
const int warp_offset = 16 * lane_id_div_16;
// First threads load the row IDs of A needed by the CTA...
int r_row_id = blockIdx.x * NUM_WARPS + warp_id;
// My index.
Hash_index<int, SMEM_SIZE, WARP_SIZE> index( &g_idx[r_row_id * gmem_size] );
// Create local storage for the set.
Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size );
// Loop over rows of R.
for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) )
{
// The indices of the row.
int ac_col_it = Ac_rows[r_row_id + 0];
int ac_col_end = Ac_rows[r_row_id + 1];
// Clear the set first.
set.clear(true);
// Populate the index.
set.load_index( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it], index, false );
// Load the range of the row.
int r_col_it = R_rows[r_row_id + 0];
int r_col_end = R_rows[r_row_id + 1];
// Diagonal value (each half warp stores a diagonal element).
Value_type ac_diag(types::util<Value_type>::get_zero());
// Iterate over the columns of R.
for ( r_col_it += lane_id_div_16 ; utils::any(r_col_it < r_col_end) ; r_col_it += 2 )
{
// Is it an active thread.
const bool is_active = r_col_it < r_col_end;
// Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID.
int a_row_id = -1;
if ( is_active )
{
a_row_id = R_cols[r_col_it];
}
// Update the diagonal if needed.
if ( HAS_DIAG && is_active )
{
ac_diag = ac_diag + A_vals[16 * A_diag[a_row_id] + lane_id_mod_16];
}
// Load the range of the row of A.
int a_col_begin = 0, a_col_end = 0;
if ( is_active )
{
a_col_begin = A_rows[a_row_id + 0];
a_col_end = A_rows[a_row_id + 1];
}
// Iterate over the range of columns of A.
for ( ; utils::any(a_col_begin < a_col_end) ; a_col_begin += 16 )
{
int a_col_it = a_col_begin + lane_id_mod_16;
// Each thread loads a column-ID and an aggregate.
int a_col_id = -1, ac_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols [a_col_it];
ac_col_id = aggregates[a_col_id];
}
// Each thread uses the hashed index to find the position associated with the aggregate.
int key = ac_col_id;
if ( HAS_DIAG && ac_col_id == r_row_id )
{
key = -1;
}
int ac_idx = ac_col_it + set.find_index( key, index, false );
// Iterate over the 16 items.
for ( int k = 0 ; k < 16 ; ++k )
{
int uniform_ac_col = utils::shfl( ac_col_id, warp_offset + k );
int uniform_ac_idx = utils::shfl( ac_idx, warp_offset + k );
// Early loop exit.
if ( utils::all( uniform_ac_col == -1 ) )
{
break;
}
// The index of the item.
const int uniform_a_col_it = a_col_begin + k;
// Load the value if possible.
Value_type a_value(types::util<Value_type>::get_zero());
if ( uniform_a_col_it < a_col_end )
{
a_value = A_vals[16 * uniform_a_col_it + lane_id_mod_16];
}
// Update the diagonal if it is a diagonal term.
if ( HAS_DIAG && uniform_ac_col == r_row_id )
{
ac_diag = ac_diag + a_value;
uniform_ac_col = -1;
}
// Get the id of the column computed by the other half warp.
int other_ac_col = utils::shfl_xor( uniform_ac_col, 16 );
// If both half warps want to write to the same location, we have a conflict!!!
int are_fighting = uniform_ac_col == other_ac_col;
// Reduce the two values to a single one.
if ( uniform_ac_col != -1 && are_fighting )
{
a_value = a_value + utils::shfl_xor( a_value, 16 );
}
// If the two half warps fight, only one can be the winner... It's the 1st half!!!
int is_winner = !are_fighting || lane_id_div_16 == 0;
// Update the value.
if ( uniform_ac_col != -1 && is_winner )
{
Ac_vals[16 * uniform_ac_idx + lane_id_mod_16] = Ac_vals[16 * uniform_ac_idx + lane_id_mod_16] + a_value;
}
}
}
}
if ( HAS_DIAG )
{
ac_diag = ac_diag + utils::shfl_xor( ac_diag, 16 );
if ( lane_id_div_16 == 0 )
{
Ac_vals[16 * Ac_diag[r_row_id] + lane_id_mod_16] = ac_diag;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
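// Generic NxN fill kernel for block sizes where N*N fits within a warp. Each warp owns one row
// of Ac and processes WARP_SIZE / (N*N) source blocks per iteration (a single block per iteration
// when FORCE_DETERMINISM is set, which fixes the floating-point accumulation order).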
template< typename Value_type, int N, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG, bool FORCE_DETERMINISM >
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__global__ __launch_bounds__( CTA_SIZE, 8 )
#else
__global__ __launch_bounds__( CTA_SIZE, 8 )
#endif
void fill_A_kernel_NxN( const int R_num_rows, // same as num_aggregates.
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *A_diag,
const Value_type *A_vals,
const int *aggregates,
const int *Ac_rows,
const int *Ac_cols,
const int *Ac_pos,
const int *Ac_diag,
Value_type *Ac_vals,
const int gmem_size,
int *g_keys,
int *g_idx,
int *wk_work_queue )
{
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp.
const int T_WARP = FORCE_DETERMINISM ? 1 : WARP_SIZE / NxN;
const int NUM_ITEMS_PER_WARP = T_WARP == 0 ? 1 : T_WARP;
// The hash keys stored in shared memory.
__shared__ volatile int s_keys[NUM_WARPS * SMEM_SIZE];
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id( );
const int lane_id = utils::lane_id( );
// Constants.
const int lane_id_div_NxN = lane_id / NxN;
const int lane_id_mod_NxN = lane_id % NxN;
const int warp_offset = NxN * lane_id_div_NxN;
// First threads load the row IDs of A needed by the CTA...
int r_row_id = blockIdx.x * NUM_WARPS + warp_id;
// My index.
Hash_index<int, SMEM_SIZE, WARP_SIZE> index( &g_idx[r_row_id * gmem_size] );
// Create local storage for the set.
Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size );
// Loop over rows of R.
for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) )
{
// The indices of the row.
int ac_col_it = Ac_rows[r_row_id + 0];
int ac_col_end = Ac_rows[r_row_id + 1];
// Clear the set first.
set.clear(true);
// Populate the index.
set.load_index( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it], index, false );
// Load the range of the row.
int r_col_it = R_rows[r_row_id + 0];
int r_col_end = R_rows[r_row_id + 1];
// Diagonal value (each half warp stores a diagonal element).
Value_type ac_diag(types::util<Value_type>::get_zero());
// Iterate over the columns of R.
for ( r_col_it += lane_id_div_NxN ; utils::any(r_col_it < r_col_end) ; r_col_it += NUM_ITEMS_PER_WARP )
{
// Is it an active thread.
const bool is_active = r_col_it < r_col_end && lane_id_div_NxN < NUM_ITEMS_PER_WARP;
// Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID.
int a_row_id = -1;
if ( is_active )
{
a_row_id = R_cols[r_col_it];
}
// Update the diagonal if needed.
if ( HAS_DIAG && is_active )
{
ac_diag = ac_diag + A_vals[NxN * A_diag[a_row_id] + lane_id_mod_NxN];
}
// Load the range of the row of A.
int a_col_begin = 0, a_col_end = 0;
if ( is_active )
{
a_col_begin = A_rows[a_row_id + 0];
a_col_end = A_rows[a_row_id + 1];
}
// Iterate over the range of columns of A.
for ( ; utils::any(a_col_begin < a_col_end) ; a_col_begin += NxN )
{
int a_col_it = a_col_begin + lane_id_mod_NxN;
// Is it active.
const bool is_active_k = a_col_it < a_col_end && lane_id_div_NxN < NUM_ITEMS_PER_WARP;
// Each thread loads a column-ID and an aggregate.
int a_col_id = -1, ac_col_id = -1;
if ( is_active_k )
{
a_col_id = A_cols [a_col_it];
ac_col_id = aggregates[a_col_id];
}
// Each thread uses the hashed index to find the position associated with the aggregate.
int key = ac_col_id;
if ( HAS_DIAG && ac_col_id == r_row_id )
{
key = -1;
}
int ac_idx = ac_col_it + set.find_index( key, index, false );
// Iterate over the NxN items.
for ( int k = 0 ; k < NxN ; ++k )
{
int uniform_ac_col = utils::shfl( ac_col_id, warp_offset + k );
int uniform_ac_idx = utils::shfl( ac_idx, warp_offset + k );
if ( lane_id_div_NxN >= NUM_ITEMS_PER_WARP )
{
uniform_ac_col = -1;
uniform_ac_idx = -1;
}
// Early loop exit.
if ( utils::all( uniform_ac_col == -1 ) )
{
break;
}
// The index of the item.
const int uniform_a_col_it = a_col_begin + k;
// Load the value if possible.
Value_type a_value(types::util<Value_type>::get_zero());
if ( uniform_a_col_it < a_col_end && lane_id_div_NxN < NUM_ITEMS_PER_WARP )
{
a_value = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN];
}
// Update the diagonal if it is a diagonal term.
if ( HAS_DIAG && uniform_ac_col == r_row_id )
{
ac_diag = ac_diag + a_value;
uniform_ac_col = -1;
}
// Update the value.
if ( uniform_ac_col != -1 )
{
utils::atomic_add( &Ac_vals[NxN * uniform_ac_idx + lane_id_mod_NxN], a_value );
}
}
}
}
if ( HAS_DIAG )
{
if ( !FORCE_DETERMINISM )
{
ac_diag = utils::warp_reduce<NxN, utils::Add>( ac_diag );
}
if ( lane_id_div_NxN == 0 )
{
Ac_vals[NxN * Ac_diag[r_row_id] + lane_id_mod_NxN] = ac_diag;
}
}
}
}
// when blocksize is larger than warp size
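// Each warp still owns one row of Ac, but a single N*N block no longer fits in a warp, so the
// values of every block are swept in NUM_BLOCK_ITERS_PER_WARP chunks of WARP_SIZE lanes.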
template< typename Value_type, int N, int CTA_SIZE, int SMEM_SIZE, int WARP_SIZE, bool HAS_DIAG, bool FORCE_DETERMINISM, int NUM_BLOCK_ITERS_PER_WARP>
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__global__ __launch_bounds__( CTA_SIZE, 8 )
#else
__global__ __launch_bounds__( CTA_SIZE, 8 )
#endif
void fill_A_kernel_NxN_large( const int R_num_rows, // same as num_aggregates.
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *A_diag,
const Value_type *A_vals,
const int *aggregates,
const int *Ac_rows,
const int *Ac_cols,
const int *Ac_pos,
const int *Ac_diag,
Value_type *Ac_vals,
const int gmem_size,
int *g_keys,
int *g_idx,
int *wk_work_queue )
{
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp. Let's be chill here and take 1 per warp for large blocks
const int NUM_ITEMS_PER_WARP = 1;
// The hash keys stored in shared memory.
__shared__ volatile int s_keys[NUM_WARPS * SMEM_SIZE];
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id( );
const int lane_id = utils::lane_id( );
// First threads load the row IDs of A needed by the CTA...
int r_row_id = blockIdx.x * NUM_WARPS + warp_id;
// My index.
Hash_index<int, SMEM_SIZE, WARP_SIZE> index( &g_idx[r_row_id * gmem_size] );
// Create local storage for the set.
Hash_set<int, SMEM_SIZE, 4, WARP_SIZE> set( &s_keys[warp_id * SMEM_SIZE], &g_keys[r_row_id * gmem_size], gmem_size );
// Loop over rows of R.
for ( ; r_row_id < R_num_rows ; r_row_id = get_work( wk_work_queue, warp_id ) )
{
// The indices of the row.
int ac_col_it = Ac_rows[r_row_id + 0];
int ac_col_end = Ac_rows[r_row_id + 1];
// Clear the set first.
set.clear(true);
// Populate the index.
set.load_index( ac_col_end - ac_col_it, &Ac_cols[ac_col_it], &Ac_pos[ac_col_it], index, false );
// Load the range of the row.
int r_col_it = R_rows[r_row_id + 0];
int r_col_end = R_rows[r_row_id + 1];
// Diagonal value (each half warp stores a diagonal element).
Value_type ac_diag(types::util<Value_type>::get_zero());
// Iterate over the columns of R.
for ( ; utils::any(r_col_it < r_col_end) ; r_col_it += NUM_ITEMS_PER_WARP )
{
// Columns of R map to rows of A. Each thread of the warp loads its R-col/A-row ID.
int a_row_id = R_cols[r_col_it];
// Update the diagonal if needed.
if ( HAS_DIAG )
{
ac_diag = ac_diag + A_vals[NxN * A_diag[a_row_id] + lane_id];
}
// Load the range of the row of A.
int a_col_begin = A_rows[a_row_id + 0];
int a_col_end = A_rows[a_row_id + 1];
// Iterate over the range of columns of A.
for ( ; utils::any(a_col_begin < a_col_end) ; a_col_begin += NxN )
{
int a_col_it = a_col_begin + lane_id;
// Is it active.
const bool is_active_k = a_col_it < a_col_end;
// Each thread loads a column-ID and an aggregate.
int a_col_id = -1, ac_col_id = -1;
if ( is_active_k )
{
a_col_id = A_cols [a_col_it];
ac_col_id = aggregates[a_col_id];
}
// Each thread uses the hashed index to find the position associated with the aggregate.
int key = ac_col_id;
if ( HAS_DIAG && ac_col_id == r_row_id )
{
key = -1;
}
int ac_idx = ac_col_it + set.find_index( key, index, false );
// Iterate over the NxN items.
for ( int k = 0 ; k < NxN ; ++k )
{
int uniform_ac_col = utils::shfl( ac_col_id, k );
int uniform_ac_idx = utils::shfl( ac_idx, k );
// Early loop exit.
if ( utils::all( uniform_ac_col == -1 ) )
{
break;
}
// The index of the item.
const int uniform_a_col_it = a_col_begin + k;
// iterate through the block
#pragma unroll
for (int i = 0; i < NUM_BLOCK_ITERS_PER_WARP; i++)
{
// Load the value if possible.
Value_type a_value(types::util<Value_type>::get_zero());
if ( uniform_a_col_it < a_col_end && (WARP_SIZE * i + lane_id) < NxN )
{
a_value = A_vals[NxN * uniform_a_col_it + WARP_SIZE * i + lane_id];
}
// Update the diagonal if it is a diagonal term.
if ( HAS_DIAG && uniform_ac_col == r_row_id )
{
ac_diag = ac_diag + a_value;
uniform_ac_col = -1;
}
// Update the value.
if ( uniform_ac_col != -1 && (WARP_SIZE * i + lane_id) < NxN)
{
utils::atomic_add( &Ac_vals[NxN * uniform_ac_idx + WARP_SIZE * i + lane_id], a_value );
}
}
}
}
}
if ( HAS_DIAG )
{
if ( !FORCE_DETERMINISM )
{
ac_diag = utils::warp_reduce<NxN, utils::Add>( ac_diag );
}
Ac_vals[NxN * Ac_diag[r_row_id] + lane_id] = ac_diag;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
enum { WARP_SIZE = 32, SMEM_SIZE = 128 };
template< int CTA_SIZE, bool HAS_DIAG, bool COUNT_ONLY, typename Workspace >
static
void compute_sparsity_dispatch( Workspace &hash_wk,
const int R_num_rows,
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *aggregates,
int *Ac_rows,
int *Ac_cols,
int *Ac_pos )
{
cudaDeviceProp props = getDeviceProperties();
int GRID_SIZE = (props.major >= 7) ? 1024 : 128;
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
int *h_status;
thrust::global_thread_handle::cudaMallocHost((void **) &h_status, sizeof(int));
int *h_work_offset;
thrust::global_thread_handle::cudaMallocHost((void **) &h_work_offset, sizeof(int));
int attempt = 0;
bool warning_printed = false;
for ( bool done = false ; !done && attempt < 10 ; ++attempt )
{
// Double the amount of GMEM (if needed).
if ( attempt > 0 )
{
if (!warning_printed)
{
amgx_printf("WARNING: Used settings might result in degraded performance for the MG coarsener for this matrix.\n");
amgx_printf("WARNING: You might want to try different selector or MG algorithm for better performance.\n");
warning_printed = true;
}
hash_wk.expand();
}
// Reset the status.
int *p_status = h_status;
*p_status = 0;
cudaMemcpyAsync( hash_wk.get_status(), p_status, sizeof(int), cudaMemcpyHostToDevice, thrust::global_thread_handle::get_stream() );
cudaCheckError();
// Reset the work queue.
int *p_work_offset = h_work_offset;
*p_work_offset = GRID_SIZE * NUM_WARPS;
cudaMemcpyAsync( hash_wk.get_work_queue(), p_work_offset, sizeof(int), cudaMemcpyHostToDevice, thrust::global_thread_handle::get_stream() );
cudaCheckError();
// Launch the kernel.
compute_sparsity_kernel<8, CTA_SIZE, SMEM_SIZE, WARP_SIZE, HAS_DIAG, COUNT_ONLY> <<< GRID_SIZE, CTA_SIZE, 0, thrust::global_thread_handle::get_stream()>>>(
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
hash_wk.get_work_queue(),
hash_wk.get_status() );
cudaCheckError();
// Read the result from count_non_zeroes.
cudaMemcpyAsync( p_status, hash_wk.get_status(), sizeof(int), cudaMemcpyDeviceToHost, thrust::global_thread_handle::get_stream() );
cudaStreamSynchronize(thrust::global_thread_handle::get_stream());
done = (*p_status == 0);
cudaCheckError();
}
thrust::global_thread_handle::cudaFreeHost(h_status);
thrust::global_thread_handle::cudaFreeHost(h_work_offset);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< int CTA_SIZE, bool HAS_DIAG, typename Workspace, typename Value_type >
static
void fill_A_dispatch( Workspace &hash_wk,
const int block_size,
const int R_num_rows, // same as num_aggregates.
const int *R_rows,
const int *R_cols,
const int *A_rows,
const int *A_cols,
const int *A_diag,
const Value_type *A_vals,
const int *aggregates,
const int *Ac_rows,
const int *Ac_cols,
const int *Ac_pos,
const int *Ac_diag,
Value_type *Ac_vals,
bool force_determinism )
{
cudaDeviceProp props = getDeviceProperties();
int GRID_SIZE = (props.major >= 7) ? 1024 : 128;
const int NUM_WARPS = CTA_SIZE / WARP_SIZE;
int work_offset = GRID_SIZE * NUM_WARPS;
cudaMemcpyAsync( hash_wk.get_work_queue(), &work_offset, sizeof(int), cudaMemcpyHostToDevice, thrust::global_thread_handle::get_stream() );
cudaCheckError();
// Launch the kernel.
switch ( block_size )
{
case 1:
fill_A_kernel_1x1<Value_type, 8, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG> <<< GRID_SIZE, CTA_SIZE>>>(
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
hash_wk.get_vals(),
hash_wk.get_work_queue() );
break;
case 2:
fill_A_kernel_NxN<Value_type, 2, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false> <<< GRID_SIZE, CTA_SIZE>>>(
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
break;
case 3:
fill_A_kernel_NxN<Value_type, 3, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false> <<< GRID_SIZE, CTA_SIZE>>>(
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
break;
case 4:
if ( force_determinism )
fill_A_kernel_NxN<Value_type, 4, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, true> <<< GRID_SIZE, CTA_SIZE>>>(
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
else
fill_A_kernel_4x4<Value_type, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG> <<< GRID_SIZE, CTA_SIZE, 0, thrust::global_thread_handle::get_stream()>>>(
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
break;
case 5:
fill_A_kernel_NxN<Value_type, 5, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false> <<< GRID_SIZE, CTA_SIZE>>>(
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
break;
case 8:
fill_A_kernel_NxN_large<Value_type, 8, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false, 2> <<< GRID_SIZE, CTA_SIZE>>>(
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
break;
case 10:
fill_A_kernel_NxN_large<Value_type, 10, CTA_SIZE, SMEM_SIZE, 32, HAS_DIAG, false, 4> <<< GRID_SIZE, CTA_SIZE>>>(
R_num_rows,
R_rows,
R_cols,
A_rows,
A_cols,
A_diag,
A_vals,
aggregates,
Ac_rows,
Ac_cols,
Ac_pos,
Ac_diag,
Ac_vals,
hash_wk.get_gmem_size(),
hash_wk.get_keys(),
reinterpret_cast<int *>( hash_wk.get_vals() ),
hash_wk.get_work_queue() );
break;
default:
FatalError( "LOW_DEG not implemented for this block size", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE );
}
cudaCheckError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void
LowDegCoarseAGenerator<TemplateConfig<AMGX_device, V, M, I> >::computeAOperator( const Matrix_d &A,
Matrix_d &Ac,
const IVector &aggregates,
const IVector &R_row_offsets,
const IVector &R_column_indices,
const int num_aggregates )
{
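// Overall flow: (1) count the non-zero blocks per row of Ac (COUNT_ONLY sparsity pass),
// (2) exclusive-scan the counts into row offsets, (3) re-run the sparsity pass to record column
// indices and hash positions, (4) fill the values with fill_A_dispatch, (5) finalize the diagonal.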
if ( A.get_block_dimx() != A.get_block_dimy() )
{
FatalError( "LowDegCoarseAGenerator implemented for squared blocks", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE );
}
// The matrix Ac will be modified.
Ac.set_initialized(0);
// Is the diagonal stored separately?
const int diag_prop = A.hasProps(DIAG);
// Allocate a workspace for hashing.
typedef TemplateConfig<AMGX_device, V, M, I> TConfig_d;
cudaDeviceProp props = getDeviceProperties();
int grid_size = (props.major >= 7) ? 1024 : 128;
Hash_Workspace<TConfig_d, int> hash_wk(true, grid_size);
// Compute row offsets of Ac.
Ac.addProps(CSR);
Ac.set_num_rows( num_aggregates );
Ac.set_num_cols( num_aggregates );
Ac.row_offsets.resize( num_aggregates + 1 );
// Compute the number of non-zero elements per row of Ac.
const int CTA_SIZE = 128;
if ( diag_prop )
compute_sparsity_dispatch<CTA_SIZE, true, true>(
hash_wk,
num_aggregates,
R_row_offsets.raw(),
R_column_indices.raw(),
A.row_offsets.raw(),
A.col_indices.raw(),
aggregates.raw(),
Ac.row_offsets.raw(),
NULL,
NULL );
else
compute_sparsity_dispatch<CTA_SIZE, false, true>(
hash_wk,
num_aggregates,
R_row_offsets.raw(),
R_column_indices.raw(),
A.row_offsets.raw(),
A.col_indices.raw(),
aggregates.raw(),
Ac.row_offsets.raw(),
NULL,
NULL );
cudaCheckError();
// Compute the number of non-zeroes.
thrust_wrapper::exclusive_scan( Ac.row_offsets.begin(), Ac.row_offsets.end(), Ac.row_offsets.begin() );
cudaCheckError();
int nonzero_blocks = Ac.row_offsets[num_aggregates];
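// Worked example of the count-then-scan pattern above (values illustrative only): if the
// per-row block counts written into Ac.row_offsets were [3, 1, 4, 2] for num_aggregates = 4,
// the exclusive scan turns them into the offsets [0, 3, 4, 8] and Ac.row_offsets[4] becomes 10,
// the total picked up by nonzero_blocks.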
if ( diag_prop )
{
Ac.addProps(DIAG);
}
if ( A.is_matrix_singleGPU() )
{
Ac.resize( num_aggregates, num_aggregates, nonzero_blocks, A.get_block_dimy(), A.get_block_dimx(), !diag_prop );
}
else
{
//have 3% more nz for storage
Ac.resize_spare( num_aggregates, num_aggregates, nonzero_blocks, A.get_block_dimy(), A.get_block_dimx(), 1.0 );
if ( diag_prop )
{
Ac.computeDiagonal();
}
}
// Vector to store the positions in the hash table.
device_vector_alloc<int> Ac_pos(nonzero_blocks);
// Compute the sparsity pattern of the rows of Ac.
if ( diag_prop )
compute_sparsity_dispatch<CTA_SIZE, true, false>(
hash_wk,
num_aggregates,
R_row_offsets.raw(),
R_column_indices.raw(),
A.row_offsets.raw(),
A.col_indices.raw(),
aggregates.raw(),
Ac.row_offsets.raw(),
Ac.col_indices.raw(),
thrust::raw_pointer_cast( &Ac_pos.front() ));
else
compute_sparsity_dispatch<CTA_SIZE, false, false>(
hash_wk,
num_aggregates,
R_row_offsets.raw(),
R_column_indices.raw(),
A.row_offsets.raw(),
A.col_indices.raw(),
aggregates.raw(),
Ac.row_offsets.raw(),
Ac.col_indices.raw(),
thrust::raw_pointer_cast( &Ac_pos.front() ));
cudaCheckError();
// Reset values if needed.
if ( A.get_block_dimy() != 1 )
{
thrust::fill( Ac.values.begin(), Ac.values.end(), types::util<ValueType>::get_zero() );
cudaCheckError();
}
// Compute values.
if ( diag_prop )
{
fill_A_dispatch<CTA_SIZE, true>(
hash_wk,
A.get_block_dimy(),
num_aggregates,
R_row_offsets.raw(),
R_column_indices.raw(),
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
aggregates.raw(),
Ac.row_offsets.raw(),
Ac.col_indices.raw(),
thrust::raw_pointer_cast( &Ac_pos.front() ),
Ac.diag.raw(),
Ac.values.raw(),
this->m_force_determinism );
}
else
{
fill_A_dispatch<CTA_SIZE, false>(
hash_wk,
A.get_block_dimy(),
num_aggregates,
R_row_offsets.raw(),
R_column_indices.raw(),
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
aggregates.raw(),
Ac.row_offsets.raw(),
Ac.col_indices.raw(),
thrust::raw_pointer_cast( &Ac_pos.front() ),
Ac.diag.raw(),
Ac.values.raw(),
this->m_force_determinism );
}
cudaCheckError();
// Update the diagonal if needed.
if ( Ac.is_matrix_singleGPU() )
{
Ac.computeDiagonal();
}
cudaCheckError();
// Finalize the modification.
Ac.set_initialized(1);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void
LowDegCoarseAGenerator<TemplateConfig<AMGX_host, V, M, I> >::computeAOperator( const Matrix_h &h_A,
Matrix_h &h_Ac,
const IVector &h_aggregates,
const IVector &h_R_row_offsets,
const IVector &h_R_column_indices,
const int num_aggregates )
{
h_Ac.set_initialized(0);
IVector rows;
IVector inds;
typename Matrix_h::MVector vals;
typename Matrix_h::MVector diag;
int num_nnz = 0;
int diag_prop = h_A.hasProps(DIAG);
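// The triple loop below forms the coarse operator as a plain aggregation Galerkin product:
// for coarse row I and coarse column J, Ac(I,J) = sum over fine rows j in aggregate I and fine
// columns k in aggregate J of A(j,k), with the diagonal block kept separately when DIAG is set.
// The device path above computes the same quantity with hash tables instead of this
// O(num_aggregates^2) scan.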
for ( int row = 0; row < num_aggregates; row++ )
{
for ( int col = 0; col < num_aggregates; col++ )
{
int fill = 0;
typename Matrix_h::MVector cur(h_A.get_block_size(), types::util<typename Matrix_h::value_type>::get_zero());
for ( int rc = h_R_row_offsets[row]; rc < h_R_row_offsets[row + 1]; rc++ )
{
int j = h_R_column_indices[rc];
for ( int ac = h_A.row_offsets[j]; ac < h_A.row_offsets[j + 1] + diag_prop; ac++ )
{
int k = (ac == h_A.row_offsets[j + 1]) ? j : h_A.col_indices[ac];
for ( int q = h_R_row_offsets[col]; q < h_R_row_offsets[col + 1]; q++ )
if ( k == h_R_column_indices[q] )
{
fill = 1;
int val_idx = (ac == h_A.row_offsets[j + 1]) ? h_A.get_num_nz() + j : ac;
for ( int v = 0; v < h_A.get_block_size(); v++)
{
cur[v] = cur[v] + h_A.values[val_idx * h_A.get_block_size() + v];
}
}
}
}
if ( fill )
{
if ( row != col || !diag_prop )
{
inds.push_back(col);
rows.push_back(row);
num_nnz++;
for ( int v = 0; v < h_A.get_block_size(); v++ )
{
vals.push_back(cur[v]);
}
}
else
{
for ( int v = 0; v < h_A.get_block_size(); v++ )
{
diag.push_back(cur[v]);
}
}
}
}
}
rows.push_back(-1);
// add diagonal to the end
if ( diag_prop )
{
for ( int v = 0; v < num_aggregates * h_A.get_block_size(); v++ )
{
vals.push_back(diag[v]);
}
}
else
{
// Add a zero at the end
for (int v = 0; v < h_A.get_block_size(); v++)
{
vals.push_back(types::util<typename Matrix_h::value_type>::get_zero());
}
}
h_Ac.resize(num_aggregates, num_aggregates, num_nnz, h_A.get_block_dimx(), h_A.get_block_dimy(), 1);
h_Ac.row_indices = rows;
h_Ac.col_indices = inds;
h_Ac.values = vals;
h_Ac.addProps( CSR | ( diag_prop ? DIAG : 0 ) );
h_Ac.computeDiagonal();
h_Ac.set_initialized(1);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define AMGX_CASE_LINE(CASE) template class LowDegCoarseAGenerator<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace aggregation
} // namespace amgx
|
3abd9794ffbd94c69064a2b6d81e41ff32d470a4.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
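// Reading the instantiation above: both the threadblock and warp GEMM tiles are 32x32x32, the
// 1x1x4 instruction shape corresponds to the 4-wide int8 dot product used on the SIMT Sm61 path,
// and the epilogue fuses the bias add, the linear combination, and the clamp back to int8.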
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 3abd9794ffbd94c69064a2b6d81e41ff32d470a4.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
3d346f6f2e49268a38ae76ef265af71fff696cfc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "SubIntsCUDA.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
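// For instance, XSIZE = 1016 with BLOCKX = 24 is padded up to iXSIZE = 1032 here, giving
// gridBlock.x = 43 in the launch configuration below.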
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(SubIntsCUDA, dim3(gridBlock), dim3(threadBlock), 0, 0, a, b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(SubIntsCUDA, dim3(gridBlock), dim3(threadBlock), 0, 0, a, b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(SubIntsCUDA, dim3(gridBlock), dim3(threadBlock), 0, 0, a, b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3d346f6f2e49268a38ae76ef265af71fff696cfc.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "SubIntsCUDA.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
SubIntsCUDA<<<gridBlock,threadBlock>>>(a,b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
SubIntsCUDA<<<gridBlock,threadBlock>>>(a,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
SubIntsCUDA<<<gridBlock,threadBlock>>>(a,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
fc839960cd10a13cf2499906eca4e61be4cee2f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "CosineSqAngleForceGPU.cuh"
#include "hoomd/TextureTools.h"
#include <assert.h>
// SMALL a relatively small number
#define SMALL Scalar(0.001)
/*! \file CosineSqAngleForceGPU.cu
\brief Defines GPU kernel code for calculating the cosine squared angle forces. Used by
CosineSqAngleForceComputeGPU.
*/
//! Kernel for calculating cosine squared angle forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch Pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param d_params Parameters for the angle force
\param box Box dimensions for periodic boundary condition handling
\param alist Angle data to use in calculating the forces
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
*/
extern "C" __global__ void gpu_compute_cosinesq_angle_forces_kernel(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar2 *d_params,
BoxDim box,
const group_storage<3> *alist,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
int n_angles = n_angles_list[idx];
// read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes)
Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet
Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z);
Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet
// initialize the force to 0
Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar fab[3], fcb[3];
// initialize the virial to 0
Scalar virial[6];
for (int i = 0; i < 6; i++)
virial[i] = Scalar(0.0);
// loop over all angles
for (int angle_idx = 0; angle_idx < n_angles; angle_idx++)
{
group_storage<3> cur_angle = alist[pitch*angle_idx + idx];
int cur_angle_x_idx = cur_angle.idx[0];
int cur_angle_y_idx = cur_angle.idx[1];
int cur_angle_type = cur_angle.idx[2];
int cur_angle_abc = apos_list[pitch*angle_idx + idx];
// get the a-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 x_postype = d_pos[cur_angle_x_idx];
Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 y_postype = d_pos[cur_angle_y_idx];
Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z);
if (cur_angle_abc == 0)
{
a_pos = idx_pos;
b_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 1)
{
b_pos = idx_pos;
a_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 2)
{
c_pos = idx_pos;
a_pos = x_pos;
b_pos = y_pos;
}
// calculate dr for a-b,c-b,and a-c
Scalar3 dab = a_pos - b_pos;
Scalar3 dcb = c_pos - b_pos;
Scalar3 dac = a_pos - c_pos;
// apply periodic boundary conditions
dab = box.minImage(dab);
dcb = box.minImage(dcb);
dac = box.minImage(dac);
// get the angle parameters (MEM TRANSFER: 8 bytes)
Scalar2 params = __ldg(d_params + cur_angle_type);
Scalar K = params.x;
Scalar t_0 = params.y;
Scalar rsqab = dot(dab, dab);
Scalar rab = fast::sqrt(rsqab);
Scalar rsqcb = dot(dcb, dcb);
Scalar rcb = fast::sqrt(rsqcb);
Scalar c_abbc = dot(dab, dcb);
c_abbc /= rab*rcb; // cos(t)
if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0);
if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0);
// actually calculate the force
// should the user pass cos(t_0) so that it's not calculated each time for each angle?
Scalar dcosth = c_abbc - fast::cos(t_0);
Scalar tk = K*dcosth;
Scalar a = Scalar(1.0) * tk;
Scalar a11 = a * c_abbc / rsqab;
Scalar a12 = -a / (rab * rcb);
Scalar a22 = a * c_abbc / rsqcb;
fab[0] = a11*dab.x + a12*dcb.x;
fab[1] = a11*dab.y + a12*dcb.y;
fab[2] = a11*dab.z + a12*dcb.z;
fcb[0] = a22*dcb.x + a12*dab.x;
fcb[1] = a22*dcb.y + a12*dab.y;
fcb[2] = a22*dcb.z + a12*dab.z;
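// fab is the force on atom a (-dU/dr_a) and fcb the force on atom c; atom b picks up
// -(fab + fcb) below, so the three contributions of each triplet sum to zero.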
// the rest should be the same as for the harmonic bond
// compute 1/3 of the energy, 1/3 for each atom in the angle
Scalar angle_eng = tk*dcosth*Scalar(Scalar(1.0)/Scalar(6.0));
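// With tk = K*(cos(t) - cos(t_0)) this is U/3 for U = (K/2)*(cos(t) - cos(t_0))^2, so
// summing the per-atom contributions over the three members of the angle recovers U once.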
// upper triangular version of virial tensor
Scalar angle_virial[6];
angle_virial[0] = Scalar(1./3.)*(dab.x*fab[0] + dcb.x*fcb[0]);
angle_virial[1] = Scalar(1./3.)*(dab.y*fab[0] + dcb.y*fcb[0]);
angle_virial[2] = Scalar(1./3.)*(dab.z*fab[0] + dcb.z*fcb[0]);
angle_virial[3] = Scalar(1./3.)*(dab.y*fab[1] + dcb.y*fcb[1]);
angle_virial[4] = Scalar(1./3.)*(dab.z*fab[1] + dcb.z*fcb[1]);
angle_virial[5] = Scalar(1./3.)*(dab.z*fab[2] + dcb.z*fcb[2]);
if (cur_angle_abc == 0)
{
force_idx.x += fab[0];
force_idx.y += fab[1];
force_idx.z += fab[2];
}
if (cur_angle_abc == 1)
{
force_idx.x -= fab[0] + fcb[0];
force_idx.y -= fab[1] + fcb[1];
force_idx.z -= fab[2] + fcb[2];
}
if (cur_angle_abc == 2)
{
force_idx.x += fcb[0];
force_idx.y += fcb[1];
force_idx.z += fcb[2];
}
force_idx.w += angle_eng;
for (int i = 0; i < 6; i++)
virial[i] += angle_virial[i];
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
d_force[idx] = force_idx;
for (int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = virial[i];
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param atable List of angles stored on the GPU
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
\param d_params K and t_0 params packed as Scalar2 variables
\param n_angle_types Number of angle types in d_params
\param block_size Block size to use when performing calculations
\returns Any error code resulting from the kernel launch
\note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize()
\a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant
and the y component contains t_0 the equilibrium angle.
*/
hipError_t gpu_compute_cosinesq_angle_forces(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim& box,
const group_storage<3> *atable,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list,
Scalar2 *d_params,
unsigned int n_angle_types,
int block_size)
{
assert(d_params);
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_compute_cosinesq_angle_forces_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// setup the grid to run the kernel
dim3 grid( N / run_block_size + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_compute_cosinesq_angle_forces_kernel), dim3(grid), dim3(threads), 0, 0,
d_force, d_virial, virial_pitch, N, d_pos, d_params, box,
atable, apos_list, pitch, n_angles_list);
return hipSuccess;
}
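// A minimal host-side packing sketch for d_params (hypothetical caller code; one Scalar2 per angle
// type with x = K and y = t_0):
//
// std::vector<Scalar2> h_params(n_angle_types);
// for (unsigned int t = 0; t < n_angle_types; ++t)
// h_params[t] = make_scalar2(K[t], t_0[t]);
// hipMemcpy(d_params, h_params.data(), n_angle_types * sizeof(Scalar2), hipMemcpyHostToDevice);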
| fc839960cd10a13cf2499906eca4e61be4cee2f8.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "CosineSqAngleForceGPU.cuh"
#include "hoomd/TextureTools.h"
#include <assert.h>
// SMALL a relatively small number
#define SMALL Scalar(0.001)
/*! \file CosineSqAngleForceGPU.cu
\brief Defines GPU kernel code for calculating the cosine squared angle forces. Used by
CosineSqAngleForceComputeGPU.
*/
//! Kernel for calculating cosine squared angle forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch Pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param d_params Parameters for the angle force
\param box Box dimensions for periodic boundary condition handling
\param alist Angle data to use in calculating the forces
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
*/
extern "C" __global__ void gpu_compute_cosinesq_angle_forces_kernel(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar2 *d_params,
BoxDim box,
const group_storage<3> *alist,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
// load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
int n_angles = n_angles_list[idx];
// read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes)
Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c triplet
Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z);
Scalar3 a_pos,b_pos,c_pos; // allocate space for the a,b, and c atom in the a-b-c triplet
// initialize the force to 0
Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar fab[3], fcb[3];
// initialize the virial to 0
Scalar virial[6];
for (int i = 0; i < 6; i++)
virial[i] = Scalar(0.0);
// loop over all angles
for (int angle_idx = 0; angle_idx < n_angles; angle_idx++)
{
group_storage<3> cur_angle = alist[pitch*angle_idx + idx];
int cur_angle_x_idx = cur_angle.idx[0];
int cur_angle_y_idx = cur_angle.idx[1];
int cur_angle_type = cur_angle.idx[2];
int cur_angle_abc = apos_list[pitch*angle_idx + idx];
// get the a-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 x_postype = d_pos[cur_angle_x_idx];
Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
Scalar4 y_postype = d_pos[cur_angle_y_idx];
Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z);
if (cur_angle_abc == 0)
{
a_pos = idx_pos;
b_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 1)
{
b_pos = idx_pos;
a_pos = x_pos;
c_pos = y_pos;
}
if (cur_angle_abc == 2)
{
c_pos = idx_pos;
a_pos = x_pos;
b_pos = y_pos;
}
// calculate dr for a-b,c-b,and a-c
Scalar3 dab = a_pos - b_pos;
Scalar3 dcb = c_pos - b_pos;
Scalar3 dac = a_pos - c_pos;
// apply periodic boundary conditions
dab = box.minImage(dab);
dcb = box.minImage(dcb);
dac = box.minImage(dac);
// get the angle parameters (MEM TRANSFER: 8 bytes)
Scalar2 params = __ldg(d_params + cur_angle_type);
Scalar K = params.x;
Scalar t_0 = params.y;
Scalar rsqab = dot(dab, dab);
Scalar rab = fast::sqrt(rsqab);
Scalar rsqcb = dot(dcb, dcb);
Scalar rcb = fast::sqrt(rsqcb);
Scalar c_abbc = dot(dab, dcb);
c_abbc /= rab*rcb; // cos(t)
if (c_abbc > Scalar(1.0)) c_abbc = Scalar(1.0);
if (c_abbc < -Scalar(1.0)) c_abbc = -Scalar(1.0);
// actually calculate the force
// should the user pass cos(t_0) so that it's not calculated each time for each angle?
Scalar dcosth = c_abbc - fast::cos(t_0);
Scalar tk = K*dcosth;
Scalar a = Scalar(1.0) * tk;
Scalar a11 = a * c_abbc / rsqab;
Scalar a12 = -a / (rab * rcb);
Scalar a22 = a * c_abbc / rsqcb;
fab[0] = a11*dab.x + a12*dcb.x;
fab[1] = a11*dab.y + a12*dcb.y;
fab[2] = a11*dab.z + a12*dcb.z;
fcb[0] = a22*dcb.x + a12*dab.x;
fcb[1] = a22*dcb.y + a12*dab.y;
fcb[2] = a22*dcb.z + a12*dab.z;
// the rest should be the same as for the harmonic bond
// compute 1/3 of the energy, 1/3 for each atom in the angle
Scalar angle_eng = tk*dcosth*Scalar(Scalar(1.0)/Scalar(6.0));
// upper triangular version of virial tensor
Scalar angle_virial[6];
angle_virial[0] = Scalar(1./3.)*(dab.x*fab[0] + dcb.x*fcb[0]);
angle_virial[1] = Scalar(1./3.)*(dab.y*fab[0] + dcb.y*fcb[0]);
angle_virial[2] = Scalar(1./3.)*(dab.z*fab[0] + dcb.z*fcb[0]);
angle_virial[3] = Scalar(1./3.)*(dab.y*fab[1] + dcb.y*fcb[1]);
angle_virial[4] = Scalar(1./3.)*(dab.z*fab[1] + dcb.z*fcb[1]);
angle_virial[5] = Scalar(1./3.)*(dab.z*fab[2] + dcb.z*fcb[2]);
if (cur_angle_abc == 0)
{
force_idx.x += fab[0];
force_idx.y += fab[1];
force_idx.z += fab[2];
}
if (cur_angle_abc == 1)
{
force_idx.x -= fab[0] + fcb[0];
force_idx.y -= fab[1] + fcb[1];
force_idx.z -= fab[2] + fcb[2];
}
if (cur_angle_abc == 2)
{
force_idx.x += fcb[0];
force_idx.y += fcb[1];
force_idx.z += fcb[2];
}
force_idx.w += angle_eng;
for (int i = 0; i < 6; i++)
virial[i] += angle_virial[i];
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
d_force[idx] = force_idx;
for (int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = virial[i];
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param virial_pitch pitch of 2D virial array
\param N number of particles
\param d_pos device array of particle positions
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param atable List of angles stored on the GPU
\param pitch Pitch of 2D angles list
\param n_angles_list List of numbers of angles stored on the GPU
\param d_params K and t_0 params packed as Scalar2 variables
\param n_angle_types Number of angle types in d_params
\param block_size Block size to use when performing calculations
\returns Any error code resulting from the kernel launch
\note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize()
\a d_params should include one Scalar2 element per angle type. The x component contains K the spring constant
and the y component contains t_0 the equilibrium angle.
*/
cudaError_t gpu_compute_cosinesq_angle_forces(Scalar4* d_force,
Scalar* d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const BoxDim& box,
const group_storage<3> *atable,
const unsigned int *apos_list,
const unsigned int pitch,
const unsigned int *n_angles_list,
Scalar2 *d_params,
unsigned int n_angle_types,
int block_size)
{
assert(d_params);
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void *)gpu_compute_cosinesq_angle_forces_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// setup the grid to run the kernel
dim3 grid( N / run_block_size + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// run the kernel
gpu_compute_cosinesq_angle_forces_kernel<<< grid, threads>>>(
d_force, d_virial, virial_pitch, N, d_pos, d_params, box,
atable, apos_list, pitch, n_angles_list);
return cudaSuccess;
}
|
984b8c55e8172b0908f63494ba1381d4e12630b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/detail/concatenate.cuh>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include "hash_join_hip.cuh"
namespace cudf {
namespace detail {
/**
* @brief Returns a vector with non-common indices which is set difference
* between `[0, num_columns)` and index values in common_column_indices
*
* @param num_columns The number of columns, which represents column indices
* from `[0, num_columns)` in a table
* @param common_column_indices A vector of common indices which needs to be
* excluded from `[0, num_columns)`
*
* @return vector A vector containing only the indices which are not present in
* `common_column_indices`
*/
auto non_common_column_indices(size_type num_columns,
std::vector<size_type> const &common_column_indices)
{
CUDF_EXPECTS(common_column_indices.size() <= static_cast<uint64_t>(num_columns),
"Too many columns in common");
std::vector<size_type> all_column_indices(num_columns);
std::iota(std::begin(all_column_indices), std::end(all_column_indices), 0);
std::vector<size_type> sorted_common_column_indices{common_column_indices};
std::sort(std::begin(sorted_common_column_indices), std::end(sorted_common_column_indices));
std::vector<size_type> non_common_column_indices(num_columns - common_column_indices.size());
std::set_difference(std::cbegin(all_column_indices),
std::cend(all_column_indices),
std::cbegin(sorted_common_column_indices),
std::cend(sorted_common_column_indices),
std::begin(non_common_column_indices));
return non_common_column_indices;
}
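// For example, with num_columns = 5 and common_column_indices = {3, 1} the returned vector is
// {0, 2, 4}: the set difference of [0, 5) and the sorted common indices.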
std::pair<std::unique_ptr<table>, std::unique_ptr<table>> get_empty_joined_table(
table_view const &probe,
table_view const &build,
std::vector<std::pair<size_type, size_type>> const &columns_in_common,
cudf::hash_join::common_columns_output_side common_columns_output_side)
{
std::vector<size_type> columns_to_exclude(columns_in_common.size());
std::transform(columns_in_common.begin(),
columns_in_common.end(),
columns_to_exclude.begin(),
[common_columns_output_side](auto &col) {
return common_columns_output_side == hash_join::common_columns_output_side::PROBE
? col.second
: col.first;
});
std::vector<size_type> non_common_indices = non_common_column_indices(
common_columns_output_side == hash_join::common_columns_output_side::PROBE
? build.num_columns()
: probe.num_columns(),
columns_to_exclude);
std::unique_ptr<table> empty_probe = empty_like(probe);
std::unique_ptr<table> empty_build = empty_like(build);
if (common_columns_output_side == hash_join::common_columns_output_side::PROBE) {
table_view empty_build_view = empty_build->select(non_common_indices);
empty_build = std::make_unique<table>(empty_build_view);
} else {
table_view empty_probe_view = empty_probe->select(non_common_indices);
empty_probe = std::make_unique<table>(empty_probe_view);
}
return std::make_pair(std::move(empty_probe), std::move(empty_build));
}
VectorPair concatenate_vector_pairs(VectorPair &a, VectorPair &b)
{
CUDF_EXPECTS((a.first.size() == a.second.size()),
"Mismatch between sizes of vectors in vector pair");
CUDF_EXPECTS((b.first.size() == b.second.size()),
"Mismatch between sizes of vectors in vector pair");
if (a.first.size() == 0) {
return b;
} else if (b.first.size() == 0) {
return a;
}
auto original_size = a.first.size();
a.first.resize(a.first.size() + b.first.size());
a.second.resize(a.second.size() + b.second.size());
thrust::copy(b.first.begin(), b.first.end(), a.first.begin() + original_size);
thrust::copy(b.second.begin(), b.second.end(), a.second.begin() + original_size);
return a;
}
template <typename T>
struct valid_range {
T start, stop;
__host__ __device__ valid_range(const T begin, const T end) : start(begin), stop(end) {}
__host__ __device__ __forceinline__ bool operator()(const T index)
{
return ((index >= start) && (index < stop));
}
};
/**
* @brief Creates a table containing the complement of left join indices.
* This table has two columns. The first one is filled with JoinNoneValue(-1)
* and the second one contains values from 0 to right_table_row_count - 1
* excluding those found in the right_indices column.
*
* @param right_indices Vector of indices
* @param left_table_row_count Number of rows of left table
* @param right_table_row_count Number of rows of right table
* @param stream CUDA stream used for device memory operations and kernel launches.
*
* @return Pair of vectors containing the left join indices complement
*/
std::pair<rmm::device_vector<size_type>, rmm::device_vector<size_type>>
get_left_join_indices_complement(rmm::device_vector<size_type> &right_indices,
size_type left_table_row_count,
size_type right_table_row_count,
hipStream_t stream)
{
// Get array of indices that do not appear in right_indices
// Vector allocated for unmatched result
rmm::device_vector<size_type> right_indices_complement(right_table_row_count);
// If left table is empty in a full join call then all rows of the right table
// should be represented in the joined indices. This is an optimization since
// if left table is empty and full join is called all the elements in
// right_indices will be JoinNoneValue, i.e. -1. This if path should
// produce exactly the same result as the else path but will be faster.
if (left_table_row_count == 0) {
thrust::sequence(rmm::exec_policy(stream)->on(stream),
right_indices_complement.begin(),
right_indices_complement.end(),
0);
} else {
// Assume all the indices in invalid_index_map are invalid
rmm::device_vector<size_type> invalid_index_map(right_table_row_count, 1);
// Functor to check for index validity since left joins can create invalid indices
valid_range<size_type> valid(0, right_table_row_count);
// invalid_index_map[right_indices[i]] = 0 for every valid i in [0, right_indices.size())
// Thus specifying that those locations are valid
thrust::scatter_if(rmm::exec_policy(stream)->on(stream),
thrust::make_constant_iterator(0),
thrust::make_constant_iterator(0) + right_indices.size(),
right_indices.begin(), // Index locations
right_indices.begin(), // Stencil - Check if index location is valid
invalid_index_map.begin(), // Output indices
valid); // Stencil Predicate
size_type begin_counter = static_cast<size_type>(0);
size_type end_counter = static_cast<size_type>(right_table_row_count);
// Create list of indices that have been marked as invalid
size_type indices_count = thrust::copy_if(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(begin_counter),
thrust::make_counting_iterator(end_counter),
invalid_index_map.begin(),
right_indices_complement.begin(),
thrust::identity<size_type>()) -
right_indices_complement.begin();
right_indices_complement.resize(indices_count);
}
rmm::device_vector<size_type> left_invalid_indices(right_indices_complement.size(),
JoinNoneValue);
return std::make_pair(std::move(left_invalid_indices), std::move(right_indices_complement));
}
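// Illustration: with right_table_row_count = 5 and right_indices = {1, 3, JoinNoneValue}, only rows
// 1 and 3 are marked as matched, so the complement is {0, 2, 4}, paired with left indices that are
// all JoinNoneValue.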
/**
* @brief Builds the hash table based on the given `build_table`.
*
* @throw cudf::logic_error if the number of columns in `build` table is 0.
* @throw cudf::logic_error if the number of rows in `build` table is 0.
* @throw cudf::logic_error if insertion to the hash table fails.
* @throw std::out_of_range if elements of `build_on` exceed the number of columns in the `build`
* table.
*
* @param build_table Table of build side columns to join.
* @param stream CUDA stream used for device memory operations and kernel launches.
*
* @return Built hash table.
*/
std::unique_ptr<multimap_type, std::function<void(multimap_type *)>> build_join_hash_table(
cudf::table_device_view build_table, hipStream_t stream)
{
CUDF_EXPECTS(0 != build_table.num_columns(), "Selected build dataset is empty");
CUDF_EXPECTS(0 != build_table.num_rows(), "Build side table has no rows");
const size_type build_table_num_rows{build_table.num_rows()};
size_t const hash_table_size = compute_hash_table_size(build_table_num_rows);
auto hash_table = multimap_type::create(hash_table_size,
true,
multimap_type::hasher(),
multimap_type::key_equal(),
multimap_type::allocator_type(),
stream);
row_hash hash_build{build_table};
rmm::device_scalar<int> failure(0, 0);
constexpr int block_size{DEFAULT_JOIN_BLOCK_SIZE};
detail::grid_1d config(build_table_num_rows, block_size);
hipLaunchKernelGGL(( build_hash_table), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, 0,
*hash_table, hash_build, build_table_num_rows, failure.data());
// Check error code from the kernel
if (failure.value() == 1) { CUDF_FAIL("Hash Table insert failure."); }
return hash_table;
}
/**
* @brief Probes the `hash_table` built from `build_table` for tuples in `probe_table`,
* and returns the output indices of `build_table` and `probe_table` as a combined table.
*
* @tparam JoinKind The type of join to be performed.
*
* @param build_table Table of build side columns to join.
* @param probe_table Table of probe side columns to join.
* @param hash_table Hash table built from `build_table`.
* @param compare_nulls Controls whether null join-key values should match or not.
* @param stream CUDA stream used for device memory operations and kernel launches.
*
* @return Join output indices vector pair.
*/
template <join_kind JoinKind>
std::pair<rmm::device_vector<size_type>, rmm::device_vector<size_type>> probe_join_hash_table(
cudf::table_device_view build_table,
cudf::table_device_view probe_table,
multimap_type const &hash_table,
null_equality compare_nulls,
hipStream_t stream)
{
size_type estimated_size = estimate_join_output_size<JoinKind, multimap_type>(
build_table, probe_table, hash_table, compare_nulls, stream);
// If the estimated output size is zero, return immediately
if (estimated_size == 0) {
return std::make_pair(rmm::device_vector<size_type>{}, rmm::device_vector<size_type>{});
}
// Because we are approximating the number of joined elements, our approximation
// might be incorrect and we might have underestimated the number of joined elements.
// As such we will need to de-allocate memory and re-allocate memory to ensure
// that the final output is correct.
rmm::device_scalar<size_type> write_index(0, stream);
size_type join_size{0};
rmm::device_vector<size_type> left_indices;
rmm::device_vector<size_type> right_indices;
auto current_estimated_size = estimated_size;
do {
left_indices.resize(estimated_size);
right_indices.resize(estimated_size);
constexpr int block_size{DEFAULT_JOIN_BLOCK_SIZE};
detail::grid_1d config(probe_table.num_rows(), block_size);
write_index.set_value(0);
row_hash hash_probe{probe_table};
row_equality equality{probe_table, build_table, compare_nulls == null_equality::EQUAL};
hipLaunchKernelGGL(( probe_hash_table<JoinKind, multimap_type, block_size, DEFAULT_JOIN_CACHE_SIZE>)
, dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream, hash_table,
build_table,
probe_table,
hash_probe,
equality,
left_indices.data().get(),
right_indices.data().get(),
write_index.data(),
estimated_size);
CHECK_CUDA(stream);
join_size = write_index.value();
current_estimated_size = estimated_size;
estimated_size *= 2;
} while ((current_estimated_size < join_size));
left_indices.resize(join_size);
right_indices.resize(join_size);
return std::make_pair(std::move(left_indices), std::move(right_indices));
}
/**
* @brief Combines the non common probe, common probe, non common build and common build
* columns in the correct order according to `common_columns_output_side` to form the joined
* (`probe`, `build`) table pair.
*
* @param probe_noncommon_cols Columns obtained by gathering non common probe columns.
* @param probe_noncommon_col_indices Output locations of non common probe columns in the probe
* portion.
* @param probe_common_col_indices Output locations of common probe columns in the probe portion.
* @param build_noncommon_cols Columns obtained by gathering non common build columns.
* @param build_noncommon_col_indices Output locations of non common build columns in the build
* portion.
* @param build_common_col_indices Output locations of common build columns in the build portion.
* @param common_cols Columns obtained by gathering common columns from `probe` and `build` tables
* in the build portion.
* @param common_columns_output_side @see cudf::hash_join::common_columns_output_side.
*
* @return Table pair of (`probe`, `build`).
*/
std::pair<std::unique_ptr<table>, std::unique_ptr<table>> combine_join_columns(
std::vector<std::unique_ptr<column>> &&probe_noncommon_cols,
std::vector<size_type> const &probe_noncommon_col_indices,
std::vector<size_type> const &probe_common_col_indices,
std::vector<std::unique_ptr<column>> &&build_noncommon_cols,
std::vector<size_type> const &build_noncommon_col_indices,
std::vector<size_type> const &build_common_col_indices,
std::vector<std::unique_ptr<column>> &&common_cols,
cudf::hash_join::common_columns_output_side common_columns_output_side)
{
if (common_columns_output_side == cudf::hash_join::common_columns_output_side::PROBE) {
std::vector<std::unique_ptr<column>> probe_cols(probe_noncommon_cols.size() +
common_cols.size());
for (size_t i = 0; i < probe_noncommon_cols.size(); ++i) {
probe_cols.at(probe_noncommon_col_indices.at(i)) = std::move(probe_noncommon_cols.at(i));
}
for (size_t i = 0; i < common_cols.size(); ++i) {
probe_cols.at(probe_common_col_indices.at(i)) = std::move(common_cols.at(i));
}
return std::make_pair(std::make_unique<cudf::table>(std::move(probe_cols)),
std::make_unique<cudf::table>(std::move(build_noncommon_cols)));
} else {
std::vector<std::unique_ptr<column>> build_cols(build_noncommon_cols.size() +
common_cols.size());
for (size_t i = 0; i < build_noncommon_cols.size(); ++i) {
build_cols.at(build_noncommon_col_indices.at(i)) = std::move(build_noncommon_cols.at(i));
}
for (size_t i = 0; i < common_cols.size(); ++i) {
build_cols.at(build_common_col_indices.at(i)) = std::move(common_cols.at(i));
}
return std::make_pair(std::make_unique<cudf::table>(std::move(probe_noncommon_cols)),
std::make_unique<cudf::table>(std::move(build_cols)));
}
}
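// Example of the placement logic above: with probe columns {p0, p1, p2}, build columns {b0, b1} and
// columns_in_common = {(1, 0)}, common_columns_output_side::PROBE yields ({p0, common, p2}, {b1}),
// while common_columns_output_side::BUILD yields ({p0, p2}, {common, b1}).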
/**
* @brief Gathers rows from `probe` and `build` table and returns a (`probe`, `build`) table pair,
* which contains the probe and build portions of the logical joined table respectively.
*
* @tparam JoinKind The type of join to be performed
*
* @param probe Probe side table
* @param build build side table
* @param joined_indices Pair of vectors containing row indices from which
* `probe` and `build` tables are gathered. If any row index is out of bounds,
* the contribution in the output `table` will be NULL.
* @param columns_in_common is a vector of pairs of column indices
* from tables `probe` and `build` respectively, that are "in common".
* For "common" columns, only a single output column will be produced.
* For an inner or left join, the result will be gathered from the column in
* `probe`. For a full join, the result will be gathered from both common
* columns in `probe` and `build` and concatenated to form a single column.
* @param common_columns_output_side @see cudf::hash_join::common_columns_output_side.
*
* @return Table pair of (`probe`, `build`) containing the rows from `probe` and
* `build` specified by `joined_indices`.
* Columns in `columns_in_common` will be included in either `probe` or `build` portion as
* `common_columns_output_side` indicates. Final form would look like
* (`probe(including common columns)`, `build(excluding common columns)`) if
* `common_columns_output_side` is `PROBE`, or (`probe(excluding common columns)`,
* `build(including common columns)`) if `common_columns_output_side` is `BUILD`.
*/
template <join_kind JoinKind>
std::pair<std::unique_ptr<table>, std::unique_ptr<table>> construct_join_output_df(
table_view const &probe,
table_view const &build,
VectorPair &joined_indices,
std::vector<std::pair<size_type, size_type>> const &columns_in_common,
cudf::hash_join::common_columns_output_side common_columns_output_side,
rmm::mr::device_memory_resource *mr,
hipStream_t stream)
{
std::vector<size_type> probe_common_col;
probe_common_col.reserve(columns_in_common.size());
std::vector<size_type> build_common_col;
build_common_col.reserve(columns_in_common.size());
for (const auto &c : columns_in_common) {
probe_common_col.push_back(c.first);
build_common_col.push_back(c.second);
}
std::vector<size_type> probe_noncommon_col =
non_common_column_indices(probe.num_columns(), probe_common_col);
std::vector<size_type> build_noncommon_col =
non_common_column_indices(build.num_columns(), build_common_col);
bool const nullify_out_of_bounds{JoinKind != join_kind::INNER_JOIN};
std::unique_ptr<table> common_table = std::make_unique<table>();
// Construct the joined columns
if (join_kind::FULL_JOIN == JoinKind) {
auto complement_indices = get_left_join_indices_complement(
joined_indices.second, probe.num_rows(), build.num_rows(), stream);
if (not columns_in_common.empty()) {
auto common_from_build = detail::gather(build.select(build_common_col),
complement_indices.second.begin(),
complement_indices.second.end(),
nullify_out_of_bounds,
rmm::mr::get_default_resource(),
stream);
auto common_from_probe = detail::gather(probe.select(probe_common_col),
joined_indices.first.begin(),
joined_indices.first.end(),
nullify_out_of_bounds,
rmm::mr::get_default_resource(),
stream);
common_table = cudf::detail::concatenate(
{common_from_build->view(), common_from_probe->view()}, mr, stream);
}
joined_indices = concatenate_vector_pairs(complement_indices, joined_indices);
} else {
if (not columns_in_common.empty()) {
common_table = detail::gather(probe.select(probe_common_col),
joined_indices.first.begin(),
joined_indices.first.end(),
nullify_out_of_bounds,
mr,
stream);
}
}
// Construct the probe non common columns
std::unique_ptr<table> probe_table = detail::gather(probe.select(probe_noncommon_col),
joined_indices.first.begin(),
joined_indices.first.end(),
nullify_out_of_bounds,
mr,
stream);
std::unique_ptr<table> build_table = detail::gather(build.select(build_noncommon_col),
joined_indices.second.begin(),
joined_indices.second.end(),
nullify_out_of_bounds,
mr,
stream);
return combine_join_columns(probe_table->release(),
probe_noncommon_col,
probe_common_col,
build_table->release(),
build_noncommon_col,
build_common_col,
common_table->release(),
common_columns_output_side);
}
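// Note on the FULL join path above: the complement (unmatched build-side rows) is concatenated in
// front of the matched index pairs, and the common columns are likewise assembled as
// gather-from-build for the complement followed by gather-from-probe for the matches, so the row
// ordering of the common columns stays consistent with the gathered non-common columns.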
std::unique_ptr<cudf::table> combine_table_pair(std::unique_ptr<cudf::table> &&left,
std::unique_ptr<cudf::table> &&right)
{
auto joined_cols = left->release();
auto right_cols = right->release();
joined_cols.insert(joined_cols.end(),
std::make_move_iterator(right_cols.begin()),
std::make_move_iterator(right_cols.end()));
return std::make_unique<cudf::table>(std::move(joined_cols));
}
} // namespace detail
hash_join::hash_join_impl::~hash_join_impl() = default;
hash_join::hash_join_impl::hash_join_impl(cudf::table_view const &build,
std::vector<size_type> const &build_on)
: _build(build),
_build_selected(build.select(build_on)),
_build_on(build_on),
_hash_table(nullptr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(0 != _build.num_columns(), "Hash join build table is empty");
CUDF_EXPECTS(_build.num_rows() < cudf::detail::MAX_JOIN_SIZE,
"Build column size is too big for hash join");
if (_build_on.empty() || 0 == build.num_rows()) { return; }
auto build_table = cudf::table_device_view::create(_build_selected);
_hash_table = build_join_hash_table(*build_table, 0);
}
std::pair<std::unique_ptr<cudf::table>, std::unique_ptr<cudf::table>>
hash_join::hash_join_impl::inner_join(
cudf::table_view const &probe,
std::vector<size_type> const &probe_on,
std::vector<std::pair<cudf::size_type, cudf::size_type>> const &columns_in_common,
common_columns_output_side common_columns_output_side,
null_equality compare_nulls,
rmm::mr::device_memory_resource *mr) const
{
CUDF_FUNC_RANGE();
return compute_hash_join<cudf::detail::join_kind::INNER_JOIN>(
probe, probe_on, columns_in_common, common_columns_output_side, compare_nulls, mr);
}
std::unique_ptr<cudf::table> hash_join::hash_join_impl::left_join(
cudf::table_view const &probe,
std::vector<size_type> const &probe_on,
std::vector<std::pair<cudf::size_type, cudf::size_type>> const &columns_in_common,
null_equality compare_nulls,
rmm::mr::device_memory_resource *mr) const
{
CUDF_FUNC_RANGE();
auto probe_build_pair = compute_hash_join<cudf::detail::join_kind::LEFT_JOIN>(
probe, probe_on, columns_in_common, common_columns_output_side::PROBE, compare_nulls, mr);
return cudf::detail::combine_table_pair(std::move(probe_build_pair.first),
std::move(probe_build_pair.second));
}
std::unique_ptr<cudf::table> hash_join::hash_join_impl::full_join(
cudf::table_view const &probe,
std::vector<size_type> const &probe_on,
std::vector<std::pair<cudf::size_type, cudf::size_type>> const &columns_in_common,
null_equality compare_nulls,
rmm::mr::device_memory_resource *mr) const
{
CUDF_FUNC_RANGE();
auto probe_build_pair = compute_hash_join<cudf::detail::join_kind::FULL_JOIN>(
probe, probe_on, columns_in_common, common_columns_output_side::PROBE, compare_nulls, mr);
return cudf::detail::combine_table_pair(std::move(probe_build_pair.first),
std::move(probe_build_pair.second));
}
template <cudf::detail::join_kind JoinKind>
std::pair<std::unique_ptr<cudf::table>, std::unique_ptr<cudf::table>>
hash_join::hash_join_impl::compute_hash_join(
cudf::table_view const &probe,
std::vector<size_type> const &probe_on,
std::vector<std::pair<cudf::size_type, cudf::size_type>> const &columns_in_common,
common_columns_output_side common_columns_output_side,
null_equality compare_nulls,
rmm::mr::device_memory_resource *mr,
hipStream_t stream) const
{
CUDF_EXPECTS(0 != probe.num_columns(), "Hash join probe table is empty");
CUDF_EXPECTS(probe.num_rows() < cudf::detail::MAX_JOIN_SIZE,
"Probe column size is too big for hash join");
CUDF_EXPECTS(_build_on.size() == probe_on.size(),
"Mismatch in number of columns to be joined on");
CUDF_EXPECTS(std::all_of(columns_in_common.begin(),
columns_in_common.end(),
[this, &probe_on](auto pair) {
size_t p = std::find(probe_on.begin(), probe_on.end(), pair.first) -
probe_on.begin();
size_t b = std::find(_build_on.begin(), _build_on.end(), pair.second) -
_build_on.begin();
return (p != probe_on.size()) && (b != _build_on.size()) && (p == b);
}),
"Invalid values passed to columns_in_common");
if (is_trivial_join(probe, _build, probe_on, _build_on, JoinKind)) {
return get_empty_joined_table(probe, _build, columns_in_common, common_columns_output_side);
}
auto probe_selected = probe.select(probe_on);
CUDF_EXPECTS(std::equal(std::cbegin(_build_selected),
std::cend(_build_selected),
std::cbegin(probe_selected),
std::cend(probe_selected),
[](const auto &b, const auto &p) { return b.type() == p.type(); }),
"Mismatch in joining column data types");
constexpr cudf::detail::join_kind ProbeJoinKind = (JoinKind == cudf::detail::join_kind::FULL_JOIN)
? cudf::detail::join_kind::LEFT_JOIN
: JoinKind;
auto joined_indices = probe_join_indices<ProbeJoinKind>(probe_selected, compare_nulls, stream);
return cudf::detail::construct_join_output_df<JoinKind>(
probe, _build, joined_indices, columns_in_common, common_columns_output_side, mr, stream);
}
template <cudf::detail::join_kind JoinKind>
std::enable_if_t<JoinKind != cudf::detail::join_kind::FULL_JOIN,
std::pair<rmm::device_vector<size_type>, rmm::device_vector<size_type>>>
hash_join::hash_join_impl::probe_join_indices(cudf::table_view const &probe,
null_equality compare_nulls,
hipStream_t stream) const
{
// Trivial left join case - exit early
if (!_hash_table && JoinKind == cudf::detail::join_kind::LEFT_JOIN) {
return get_trivial_left_join_indices(probe, stream);
}
CUDF_EXPECTS(_hash_table, "Hash table of hash join is null.");
auto build_table = cudf::table_device_view::create(_build_selected, stream);
auto probe_table = cudf::table_device_view::create(probe, stream);
return cudf::detail::probe_join_hash_table<JoinKind>(
*build_table, *probe_table, *_hash_table, compare_nulls, stream);
}
} // namespace cudf
| 984b8c55e8172b0908f63494ba1381d4e12630b0.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/detail/concatenate.cuh>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include "hash_join.cuh"
namespace cudf {
namespace detail {
/**
* @brief Returns a vector with non-common indices which is set difference
* between `[0, num_columns)` and index values in common_column_indices
*
* @param num_columns The number of columns, which represents column indices
* from `[0, num_columns)` in a table
* @param common_column_indices A vector of common indices which needs to be
* excluded from `[0, num_columns)`
*
* @return vector A vector containing only the indices which are not present in
* `common_column_indices`
*/
auto non_common_column_indices(size_type num_columns,
std::vector<size_type> const &common_column_indices)
{
CUDF_EXPECTS(common_column_indices.size() <= static_cast<uint64_t>(num_columns),
"Too many columns in common");
std::vector<size_type> all_column_indices(num_columns);
std::iota(std::begin(all_column_indices), std::end(all_column_indices), 0);
std::vector<size_type> sorted_common_column_indices{common_column_indices};
std::sort(std::begin(sorted_common_column_indices), std::end(sorted_common_column_indices));
std::vector<size_type> non_common_column_indices(num_columns - common_column_indices.size());
std::set_difference(std::cbegin(all_column_indices),
std::cend(all_column_indices),
std::cbegin(sorted_common_column_indices),
std::cend(sorted_common_column_indices),
std::begin(non_common_column_indices));
return non_common_column_indices;
}
std::pair<std::unique_ptr<table>, std::unique_ptr<table>> get_empty_joined_table(
table_view const &probe,
table_view const &build,
std::vector<std::pair<size_type, size_type>> const &columns_in_common,
cudf::hash_join::common_columns_output_side common_columns_output_side)
{
std::vector<size_type> columns_to_exclude(columns_in_common.size());
std::transform(columns_in_common.begin(),
columns_in_common.end(),
columns_to_exclude.begin(),
[common_columns_output_side](auto &col) {
return common_columns_output_side == hash_join::common_columns_output_side::PROBE
? col.second
: col.first;
});
std::vector<size_type> non_common_indices = non_common_column_indices(
common_columns_output_side == hash_join::common_columns_output_side::PROBE
? build.num_columns()
: probe.num_columns(),
columns_to_exclude);
std::unique_ptr<table> empty_probe = empty_like(probe);
std::unique_ptr<table> empty_build = empty_like(build);
if (common_columns_output_side == hash_join::common_columns_output_side::PROBE) {
table_view empty_build_view = empty_build->select(non_common_indices);
empty_build = std::make_unique<table>(empty_build_view);
} else {
table_view empty_probe_view = empty_probe->select(non_common_indices);
empty_probe = std::make_unique<table>(empty_probe_view);
}
return std::make_pair(std::move(empty_probe), std::move(empty_build));
}
VectorPair concatenate_vector_pairs(VectorPair &a, VectorPair &b)
{
CUDF_EXPECTS((a.first.size() == a.second.size()),
"Mismatch between sizes of vectors in vector pair");
CUDF_EXPECTS((b.first.size() == b.second.size()),
"Mismatch between sizes of vectors in vector pair");
if (a.first.size() == 0) {
return b;
} else if (b.first.size() == 0) {
return a;
}
auto original_size = a.first.size();
a.first.resize(a.first.size() + b.first.size());
a.second.resize(a.second.size() + b.second.size());
thrust::copy(b.first.begin(), b.first.end(), a.first.begin() + original_size);
thrust::copy(b.second.begin(), b.second.end(), a.second.begin() + original_size);
return a;
}
template <typename T>
struct valid_range {
T start, stop;
__host__ __device__ valid_range(const T begin, const T end) : start(begin), stop(end) {}
__host__ __device__ __forceinline__ bool operator()(const T index)
{
return ((index >= start) && (index < stop));
}
};
/**
* @brief Creates a table containing the complement of left join indices.
* This table has two columns. The first one is filled with JoinNoneValue(-1)
* and the second one contains values from 0 to right_table_row_count - 1
* excluding those found in the right_indices column.
*
* @param right_indices Vector of indices
* @param left_table_row_count Number of rows of left table
* @param right_table_row_count Number of rows of right table
* @param stream CUDA stream used for device memory operations and kernel launches.
*
* @return Pair of vectors containing the left join indices complement
*/
std::pair<rmm::device_vector<size_type>, rmm::device_vector<size_type>>
get_left_join_indices_complement(rmm::device_vector<size_type> &right_indices,
size_type left_table_row_count,
size_type right_table_row_count,
cudaStream_t stream)
{
// Get array of indices that do not appear in right_indices
// Vector allocated for unmatched result
rmm::device_vector<size_type> right_indices_complement(right_table_row_count);
// If left table is empty in a full join call then all rows of the right table
// should be represented in the joined indices. This is an optimization since
// if left table is empty and full join is called all the elements in
// right_indices will be JoinNoneValue, i.e. -1. This if path should
// produce exactly the same result as the else path but will be faster.
if (left_table_row_count == 0) {
thrust::sequence(rmm::exec_policy(stream)->on(stream),
right_indices_complement.begin(),
right_indices_complement.end(),
0);
} else {
// Assume all the indices in invalid_index_map are invalid
rmm::device_vector<size_type> invalid_index_map(right_table_row_count, 1);
// Functor to check for index validity since left joins can create invalid indices
valid_range<size_type> valid(0, right_table_row_count);
    // invalid_index_map[right_indices[i]] = 0 for i = 0 to right_indices.size(),
    // thus marking those locations as valid
thrust::scatter_if(rmm::exec_policy(stream)->on(stream),
thrust::make_constant_iterator(0),
thrust::make_constant_iterator(0) + right_indices.size(),
right_indices.begin(), // Index locations
right_indices.begin(), // Stencil - Check if index location is valid
invalid_index_map.begin(), // Output indices
valid); // Stencil Predicate
size_type begin_counter = static_cast<size_type>(0);
size_type end_counter = static_cast<size_type>(right_table_row_count);
// Create list of indices that have been marked as invalid
size_type indices_count = thrust::copy_if(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(begin_counter),
thrust::make_counting_iterator(end_counter),
invalid_index_map.begin(),
right_indices_complement.begin(),
thrust::identity<size_type>()) -
right_indices_complement.begin();
right_indices_complement.resize(indices_count);
}
rmm::device_vector<size_type> left_invalid_indices(right_indices_complement.size(),
JoinNoneValue);
return std::make_pair(std::move(left_invalid_indices), std::move(right_indices_complement));
}
/**
* @brief Builds the hash table based on the given `build_table`.
*
* @throw cudf::logic_error if the number of columns in `build` table is 0.
* @throw cudf::logic_error if the number of rows in `build` table is 0.
* @throw cudf::logic_error if insertion to the hash table fails.
* @throw std::out_of_range if elements of `build_on` exceed the number of columns in the `build`
* table.
*
* @param build_table Table of build side columns to join.
* @param stream CUDA stream used for device memory operations and kernel launches.
*
* @return Built hash table.
*/
std::unique_ptr<multimap_type, std::function<void(multimap_type *)>> build_join_hash_table(
cudf::table_device_view build_table, cudaStream_t stream)
{
CUDF_EXPECTS(0 != build_table.num_columns(), "Selected build dataset is empty");
CUDF_EXPECTS(0 != build_table.num_rows(), "Build side table has no rows");
const size_type build_table_num_rows{build_table.num_rows()};
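  // The hash table is sized from the build row count; compute_hash_table_size grows it
  // beyond the row count so the multimap keeps a low load factor during insertion.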
size_t const hash_table_size = compute_hash_table_size(build_table_num_rows);
auto hash_table = multimap_type::create(hash_table_size,
true,
multimap_type::hasher(),
multimap_type::key_equal(),
multimap_type::allocator_type(),
stream);
row_hash hash_build{build_table};
  rmm::device_scalar<int> failure(0, stream);
constexpr int block_size{DEFAULT_JOIN_BLOCK_SIZE};
detail::grid_1d config(build_table_num_rows, block_size);
  build_hash_table<<<config.num_blocks, config.num_threads_per_block, 0, stream>>>(
    *hash_table, hash_build, build_table_num_rows, failure.data());
// Check error code from the kernel
if (failure.value() == 1) { CUDF_FAIL("Hash Table insert failure."); }
return hash_table;
}
/**
* @brief Probes the `hash_table` built from `build_table` for tuples in `probe_table`,
* and returns the output indices of `build_table` and `probe_table` as a combined table.
*
* @tparam JoinKind The type of join to be performed.
*
* @param build_table Table of build side columns to join.
* @param probe_table Table of probe side columns to join.
* @param hash_table Hash table built from `build_table`.
* @param compare_nulls Controls whether null join-key values should match or not.
* @param stream CUDA stream used for device memory operations and kernel launches.
*
* @return Join output indices vector pair.
*/
template <join_kind JoinKind>
std::pair<rmm::device_vector<size_type>, rmm::device_vector<size_type>> probe_join_hash_table(
cudf::table_device_view build_table,
cudf::table_device_view probe_table,
multimap_type const &hash_table,
null_equality compare_nulls,
cudaStream_t stream)
{
size_type estimated_size = estimate_join_output_size<JoinKind, multimap_type>(
build_table, probe_table, hash_table, compare_nulls, stream);
// If the estimated output size is zero, return immediately
if (estimated_size == 0) {
return std::make_pair(rmm::device_vector<size_type>{}, rmm::device_vector<size_type>{});
}
// Because we are approximating the number of joined elements, our approximation
// might be incorrect and we might have underestimated the number of joined elements.
// As such we will need to de-allocate memory and re-allocate memory to ensure
// that the final output is correct.
rmm::device_scalar<size_type> write_index(0, stream);
size_type join_size{0};
rmm::device_vector<size_type> left_indices;
rmm::device_vector<size_type> right_indices;
auto current_estimated_size = estimated_size;
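  // The kernel records how many pairs it actually produced in write_index; if that count
  // exceeds the current buffers, double the estimate and run the probe again.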
do {
left_indices.resize(estimated_size);
right_indices.resize(estimated_size);
constexpr int block_size{DEFAULT_JOIN_BLOCK_SIZE};
detail::grid_1d config(probe_table.num_rows(), block_size);
write_index.set_value(0);
row_hash hash_probe{probe_table};
row_equality equality{probe_table, build_table, compare_nulls == null_equality::EQUAL};
probe_hash_table<JoinKind, multimap_type, block_size, DEFAULT_JOIN_CACHE_SIZE>
<<<config.num_blocks, config.num_threads_per_block, 0, stream>>>(hash_table,
build_table,
probe_table,
hash_probe,
equality,
left_indices.data().get(),
right_indices.data().get(),
write_index.data(),
estimated_size);
CHECK_CUDA(stream);
join_size = write_index.value();
current_estimated_size = estimated_size;
estimated_size *= 2;
} while ((current_estimated_size < join_size));
left_indices.resize(join_size);
right_indices.resize(join_size);
return std::make_pair(std::move(left_indices), std::move(right_indices));
}
/**
* @brief Combines the non common probe, common probe, non common build and common build
* columns in the correct order according to `common_columns_output_side` to form the joined
* (`probe`, `build`) table pair.
*
* @param probe_noncommon_cols Columns obtained by gathering non common probe columns.
* @param probe_noncommon_col_indices Output locations of non common probe columns in the probe
* portion.
* @param probe_common_col_indices Output locations of common probe columns in the probe portion.
* @param build_noncommon_cols Columns obtained by gathering non common build columns.
* @param build_noncommon_col_indices Output locations of non common build columns in the build
* portion.
* @param build_common_col_indices Output locations of common build columns in the build portion.
* @param common_cols Columns obtained by gathering common columns from `probe` and `build` tables
* in the build portion.
* @param common_columns_output_side @see cudf::hash_join::common_columns_output_side.
*
* @return Table pair of (`probe`, `build`).
*/
std::pair<std::unique_ptr<table>, std::unique_ptr<table>> combine_join_columns(
std::vector<std::unique_ptr<column>> &&probe_noncommon_cols,
std::vector<size_type> const &probe_noncommon_col_indices,
std::vector<size_type> const &probe_common_col_indices,
std::vector<std::unique_ptr<column>> &&build_noncommon_cols,
std::vector<size_type> const &build_noncommon_col_indices,
std::vector<size_type> const &build_common_col_indices,
std::vector<std::unique_ptr<column>> &&common_cols,
cudf::hash_join::common_columns_output_side common_columns_output_side)
{
if (common_columns_output_side == cudf::hash_join::common_columns_output_side::PROBE) {
std::vector<std::unique_ptr<column>> probe_cols(probe_noncommon_cols.size() +
common_cols.size());
for (size_t i = 0; i < probe_noncommon_cols.size(); ++i) {
probe_cols.at(probe_noncommon_col_indices.at(i)) = std::move(probe_noncommon_cols.at(i));
}
for (size_t i = 0; i < common_cols.size(); ++i) {
probe_cols.at(probe_common_col_indices.at(i)) = std::move(common_cols.at(i));
}
return std::make_pair(std::make_unique<cudf::table>(std::move(probe_cols)),
std::make_unique<cudf::table>(std::move(build_noncommon_cols)));
} else {
std::vector<std::unique_ptr<column>> build_cols(build_noncommon_cols.size() +
common_cols.size());
for (size_t i = 0; i < build_noncommon_cols.size(); ++i) {
build_cols.at(build_noncommon_col_indices.at(i)) = std::move(build_noncommon_cols.at(i));
}
for (size_t i = 0; i < common_cols.size(); ++i) {
build_cols.at(build_common_col_indices.at(i)) = std::move(common_cols.at(i));
}
return std::make_pair(std::make_unique<cudf::table>(std::move(probe_noncommon_cols)),
std::make_unique<cudf::table>(std::move(build_cols)));
}
}
/**
* @brief Gathers rows from `probe` and `build` table and returns a (`probe`, `build`) table pair,
* which contains the probe and build portions of the logical joined table respectively.
*
* @tparam JoinKind The type of join to be performed
*
* @param probe Probe side table
* @param build build side table
* @param joined_indices Pair of vectors containing row indices from which
* `probe` and `build` tables are gathered. If any row index is out of bounds,
* the contribution in the output `table` will be NULL.
* @param columns_in_common is a vector of pairs of column indices
* from tables `probe` and `build` respectively, that are "in common".
* For "common" columns, only a single output column will be produced.
* For an inner or left join, the result will be gathered from the column in
* `probe`. For a full join, the result will be gathered from both common
* columns in `probe` and `build` and concatenated to form a single column.
* @param common_columns_output_side @see cudf::hash_join::common_columns_output_side.
*
* @return Table pair of (`probe`, `build`) containing the rows from `probe` and
* `build` specified by `joined_indices`.
* Columns in `columns_in_common` will be included in either `probe` or `build` portion as
* `common_columns_output_side` indicates. Final form would look like
* (`probe(including common columns)`, `build(excluding common columns)`) if
* `common_columns_output_side` is `PROBE`, or (`probe(excluding common columns)`,
* `build(including common columns)`) if `common_columns_output_side` is `BUILD`.
*/
template <join_kind JoinKind>
std::pair<std::unique_ptr<table>, std::unique_ptr<table>> construct_join_output_df(
table_view const &probe,
table_view const &build,
VectorPair &joined_indices,
std::vector<std::pair<size_type, size_type>> const &columns_in_common,
cudf::hash_join::common_columns_output_side common_columns_output_side,
rmm::mr::device_memory_resource *mr,
cudaStream_t stream)
{
std::vector<size_type> probe_common_col;
probe_common_col.reserve(columns_in_common.size());
std::vector<size_type> build_common_col;
build_common_col.reserve(columns_in_common.size());
for (const auto &c : columns_in_common) {
probe_common_col.push_back(c.first);
build_common_col.push_back(c.second);
}
std::vector<size_type> probe_noncommon_col =
non_common_column_indices(probe.num_columns(), probe_common_col);
std::vector<size_type> build_noncommon_col =
non_common_column_indices(build.num_columns(), build_common_col);
bool const nullify_out_of_bounds{JoinKind != join_kind::INNER_JOIN};
std::unique_ptr<table> common_table = std::make_unique<table>();
// Construct the joined columns
if (join_kind::FULL_JOIN == JoinKind) {
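    // For a full join, build rows that found no match still appear in the output: take the
    // complement of the build-side indices and pair it with JoinNoneValue probe indices,
    // which later gather as nulls from `probe`.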
auto complement_indices = get_left_join_indices_complement(
joined_indices.second, probe.num_rows(), build.num_rows(), stream);
if (not columns_in_common.empty()) {
auto common_from_build = detail::gather(build.select(build_common_col),
complement_indices.second.begin(),
complement_indices.second.end(),
nullify_out_of_bounds,
rmm::mr::get_default_resource(),
stream);
auto common_from_probe = detail::gather(probe.select(probe_common_col),
joined_indices.first.begin(),
joined_indices.first.end(),
nullify_out_of_bounds,
rmm::mr::get_default_resource(),
stream);
common_table = cudf::detail::concatenate(
{common_from_build->view(), common_from_probe->view()}, mr, stream);
}
joined_indices = concatenate_vector_pairs(complement_indices, joined_indices);
} else {
if (not columns_in_common.empty()) {
common_table = detail::gather(probe.select(probe_common_col),
joined_indices.first.begin(),
joined_indices.first.end(),
nullify_out_of_bounds,
mr,
stream);
}
}
// Construct the probe non common columns
std::unique_ptr<table> probe_table = detail::gather(probe.select(probe_noncommon_col),
joined_indices.first.begin(),
joined_indices.first.end(),
nullify_out_of_bounds,
mr,
stream);
std::unique_ptr<table> build_table = detail::gather(build.select(build_noncommon_col),
joined_indices.second.begin(),
joined_indices.second.end(),
nullify_out_of_bounds,
mr,
stream);
return combine_join_columns(probe_table->release(),
probe_noncommon_col,
probe_common_col,
build_table->release(),
build_noncommon_col,
build_common_col,
common_table->release(),
common_columns_output_side);
}
std::unique_ptr<cudf::table> combine_table_pair(std::unique_ptr<cudf::table> &&left,
std::unique_ptr<cudf::table> &&right)
{
auto joined_cols = left->release();
auto right_cols = right->release();
joined_cols.insert(joined_cols.end(),
std::make_move_iterator(right_cols.begin()),
std::make_move_iterator(right_cols.end()));
return std::make_unique<cudf::table>(std::move(joined_cols));
}
} // namespace detail
hash_join::hash_join_impl::~hash_join_impl() = default;
hash_join::hash_join_impl::hash_join_impl(cudf::table_view const &build,
std::vector<size_type> const &build_on)
: _build(build),
_build_selected(build.select(build_on)),
_build_on(build_on),
_hash_table(nullptr)
{
CUDF_FUNC_RANGE();
CUDF_EXPECTS(0 != _build.num_columns(), "Hash join build table is empty");
CUDF_EXPECTS(_build.num_rows() < cudf::detail::MAX_JOIN_SIZE,
"Build column size is too big for hash join");
if (_build_on.empty() || 0 == build.num_rows()) { return; }
auto build_table = cudf::table_device_view::create(_build_selected);
_hash_table = build_join_hash_table(*build_table, 0);
}
std::pair<std::unique_ptr<cudf::table>, std::unique_ptr<cudf::table>>
hash_join::hash_join_impl::inner_join(
cudf::table_view const &probe,
std::vector<size_type> const &probe_on,
std::vector<std::pair<cudf::size_type, cudf::size_type>> const &columns_in_common,
common_columns_output_side common_columns_output_side,
null_equality compare_nulls,
rmm::mr::device_memory_resource *mr) const
{
CUDF_FUNC_RANGE();
return compute_hash_join<cudf::detail::join_kind::INNER_JOIN>(
probe, probe_on, columns_in_common, common_columns_output_side, compare_nulls, mr);
}
std::unique_ptr<cudf::table> hash_join::hash_join_impl::left_join(
cudf::table_view const &probe,
std::vector<size_type> const &probe_on,
std::vector<std::pair<cudf::size_type, cudf::size_type>> const &columns_in_common,
null_equality compare_nulls,
rmm::mr::device_memory_resource *mr) const
{
CUDF_FUNC_RANGE();
auto probe_build_pair = compute_hash_join<cudf::detail::join_kind::LEFT_JOIN>(
probe, probe_on, columns_in_common, common_columns_output_side::PROBE, compare_nulls, mr);
return cudf::detail::combine_table_pair(std::move(probe_build_pair.first),
std::move(probe_build_pair.second));
}
std::unique_ptr<cudf::table> hash_join::hash_join_impl::full_join(
cudf::table_view const &probe,
std::vector<size_type> const &probe_on,
std::vector<std::pair<cudf::size_type, cudf::size_type>> const &columns_in_common,
null_equality compare_nulls,
rmm::mr::device_memory_resource *mr) const
{
CUDF_FUNC_RANGE();
auto probe_build_pair = compute_hash_join<cudf::detail::join_kind::FULL_JOIN>(
probe, probe_on, columns_in_common, common_columns_output_side::PROBE, compare_nulls, mr);
return cudf::detail::combine_table_pair(std::move(probe_build_pair.first),
std::move(probe_build_pair.second));
}
template <cudf::detail::join_kind JoinKind>
std::pair<std::unique_ptr<cudf::table>, std::unique_ptr<cudf::table>>
hash_join::hash_join_impl::compute_hash_join(
cudf::table_view const &probe,
std::vector<size_type> const &probe_on,
std::vector<std::pair<cudf::size_type, cudf::size_type>> const &columns_in_common,
common_columns_output_side common_columns_output_side,
null_equality compare_nulls,
rmm::mr::device_memory_resource *mr,
cudaStream_t stream) const
{
CUDF_EXPECTS(0 != probe.num_columns(), "Hash join probe table is empty");
CUDF_EXPECTS(probe.num_rows() < cudf::detail::MAX_JOIN_SIZE,
"Probe column size is too big for hash join");
CUDF_EXPECTS(_build_on.size() == probe_on.size(),
"Mismatch in number of columns to be joined on");
CUDF_EXPECTS(std::all_of(columns_in_common.begin(),
columns_in_common.end(),
[this, &probe_on](auto pair) {
size_t p = std::find(probe_on.begin(), probe_on.end(), pair.first) -
probe_on.begin();
size_t b = std::find(_build_on.begin(), _build_on.end(), pair.second) -
_build_on.begin();
return (p != probe_on.size()) && (b != _build_on.size()) && (p == b);
}),
"Invalid values passed to columns_in_common");
if (is_trivial_join(probe, _build, probe_on, _build_on, JoinKind)) {
return get_empty_joined_table(probe, _build, columns_in_common, common_columns_output_side);
}
auto probe_selected = probe.select(probe_on);
CUDF_EXPECTS(std::equal(std::cbegin(_build_selected),
std::cend(_build_selected),
std::cbegin(probe_selected),
std::cend(probe_selected),
[](const auto &b, const auto &p) { return b.type() == p.type(); }),
"Mismatch in joining column data types");
  // A full join is probed as a left join; the build rows that found no match are appended
  // afterwards in construct_join_output_df via the left-join indices complement.
  constexpr cudf::detail::join_kind ProbeJoinKind = (JoinKind == cudf::detail::join_kind::FULL_JOIN)
? cudf::detail::join_kind::LEFT_JOIN
: JoinKind;
auto joined_indices = probe_join_indices<ProbeJoinKind>(probe_selected, compare_nulls, stream);
return cudf::detail::construct_join_output_df<JoinKind>(
probe, _build, joined_indices, columns_in_common, common_columns_output_side, mr, stream);
}
template <cudf::detail::join_kind JoinKind>
std::enable_if_t<JoinKind != cudf::detail::join_kind::FULL_JOIN,
std::pair<rmm::device_vector<size_type>, rmm::device_vector<size_type>>>
hash_join::hash_join_impl::probe_join_indices(cudf::table_view const &probe,
null_equality compare_nulls,
cudaStream_t stream) const
{
// Trivial left join case - exit early
if (!_hash_table && JoinKind == cudf::detail::join_kind::LEFT_JOIN) {
return get_trivial_left_join_indices(probe, stream);
}
CUDF_EXPECTS(_hash_table, "Hash table of hash join is null.");
auto build_table = cudf::table_device_view::create(_build_selected, stream);
auto probe_table = cudf::table_device_view::create(probe, stream);
return cudf::detail::probe_join_hash_table<JoinKind>(
*build_table, *probe_table, *_hash_table, compare_nulls, stream);
}
} // namespace cudf
|
132b2d02676b3ce69e7dc1fbc6bedeadc8095d02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef APP_MANAGER_KERNEL_CUH
#define APP_MANAGER_KERNEL_CUH
#include <stdio.h>
#include <stdlib.h>
#include <helper_cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void vectorAddKernel(const float *A, const float *B, float *C, int numElements)
{
int i;
if (blockDim.y == 1)
{
i = blockIdx.x*blockDim.x+threadIdx.x;
}
else if (blockDim.z == 1)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
i = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
}
else
{
// unique block index inside a 3D block grid
const unsigned long long int blockId = blockIdx.x //1D
+ blockIdx.y * gridDim.x //2D
+ gridDim.x * gridDim.y * blockIdx.z; //3D
// global unique thread index, block dimension uses only x-coordinate
// i = blockId * blockDim.x + threadIdx.x;
        i = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
}
if (i < numElements)
{
// printf("%d - ",i);
C[i] = A[i] + B[i];
// printf("C[%d] %f = %f + %f \n ", i, C[i], A[i], B[i]);
}
}
extern "C" void vectorAdd(const float *d_A, const float *d_B, float *d_C, int numElements, dim3 threadsPerBlock)
{
dim3 numBlocks;
if (threadsPerBlock.y == 1 && threadsPerBlock.z == 1 ) //1D
{
numBlocks.x = (numElements + threadsPerBlock.x - 1) / threadsPerBlock.x;
}
else if (threadsPerBlock.z == 1) //2D 2D
{
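        // Choose a roughly square 2D grid: each dimension is sized from sqrt(numElements).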
int gridX = (sqrt(numElements) + threadsPerBlock.x - 1) / threadsPerBlock.x;
int gridY = (sqrt(numElements) + threadsPerBlock.y - 1) / threadsPerBlock.y;
numBlocks = dim3(gridX, gridY);
}
else //3D 3D
{
long double sr = ::pow(numElements, 1/3.);
int gridX = (sr + threadsPerBlock.x - 1) / threadsPerBlock.x;
int gridY = (sr + threadsPerBlock.y - 1) / threadsPerBlock.y;
int gridZ = (sr + threadsPerBlock.z - 1) / threadsPerBlock.z;
if (sr -floor(sr) != 0 )
{
gridX++;
}
numBlocks = dim3(gridX,gridY,gridZ);
}
printf("Thread configuration (%d,%d,%d) \n",threadsPerBlock.x, threadsPerBlock.y,threadsPerBlock.z);
printf("Block configuration (%d,%d,%d) \n", numBlocks.x, numBlocks.y, numBlocks.z);
hipLaunchKernelGGL(( vectorAddKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
getLastCudaError("vectorAddKernel execution failed.\n");
}
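// Illustrative usage sketch: assuming d_A, d_B and d_C are device buffers holding
// numElements floats, the same wrapper serves 1D, 2D and 3D thread-block shapes:
//   vectorAdd(d_A, d_B, d_C, numElements, dim3(256));      // 1D blocks
//   vectorAdd(d_A, d_B, d_C, numElements, dim3(16, 16));   // 2D blocks
//   vectorAdd(d_A, d_B, d_C, numElements, dim3(8, 8, 8));  // 3D blocks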
__global__ void matrixMultKernel(const float *a,const float *b, float *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
if( col < k && row < m)
{
for(int i = 0; i < n; i++)
{
sum += a[row * n + i] * b[i * k + col];
// printf("C[%d] %f = %f + %f \n ", i, sum, a[i], b[i]);
}
c[row * k + col] = sum;
}
}
extern "C" void matrixMult(const float *d_A, const float *d_B, float *d_C, int M, int N, int K, dim3 threadsPerBlock)
{
if (threadsPerBlock.y == 1 && threadsPerBlock.z == 1 ) //1D
{
}
else if (threadsPerBlock.z == 1) //2D 2D
{
}
else
{
}
    // The output c is M x K, so the grid's x dimension must cover the K columns and its y dimension the M rows.
    dim3 numBlocks((K + threadsPerBlock.x - 1) / threadsPerBlock.x, (M + threadsPerBlock.y - 1) / threadsPerBlock.y);
printf("Thread configuration (%d,%d,%d) \n",threadsPerBlock.x, threadsPerBlock.y,threadsPerBlock.z);
printf("Block configuration (%d,%d,%d) \n", numBlocks.x, numBlocks.y, numBlocks.z);
hipLaunchKernelGGL(( matrixMultKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, M, N, K);
getLastCudaError("matrixMult execution failed.\n");
}
// The kernel - DOT PRODUCT
__global__ void dotProductKernel(const float *a,const float *b, float *c)
{
extern __shared__ float temp[];
int index = threadIdx.x + blockIdx.x * blockDim.x;
temp[threadIdx.x] = a[index] * b[index];
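    // Note: there is no bounds check here, so the launch assumes numElements is an exact
    // multiple of blockDim.x; otherwise the trailing threads read past the end of a and b.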
//Synch threads
__syncthreads();
if (0 == threadIdx.x) {
float sum = 0.00;
int i;
for (i=0; i<blockDim.x; i++)
sum += temp[i];
atomicAdd(c, sum);
}
}
extern "C" void dotProduct(const float *d_A, const float *d_B, float *d_C, int N, dim3 threadsPerBlock)
{
dim3 numBlocks;
if (threadsPerBlock.y == 1 && threadsPerBlock.z == 1 ) //1D
{
numBlocks.x = (N + threadsPerBlock.x - 1) / threadsPerBlock.x;
}
else return;
printf("Thread configuration (%d,%d,%d) \n",threadsPerBlock.x, threadsPerBlock.y,threadsPerBlock.z);
printf("Block configuration (%d,%d,%d) \n", numBlocks.x, numBlocks.y, numBlocks.z);
hipLaunchKernelGGL(( dotProductKernel), dim3(numBlocks), dim3(threadsPerBlock), threadsPerBlock.x*sizeof(float), 0, d_A, d_B, d_C);
getLastCudaError("dotProduct execution failed.\n");
}
#endif
| 132b2d02676b3ce69e7dc1fbc6bedeadc8095d02.cu | #ifndef APP_MANAGER_KERNEL_CUH
#define APP_MANAGER_KERNEL_CUH
#include <stdio.h>
#include <stdlib.h>
#include <helper_cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void vectorAddKernel(const float *A, const float *B, float *C, int numElements)
{
int i;
if (blockDim.y == 1)
{
i = blockIdx.x*blockDim.x+threadIdx.x;
}
else if (blockDim.z == 1)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
i = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
}
else
{
// unique block index inside a 3D block grid
const unsigned long long int blockId = blockIdx.x //1D
+ blockIdx.y * gridDim.x //2D
+ gridDim.x * gridDim.y * blockIdx.z; //3D
// global unique thread index, block dimension uses only x-coordinate
// i = blockId * blockDim.x + threadIdx.x;
        i = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
}
if (i < numElements)
{
// printf("%d - ",i);
C[i] = A[i] + B[i];
// printf("C[%d] %f = %f + %f \n ", i, C[i], A[i], B[i]);
}
}
extern "C" void vectorAdd(const float *d_A, const float *d_B, float *d_C, int numElements, dim3 threadsPerBlock)
{
dim3 numBlocks;
if (threadsPerBlock.y == 1 && threadsPerBlock.z == 1 ) //1D
{
numBlocks.x = (numElements + threadsPerBlock.x - 1) / threadsPerBlock.x;
}
else if (threadsPerBlock.z == 1) //2D 2D
{
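        // Choose a roughly square 2D grid: each dimension is sized from sqrt(numElements).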
int gridX = (sqrt(numElements) + threadsPerBlock.x - 1) / threadsPerBlock.x;
int gridY = (sqrt(numElements) + threadsPerBlock.y - 1) / threadsPerBlock.y;
numBlocks = dim3(gridX, gridY);
}
else //3D 3D
{
long double sr = std::pow(numElements, 1/3.);
int gridX = (sr + threadsPerBlock.x - 1) / threadsPerBlock.x;
int gridY = (sr + threadsPerBlock.y - 1) / threadsPerBlock.y;
int gridZ = (sr + threadsPerBlock.z - 1) / threadsPerBlock.z;
if (sr -floor(sr) != 0 )
{
gridX++;
}
numBlocks = dim3(gridX,gridY,gridZ);
}
printf("Thread configuration (%d,%d,%d) \n",threadsPerBlock.x, threadsPerBlock.y,threadsPerBlock.z);
printf("Block configuration (%d,%d,%d) \n", numBlocks.x, numBlocks.y, numBlocks.z);
vectorAddKernel<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
getLastCudaError("vectorAddKernel execution failed.\n");
}
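// Illustrative usage sketch: assuming d_A, d_B and d_C are device buffers holding
// numElements floats, the same wrapper serves 1D, 2D and 3D thread-block shapes:
//   vectorAdd(d_A, d_B, d_C, numElements, dim3(256));      // 1D blocks
//   vectorAdd(d_A, d_B, d_C, numElements, dim3(16, 16));   // 2D blocks
//   vectorAdd(d_A, d_B, d_C, numElements, dim3(8, 8, 8));  // 3D blocks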
__global__ void matrixMultKernel(const float *a,const float *b, float *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
if( col < k && row < m)
{
for(int i = 0; i < n; i++)
{
sum += a[row * n + i] * b[i * k + col];
// printf("C[%d] %f = %f + %f \n ", i, sum, a[i], b[i]);
}
c[row * k + col] = sum;
}
}
extern "C" void matrixMult(const float *d_A, const float *d_B, float *d_C, int M, int N, int K, dim3 threadsPerBlock)
{
if (threadsPerBlock.y == 1 && threadsPerBlock.z == 1 ) //1D
{
}
else if (threadsPerBlock.z == 1) //2D 2D
{
}
else
{
}
    // The output c is M x K, so the grid's x dimension must cover the K columns and its y dimension the M rows.
    dim3 numBlocks((K + threadsPerBlock.x - 1) / threadsPerBlock.x, (M + threadsPerBlock.y - 1) / threadsPerBlock.y);
printf("Thread configuration (%d,%d,%d) \n",threadsPerBlock.x, threadsPerBlock.y,threadsPerBlock.z);
printf("Block configuration (%d,%d,%d) \n", numBlocks.x, numBlocks.y, numBlocks.z);
matrixMultKernel<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, M, N, K);
getLastCudaError("matrixMult execution failed.\n");
}
// The kernel - DOT PRODUCT
__global__ void dotProductKernel(const float *a,const float *b, float *c)
{
extern __shared__ float temp[];
int index = threadIdx.x + blockIdx.x * blockDim.x;
temp[threadIdx.x] = a[index] * b[index];
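    // Note: there is no bounds check here, so the launch assumes numElements is an exact
    // multiple of blockDim.x; otherwise the trailing threads read past the end of a and b.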
//Synch threads
__syncthreads();
if (0 == threadIdx.x) {
float sum = 0.00;
int i;
for (i=0; i<blockDim.x; i++)
sum += temp[i];
atomicAdd(c, sum);
}
}
extern "C" void dotProduct(const float *d_A, const float *d_B, float *d_C, int N, dim3 threadsPerBlock)
{
dim3 numBlocks;
if (threadsPerBlock.y == 1 && threadsPerBlock.z == 1 ) //1D
{
numBlocks.x = (N + threadsPerBlock.x - 1) / threadsPerBlock.x;
}
else return;
printf("Thread configuration (%d,%d,%d) \n",threadsPerBlock.x, threadsPerBlock.y,threadsPerBlock.z);
printf("Block configuration (%d,%d,%d) \n", numBlocks.x, numBlocks.y, numBlocks.z);
dotProductKernel<<<numBlocks, threadsPerBlock, threadsPerBlock.x*sizeof(float)>>>(d_A, d_B, d_C);
getLastCudaError("dotProduct execution failed.\n");
}
#endif
|
fdf0f336e157536e0245ce43acf98e199152a619.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <distributed/distributed_io.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust_wrapper.h>
#include <amgx_types/util.h>
namespace amgx
{
template <class T>
AMGX_ERROR free_maps_one_ring(T num_neighbors, T *neighbors, T *btl_sizes, T **btl_maps, T *lth_sizes, T **lth_maps)
{
if (neighbors != NULL) { free(neighbors); }
if (btl_maps != NULL)
{
for (T i = 0; i < num_neighbors; i++)
if (btl_maps[i] != NULL) { free(btl_maps[i]); }
free(btl_maps);
}
if (lth_maps != NULL)
{
for (T i = 0; i < num_neighbors; i++)
if (lth_maps[i] != NULL) { free(lth_maps[i]); }
free(lth_maps);
}
if (btl_sizes != NULL) { free(btl_sizes); }
if (lth_sizes != NULL) { free(lth_sizes); }
return AMGX_OK;
}
namespace
{
// partitioning routines
void create_part_offsets_equal_rows(INDEX_TYPE num_part, int64_t num_rows_total, int64_t *part_offsets_h)
{
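    // Worked example: 10 rows over 4 partitions gives offsets {0, 2, 5, 7, 10}.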
for (int i = 0; i < num_part; i++)
{
part_offsets_h[i] = i * num_rows_total / num_part;
}
part_offsets_h[num_part] = num_rows_total;
}
template <class T_Config_src, class T_Config_dst>
void transfer_values(const INDEX_TYPE my_id, INDEX_TYPE num_part, const int64_t *part_offsets, const Matrix<T_Config_src> &A, Matrix<T_Config_dst> &A_part)
{
if (A_part.values.size() == 0 || num_part == 0 || A_part.row_offsets.size() == 0)
{
FatalError("Partitioning was not performed", AMGX_ERR_BAD_PARAMETERS);
}
thrust::copy(A.values.begin() + (INDEX_TYPE)A.row_offsets[part_offsets[my_id]]*A.get_block_size(), A.values.begin() + (INDEX_TYPE)A.row_offsets[part_offsets[my_id + 1]]*A.get_block_size(), A_part.values.begin());
if (A.hasProps(DIAG))
{
thrust::copy(A.values.begin() + (A.diag[0] + part_offsets[my_id])*A.get_block_size(), A.values.begin() + (A.diag[0] + part_offsets[my_id + 1])*A.get_block_size(), A_part.values.begin() + A_part.row_offsets[A_part.row_offsets.size() - 1]*A.get_block_size());
cudaCheckError();
}
}
template <class T_Config_src, class T_Config_dst>
void copyPartition(const INDEX_TYPE my_id, INDEX_TYPE num_part, const int64_t *part_offsets, const INDEX_TYPE block_dimx, const INDEX_TYPE block_dimy, const Vector<T_Config_src> &b, const Vector<T_Config_src> &x, Vector<T_Config_dst> &b_part, Vector<T_Config_dst> &x_part)
{
if (b.is_vector_read_partitioned())
{
b_part.set_block_dimx(1);
b_part.set_block_dimy(block_dimy);
x_part.set_block_dimx(1);
x_part.set_block_dimy(block_dimx);
b_part.resize(b.size());
x_part.resize(x.size());
thrust::copy(b.begin(), b.end(), b_part.begin());
thrust::copy(x.begin(), x.end(), x_part.begin());
cudaCheckError();
}
else
{
FatalError("Partitioning mismatch", AMGX_ERR_BAD_PARAMETERS);
}
}
template <class T_Config_src, class T_Config_dst>
void partition(const INDEX_TYPE my_id, INDEX_TYPE num_part, const int64_t *part_offsets, const INDEX_TYPE block_dimx, const INDEX_TYPE block_dimy, const Vector<T_Config_src> &b, const Vector<T_Config_src> &x, Vector<T_Config_dst> &b_part, Vector<T_Config_dst> &x_part)
{
b_part.set_block_dimx(1);
b_part.set_block_dimy(block_dimy);
x_part.set_block_dimx(1);
x_part.set_block_dimy(block_dimx);
b_part.resize((part_offsets[my_id + 1] - part_offsets[my_id])*block_dimy);
x_part.resize((part_offsets[my_id + 1] - part_offsets[my_id])*block_dimx);
thrust::copy(b.begin() + part_offsets[my_id]*block_dimy, b.begin() + part_offsets[my_id + 1]*block_dimy, b_part.begin());
thrust::copy(x.begin() + part_offsets[my_id]*block_dimx, x.begin() + part_offsets[my_id + 1]*block_dimx, x_part.begin());
cudaCheckError();
}
template <class T_Config_src, class T_Config_dst>
void copyPartition(const INDEX_TYPE my_id, INDEX_TYPE num_part, const int64_t *part_offsets, const Matrix<T_Config_src> &A, Matrix<T_Config_dst> &A_part)
{
if (A.is_matrix_read_partitioned())
{
A_part.addProps(CSR);
if (A.hasProps(DIAG)) { A_part.addProps(DIAG); }
A_part.resize(A.get_num_rows(), A.get_num_cols(), A.get_num_nz(), A.get_block_dimy(), A.get_block_dimx(), 1);
thrust::copy(A.col_indices.begin(), A.col_indices.end(), A_part.col_indices.begin());
thrust::copy(A.row_offsets.begin(), A.row_offsets.end(), A_part.row_offsets.begin());
thrust::copy(A.values.begin(), A.values.end(), A_part.values.begin());
if (A.hasProps(DIAG))
{
/*
thrust::copy(A.values.begin() + A.get_num_nz()*A.get_block_size(), A.values.end(), A_part.values.begin()+A_part.row_offsets[A_part.row_offsets.size()-1]*A.get_block_size());*/
A_part.addProps(DIAG);
}
cudaCheckError();
}
else
{
FatalError("Partitioning mismatch", AMGX_ERR_BAD_PARAMETERS);
}
}
template <class T_Config_src, class T_Config_dst>
void partition(const INDEX_TYPE my_id, INDEX_TYPE num_part, const int64_t *part_offsets, const Matrix<T_Config_src> &A, Matrix<T_Config_dst> &A_part)
{
if (num_part == 0)
{
FatalError("Partitioning scheme is not set", AMGX_ERR_BAD_PARAMETERS);
}
A_part.addProps(CSR);
if (A.hasProps(DIAG)) { A_part.addProps(DIAG); }
int device = -1;
hipGetDevice( &device );
printf("Processing partition %d/%d size: %ld offset %d nnz %d on device %d\n", my_id + 1, num_part, part_offsets[my_id + 1] - part_offsets[my_id], (int)part_offsets[my_id], (int)(A.row_offsets[part_offsets[my_id + 1]] - A.row_offsets[part_offsets[my_id]]), device);
A_part.resize(part_offsets[my_id + 1] - part_offsets[my_id], A.get_num_cols(), (INDEX_TYPE)A.row_offsets[part_offsets[my_id + 1]] - (INDEX_TYPE)A.row_offsets[part_offsets[my_id]], A.get_block_dimy(), A.get_block_dimx(), 1);
thrust::copy(A.col_indices.begin() + (INDEX_TYPE)A.row_offsets[part_offsets[my_id]], A.col_indices.begin() + (INDEX_TYPE)A.row_offsets[part_offsets[my_id + 1]], A_part.col_indices.begin());
thrust::copy(A.row_offsets.begin() + part_offsets[my_id], A.row_offsets.begin() + part_offsets[my_id + 1] + 1, A_part.row_offsets.begin());
thrust::transform(A_part.row_offsets.begin(), A_part.row_offsets.end(), thrust::constant_iterator<INDEX_TYPE>(A.row_offsets[part_offsets[my_id]]), A_part.row_offsets.begin(), thrust::minus<INDEX_TYPE>());
cudaCheckError();
transfer_values(my_id, num_part, part_offsets, A, A_part);
}
} // end of partitioning routines
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::genRowPartitionsEqual(int partitions, IVector_h &partSize, int n_rows, IVector_h &partitionVec)
{
int i, p;
IVector_h scanPartSize(partitions + 1);
//Initialize partSize, partitionVec by equal partitioning
for (p = 0; p < partitions; p++)
{
uint64_t t = (uint64_t)p * (uint64_t)n_rows / (uint64_t)partitions;
scanPartSize[p] = t;
}
scanPartSize[partitions] = n_rows;
partSize.resize(partitions);
partitionVec.resize(n_rows);
p = 0;
for (i = 0; i < n_rows; i++)
{
if (i >= scanPartSize[p + 1]) { p++; }
partitionVec[i] = p;
}
for (p = 0; p < partitions; p++)
{
partSize[p] = scanPartSize[p + 1] - scanPartSize[p];
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::consolidatePartitions(IVector_h &partSize, IVector_h &partitionVec, int partitions)
{
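    // The partition vector may have been written for more ranks than are used now.
    // Example: a vector written for 8 partitions consolidated onto 4 ranks maps read
    // partitions {0,1}->0, {2,3}->1, {4,5}->2, {6,7}->3 and accumulates the sizes.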
std::stringstream msg;
int read_partitions = 0;
for (int i = 0; i < partitionVec.size(); i++) { read_partitions = ::max(read_partitions, partitionVec[i]); }
read_partitions++;
if (read_partitions % partitions != 0)
{
FatalError("Only integer number of partitions per rank is supported", AMGX_ERR_IO);
}
if (read_partitions != partitions)
{
        msg << "Found " << read_partitions << " partitions, performing consolidation\n";
}
int partsPerRank = read_partitions / partitions;
partSize.resize(partitions);
thrust::fill(partSize.begin(), partSize.end(), 0);
cudaCheckError();
for (int i = 0; i < partitionVec.size(); i++)
{
int p = partitionVec[i] / partsPerRank;
partitionVec[i] = p;
partSize[p]++;
}
msg << "Read consolidated partition sizes: ";
for (int i = 0; i < partitions; i++)
{
msg << partSize[i] << " ";
}
msg << "\n";
amgx_output(msg.str().c_str(), 0);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::readRowPartitions(const char *fnamec, int num_partitions, IVector_h &partSize, IVector_h &partitionVec)
{
/*
Partition vector format
vector of int - global array of partition ids, mapping each row_id to a partition id
*/
std::string err, fname(fnamec);
int N;
std::stringstream msg;
FILE *fin_rowpart = fopen(fname.c_str(), "rb");
const int size_of_int = sizeof(int);
char buf[size_of_int];
int c;
int chr_cnt = 0;
while ((c = fgetc(fin_rowpart)) != EOF)
{
buf[chr_cnt] = (char) c;
chr_cnt++;
if (chr_cnt % size_of_int == 0)
{
            // Reassemble the size_of_int bytes read so far into one binary int (native endianness)
            partitionVec.push_back(*(int *)buf);
chr_cnt = 0;
}
}
N = partitionVec.size();
msg << "Finished reading partition vector, consisting of " << N << " rows\n";
amgx_output(msg.str().c_str(), 0);
consolidatePartitions(partSize, partitionVec, num_partitions);
//amgx_output(msg.str().c_str(), 0);
fclose(fin_rowpart);
}
// Remap column indices: column indices that correspond to rows
// belonging to the same partition will be consecutive after remapping.
// For instance if partition is [0 1 0 1 0 1], remapping of columns will be:
// 0 -> 0
// 1 -> 3
// 2 -> 1
// 3 -> 4
// 4 -> 2
// 5 -> 5
// In other words:
// 0 - 2 is partition 0.
// 3 - 5 is partition 1.
// This way it is easy to determine if we have an edge to another partition just by using the column index.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::genMapRowPartitions(int rank, const IVector_h &partSize, IVector_h &partitionVec, IVector_h &partRowVec)
{
// partitionVec will contain Column Map on exit
if (partitionVec.size() == 0) { return; }
int num_part = partSize.size();
IVector_h scanPartSize(num_part + 1); // scan of partition sizes
IVector_h partCount(num_part, 0);// partition counters
IVector_h &colMapVec = partitionVec; // map for column indices old_glb_i-> new_glb_i, reusing the same vector
int p;
thrust::inclusive_scan(partSize.begin(), partSize.end(), &scanPartSize[1]);
cudaCheckError();
scanPartSize[0] = 0;
for (int old_glb_i = 0; old_glb_i < partitionVec.size(); old_glb_i++)
{
//printf("partitionVec[%d] = %d\n", old_glb_i, partitionVec[old_glb_i]);
if (partitionVec[old_glb_i] >= num_part)
{
FatalError("Bad partition vector", AMGX_ERR_IO);
}
p = partitionVec[old_glb_i];
if (p == rank) { partRowVec.push_back(old_glb_i); }
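        // New global index = start of partition p + running count of rows already assigned
        // to p, so each partition's rows occupy a contiguous index range after remapping.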
int new_loc_i = partCount[p];
int new_glb_i = scanPartSize[p] + new_loc_i;
colMapVec[old_glb_i] = new_glb_i;
partCount[p]++;
}
bool is_err = (partRowVec.size() != scanPartSize[rank + 1] - scanPartSize[rank]);
for (p = 0; p < num_part; p++)
{
is_err = is_err || (partCount[p] != scanPartSize[p + 1] - scanPartSize[p]);
}
    std::string err = "Error: partition sizes are inconsistent with the partition vector";
if (is_err) { FatalError(err, AMGX_ERR_IO); }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::remapReadColumns(Matrix<TConfig_h> &A, IVector_h &colMapVec)
{
for (int i = 0; i < A.get_num_nz(); i++)
{
A.col_indices[i] = colMapVec[A.col_indices[i]];
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
AMGX_ERROR DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::distributedRead(const char *fnamec, Matrix<TConfig_h> &A, Vector<TConfig_h> &b, Vector<TConfig_h> &x, int allocated_halo_depth, int part, int partitions, IVector_h &partSize, IVector_h &partitionVec, unsigned int props)
{
AMG_Configuration t_amgx_cfg;
AMG_Config *amgx_cfg = t_amgx_cfg.getConfigObject();
// Call the reader, but only read the dimensions of the matrix.
// Matrix A will have its dimensions set, but its content will be empty.
if (AMGX_OK != MatrixIO<TConfig_h>::readSystem(fnamec, A, *amgx_cfg, io_config::SIZE))
{
return AMGX_ERR_IO;
}
int n_rows = A.get_num_rows();
// Discard the matrix.
A.set_initialized(0);
A.resize(0, 0, 0, 1, 1); //Have to do this for proper resize later on
A.set_initialized(1);
// No partition was provided, use naive partitioning.
if (partitionVec.size() == 0)
{
genRowPartitionsEqual(partitions, partSize, n_rows, partitionVec);
}
if (n_rows != partitionVec.size())
{
FatalError("partition vector size does not match with matrix dimensions.", AMGX_ERR_CONFIGURATION);
}
IVector_h partRowVec;
if (partSize.size() == 0)
{
consolidatePartitions(partSize, partitionVec, partitions);
}
genMapRowPartitions(part, partSize, partitionVec, partRowVec); //partitionVec contains the map now
// partitionVec now contains columns remapping information (see remapReadColumns below).
// partRowVec now contains the list of owned rows.
// Call the distributed reader, the system will be read into A, b and x.
// Entries are filtered during reading using partRowVec. Entries
// that do not belong to the current partition are discarded.
if (AMGX_OK != MatrixIO<TConfig_h>::readSystem(fnamec, A, b, x, *amgx_cfg, props, partRowVec))
{
return AMGX_ERR_IO;
}
remapReadColumns(A, partitionVec);
return AMGX_OK;
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
AMGX_ERROR DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::distributedRead(const char *fnamec, Matrix<TConfig_h> &A, Vector<TConfig_h> &b, int allocated_halo_depth, int part, int partitions, IVector_h &partSize, IVector_h &partitionVec, unsigned int props)
{
Vector<TConfig_h> v = Vector<TConfig_h>(0);
if (io_config::hasProps(io_config::RHS, props))
{
return distributedRead(fnamec, A, b, v, allocated_halo_depth, part, partitions, partSize, partitionVec, props);
}
else
{
return distributedRead(fnamec, A, v, b, allocated_halo_depth, part, partitions, partSize, partitionVec, props);
}
}
// Service function to partition matrix and vectors on host,then upload matrix partition to device and compute maps internally without boundary separation
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
AMGX_ERROR DistributedRead<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::distributedReadDeviceInit(const char *fnamec, Matrix<TConfig_h> &Ah_part, Matrix<TConfig_d> &A, Vector<TConfig_h> &bh_part, Vector<TConfig_h> &xh_part, I64Vector_h &part_offsets_h, int allocated_halo_depth, int part, int partitions, IVector_h &partSize, IVector_h &partitionVec, unsigned int props)
{
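    // Overall flow: read (or partition) the system on the host, upload this rank's block
    // to the device, then build the distributed manager and its boundary (B2L) maps.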
Matrix<TConfig_h> Ah;
Vector<TConfig_h> bh;
Vector<TConfig_h> xh;
typedef typename VecPrecisionMap<t_vecPrec>::Type ValueTypeB;
Ah_part.setResources(A.getResources());
Ah.setResources(A.getResources());
AMGX_ERROR err = DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::distributedRead(fnamec, Ah, bh, xh, allocated_halo_depth, part, partitions, partSize, partitionVec, props);
if (err != AMGX_OK)
{
return err;
}
//hipSetDevice(A.getResources()->getDevice(0));
part_offsets_h.resize(partitions + 1);
int64_t num_rows = Ah.get_num_rows();
if (!Ah.is_matrix_read_partitioned())
{
create_part_offsets_equal_rows(partitions, num_rows, part_offsets_h.raw());
partition<TConfig_h, TConfig_h>(part, partitions, part_offsets_h.raw(), Ah, Ah_part);
partition<TConfig_h, TConfig_h>(part, partitions, part_offsets_h.raw(), Ah.get_block_dimx(), Ah.get_block_dimy(), bh, xh, bh_part, xh_part);
}
else
{
Ah_part.swap(Ah);
bh_part.swap(bh);
xh_part.swap(xh);
thrust::inclusive_scan(partSize.begin(), partSize.end(), &part_offsets_h[1]);
cudaCheckError();
part_offsets_h[0] = 0;
}
Ah_part.set_is_matrix_read_partitioned(true);
bh_part.set_is_vector_read_partitioned(true);
xh_part.set_is_vector_read_partitioned(true);
// upload a copy to device
copyPartition<TConfig_h, TConfig_d>(part, partitions, part_offsets_h.raw(), Ah_part, A);
if (xh_part.size() == 0)
{
if (part == 0)
{
printf("Initializing solution vector with zeroes...\n");
}
xh_part.resize(num_rows * A.get_block_dimx());
thrust::fill(xh_part.begin(), xh_part.end(), types::util<ValueTypeB>::get_zero());
}
if (A.manager == NULL )
{
A.manager = new DistributedManager<TConfig_d>(A);
}
else
{
A.setManagerExternal();
}
A.manager->createComms(A.getResources());
// copy part offsets
A.manager->part_offsets_h.resize(partitions + 1);
for (int i = 0; i < partitions + 1; i++)
{
A.manager->part_offsets_h[i] = part_offsets_h[i];
}
A.manager->num_rows_global = A.manager->part_offsets_h[partitions];
A.manager->part_offsets = A.manager->part_offsets_h;
if (partitions > 1)
{
DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>;
prep->set_part_offsets(partitions, part_offsets_h.raw());
prep->create_B2L(A, part, 1);
delete prep;
}
return AMGX_OK;
}
template <class Mat>
void getConsolidationFlags( const Mat *A, int *consolidate_flag, int *cuda_ipc_flag)
{
AMG_Config *rsrc_cfg = A->getResources()->getResourcesConfig();
std::string scope;
rsrc_cfg->getParameter<int>("fine_level_consolidation", *consolidate_flag, "default", scope);
rsrc_cfg->getParameter<int>("use_cuda_ipc_consolidation", *cuda_ipc_flag, "default", scope);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
AMGX_ERROR DistributedRead<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::distributedRead(const char *fnamec, Matrix<TConfig_d> &A, Vector<TConfig_d> &b, Vector<TConfig_d> &x, int allocated_halo_depth, int part, int partitions, IVector_h &partSize, IVector_h &partitionVec, unsigned int props)
{
Resources *resources = A.getResources();
hipSetDevice(resources->getDevice(0));
Matrix<TConfig_h> Ah;
Vector<TConfig_h> bh;
Vector<TConfig_h> xh;
I64Vector_h part_offsets_h;
std::string solver_scope, solver_value;
std::string precond_scope, precond_value;
AlgorithmType algorithm_s, algorithm_p;
resources->getResourcesConfig()->getParameter<std::string>("solver", solver_value, "default", solver_scope);
algorithm_s = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", solver_scope);
resources->getResourcesConfig()->getParameter<std::string>("preconditioner", precond_value, solver_scope, precond_scope);
algorithm_p = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", precond_scope);
bool isClassical = false;
// Detect whether one does CLASSICAL or AGGREGATION
if (algorithm_s == CLASSICAL && algorithm_p == CLASSICAL)
{
if (allocated_halo_depth > 2)
{
FatalError("allocated_halo_depth > 2 not supported in CLASSICAL", AMGX_ERR_BAD_PARAMETERS);
}
isClassical = true;
}
else
{
if (allocated_halo_depth > 1)
{
FatalError("allocated_halo_depth > 1 not supported in AGGREGATION", AMGX_ERR_BAD_PARAMETERS);
}
}
//Matrix<TConfig_d>* Ad = new Matrix<TConfig_d>();
//Ad->setResources(A.getResources());
Matrix<TConfig_d> *Ad;
int consolidate_flag, cuda_ipc_flag;
getConsolidationFlags( &A, &consolidate_flag, &cuda_ipc_flag);
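    // With fine-level consolidation enabled on a multi-partition run (and boundary
    // separation allowed), read into a temporary device matrix first; it is consolidated
    // into A further down. Otherwise read straight into A.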
if (consolidate_flag != 0 && partitions > 1 && A.get_allow_boundary_separation())
{
Ad = new Matrix<TConfig_d>;
Ad->setResources(resources);
}
else
{
Ad = &A;
}
if (isClassical && consolidate_flag)
{
FatalError("Fine level consolidation not supported in CLASSICAL", AMGX_ERR_BAD_PARAMETERS);
}
// Reset distributed manager
if (A.manager != NULL )
{
delete A.manager;
A.manager = NULL;
}
A.manager = new DistributedManager<TConfig_d>(A);
A.setManagerExternal();
AMGX_ERROR err = DistributedRead<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::distributedReadDeviceInit(fnamec, Ah, *Ad, bh, xh, part_offsets_h, allocated_halo_depth, part, partitions, partSize, partitionVec, props);
if (err != AMGX_OK)
{
return err;
}
int64_t num_rows = Ah.get_num_rows();
int block_dimx = Ah.get_block_dimx();
int block_dimy = Ah.get_block_dimy();
int block_size = block_dimx * block_dimy;
void *x_host = xh.raw(), *b_host = bh.raw();
int sizeof_v_val = (t_vecPrec == AMGX_vecDouble) ? sizeof(double) : sizeof(float);
//int sizeof_v_val = sizeof(typename VecPrecisionMap<t_vecPrec>::Type);
hipHostRegister(x_host, num_rows * block_dimx * sizeof_v_val, hipHostRegisterMapped);
hipHostRegister(b_host, num_rows * block_dimy * sizeof_v_val, hipHostRegisterMapped);
cudaCheckError();
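    // b and x stay in (now page-locked) host memory for the moment; depending on the path
    // below they are either transformed and uploaded by the distributed manager or copied
    // to the device partition directly at the end.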
if (!isClassical)
{
// AGGREGATION path
if (partitions > 1)
{
if (Ad->get_allow_boundary_separation())
{
// TODO: This can be done without exporting of maps
int *btl_sizes = NULL;
int **btl_maps = NULL;
int *lth_sizes = NULL;
int **lth_maps = NULL;
int *neighbors = NULL;
int num_neighbors = Ad->manager->num_neighbors();
neighbors = (int *)malloc((num_neighbors) * sizeof(int));
                Ad->manager->malloc_export_maps(&btl_maps, &btl_sizes, &lth_maps, &lth_sizes);
Ad->manager->export_neighbors(neighbors);
if (A.manager != NULL )
{
delete A.manager;
}
A.manager = new DistributedManager<TConfig_d>(A, 1, 1, num_neighbors, neighbors);
A.manager->cacheMapsOneRing((int const **) btl_maps, (const int *)btl_sizes, (int const **)lth_maps, (const int *)lth_sizes);
A.setManagerExternal();
A.manager->createComms(A.getResources());
A.manager->setAConsolidationFlags(A);
if (A.manager->isFineLevelConsolidated())
{
A.addProps(CSR);
A.setColsReorderedByColor(false);
A.delProps(COO);
A.delProps(DIAG);
A.setColsReorderedByColor(false);
if (Ah.hasProps(DIAG))
{
A.addProps(DIAG);
}
int nnz = Ah.get_num_nz();
typedef typename MatPrecisionMap<t_matPrec>::Type ValueType;
int *row_ptrs = NULL, *col_indices = NULL;
void *values, *diag_data = NULL;
// Use local column indicies now
col_indices = Ad->col_indices.raw();
//row_ptrs = Ad->row_offsets.raw();
// values = Ad->values.raw();
//col_indices = Ad->col_indices.raw();
// row offsets are still global and not reordered
row_ptrs = Ah.row_offsets.raw();
values = Ah.values.raw();
// Do pinning of some buffers since fine level consolidation crashes when only one GPU is used
int sizeof_m_val = sizeof(ValueType);
hipHostRegister(values, nnz * block_size * sizeof_m_val, hipHostRegisterMapped);
cudaCheckError();
if (Ah.hasProps(DIAG))
{
//diag_data = (Ad->values.raw() + nnz*block_size);
diag_data = (Ah.values.raw() + nnz * block_size);
hipHostRegister((void *)diag_data, num_rows * block_size * sizeof_m_val, hipHostRegisterMapped);
cudaCheckError();
}
/*
hipHostRegister(col_indices,nnz*sizeof(int),hipHostRegisterMapped);
cudaCheckError();
*/
hipHostRegister(row_ptrs, (num_rows + 1)*sizeof(int), hipHostRegisterMapped);
cudaCheckError();
hipSetDevice(A.getResources()->getDevice(0));
A.manager->consolidateAndUploadAll(num_rows, nnz, block_dimx, block_dimy, row_ptrs, col_indices, values, diag_data, A);
A.set_initialized(1);
hipSetDevice(A.getResources()->getDevice(0));
if (diag_data != NULL)
{
hipHostUnregister(diag_data);
}
hipHostUnregister(values);
hipHostUnregister(row_ptrs);
//hipHostUnregister(col_indices);
cudaCheckError();
delete Ad;
}
else
{
A.manager->createComms(A.getResources());
A.manager->updateMapsReorder();
} // End consolidation check
free_maps_one_ring<int>(num_neighbors, neighbors, btl_sizes, btl_maps, lth_sizes, lth_maps);
A.set_is_matrix_read_partitioned(true);
b.set_is_vector_read_partitioned(true);
x.set_is_vector_read_partitioned(true);
x.setManager(*(A.manager));
b.setManager(*(A.manager));
b.set_block_dimx(1);
b.set_block_dimy(block_dimy);
x.set_block_dimx(1);
x.set_block_dimy(block_dimx);
A.manager->transformAndUploadVector(b, b_host, num_rows, b.get_block_dimy());
A.manager->transformAndUploadVector(x, x_host, num_rows, x.get_block_dimy());
hipHostUnregister(b_host);
hipHostUnregister(x_host);
hipDeviceSynchronize();
return AMGX_OK;
} // end of boundary sparation
}
else
{
A.computeDiagonal();
A.set_initialized(1);
A.setView(OWNED);
} // End if partitions>1
}
else
{
// CLASSICAL
/* WARNING: in the classical path, even if a single partition is used it needs
to setup the distributed data structures, because they are later used in the
code. For instance, halo_offsets must be set correctly, otherwise
generateInterpolationMatrix_1x1 -> exchange_halo_2ring -> setup -> do_setup
will fail when accessing them. Therefore, classical path can not be encapsulated
in the if (npartitions > 1) { ... } statement and cl-116816 moves it out. */
if (Ad->get_allow_boundary_separation())
{
A.set_initialized(0);
A.manager->neighbors.resize(0);
A.manager->renumberMatrixOneRing();
A.manager->createOneRingHaloRows();
A.manager->getComms()->set_neighbors(A.manager->num_neighbors());
A.setView(OWNED);
A.set_initialized(1);
A.set_is_matrix_read_partitioned(true);
b.set_is_vector_read_partitioned(true);
x.set_is_vector_read_partitioned(true);
x.setManager(*(A.manager));
b.setManager(*(A.manager));
b.set_block_dimx(1);
b.set_block_dimy(block_dimy);
x.set_block_dimx(1);
x.set_block_dimy(block_dimx);
A.manager->transformAndUploadVector(b, b_host, num_rows, b.get_block_dimy());
A.manager->transformAndUploadVector(x, x_host, num_rows, x.get_block_dimy());
hipHostUnregister(b_host);
hipHostUnregister(x_host);
hipDeviceSynchronize();
return AMGX_OK;
}
}
// just copy remaining data to device
copyPartition<TConfig_h, TConfig_d>(part, partitions, part_offsets_h.raw(), Ah.get_block_dimx(), Ah.get_block_dimy(), bh, xh, b, x);
hipHostUnregister(b_host);
hipHostUnregister(x_host);
return AMGX_OK;
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
AMGX_ERROR DistributedRead<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::distributedRead(const char *fnamec, Matrix<TConfig_d> &A, Vector<TConfig_d> &b, int allocated_halo_depth, int part, int partitions, IVector_h &partSize, IVector_h &partitionVec, unsigned int props)
{
Vector<TConfig_d> v = Vector<TConfig_d>(0);
if (io_config::hasProps(io_config::RHS, props))
{
return distributedRead(fnamec, A, b, v, allocated_halo_depth, part, partitions, partSize, partitionVec, props);
}
else
{
return distributedRead(fnamec, A, v, b, allocated_halo_depth, part, partitions, partSize, partitionVec, props);
}
}
/****************************************
* Explict instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class DistributedRead<TemplateMode<CASE>::Type >;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
| fdf0f336e157536e0245ce43acf98e199152a619.cu | /* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <distributed/distributed_io.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust_wrapper.h>
#include <amgx_types/util.h>
namespace amgx
{
template <class T>
AMGX_ERROR free_maps_one_ring(T num_neighbors, T *neighbors, T *btl_sizes, T **btl_maps, T *lth_sizes, T **lth_maps)
{
if (neighbors != NULL) { free(neighbors); }
if (btl_maps != NULL)
{
for (T i = 0; i < num_neighbors; i++)
if (btl_maps[i] != NULL) { free(btl_maps[i]); }
free(btl_maps);
}
if (lth_maps != NULL)
{
for (T i = 0; i < num_neighbors; i++)
if (lth_maps[i] != NULL) { free(lth_maps[i]); }
free(lth_maps);
}
if (btl_sizes != NULL) { free(btl_sizes); }
if (lth_sizes != NULL) { free(lth_sizes); }
return AMGX_OK;
}
namespace
{
// partitioning routines
void create_part_offsets_equal_rows(INDEX_TYPE num_part, int64_t num_rows_total, int64_t *part_offsets_h)
{
for (int i = 0; i < num_part; i++)
{
part_offsets_h[i] = i * num_rows_total / num_part;
}
part_offsets_h[num_part] = num_rows_total;
}
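// For example (illustrative values): num_rows_total = 10 and num_part = 3 produce
// part_offsets_h = {0, 3, 6, 10}, i.e. per-rank row counts of 3, 3 and 4.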
template <class T_Config_src, class T_Config_dst>
void transfer_values(const INDEX_TYPE my_id, INDEX_TYPE num_part, const int64_t *part_offsets, const Matrix<T_Config_src> &A, Matrix<T_Config_dst> &A_part)
{
if (A_part.values.size() == 0 || num_part == 0 || A_part.row_offsets.size() == 0)
{
FatalError("Partitioning was not performed", AMGX_ERR_BAD_PARAMETERS);
}
thrust::copy(A.values.begin() + (INDEX_TYPE)A.row_offsets[part_offsets[my_id]]*A.get_block_size(), A.values.begin() + (INDEX_TYPE)A.row_offsets[part_offsets[my_id + 1]]*A.get_block_size(), A_part.values.begin());
if (A.hasProps(DIAG))
{
thrust::copy(A.values.begin() + (A.diag[0] + part_offsets[my_id])*A.get_block_size(), A.values.begin() + (A.diag[0] + part_offsets[my_id + 1])*A.get_block_size(), A_part.values.begin() + A_part.row_offsets[A_part.row_offsets.size() - 1]*A.get_block_size());
cudaCheckError();
}
}
template <class T_Config_src, class T_Config_dst>
void copyPartition(const INDEX_TYPE my_id, INDEX_TYPE num_part, const int64_t *part_offsets, const INDEX_TYPE block_dimx, const INDEX_TYPE block_dimy, const Vector<T_Config_src> &b, const Vector<T_Config_src> &x, Vector<T_Config_dst> &b_part, Vector<T_Config_dst> &x_part)
{
if (b.is_vector_read_partitioned())
{
b_part.set_block_dimx(1);
b_part.set_block_dimy(block_dimy);
x_part.set_block_dimx(1);
x_part.set_block_dimy(block_dimx);
b_part.resize(b.size());
x_part.resize(x.size());
thrust::copy(b.begin(), b.end(), b_part.begin());
thrust::copy(x.begin(), x.end(), x_part.begin());
cudaCheckError();
}
else
{
FatalError("Partitioning mismatch", AMGX_ERR_BAD_PARAMETERS);
}
}
template <class T_Config_src, class T_Config_dst>
void partition(const INDEX_TYPE my_id, INDEX_TYPE num_part, const int64_t *part_offsets, const INDEX_TYPE block_dimx, const INDEX_TYPE block_dimy, const Vector<T_Config_src> &b, const Vector<T_Config_src> &x, Vector<T_Config_dst> &b_part, Vector<T_Config_dst> &x_part)
{
b_part.set_block_dimx(1);
b_part.set_block_dimy(block_dimy);
x_part.set_block_dimx(1);
x_part.set_block_dimy(block_dimx);
b_part.resize((part_offsets[my_id + 1] - part_offsets[my_id])*block_dimy);
x_part.resize((part_offsets[my_id + 1] - part_offsets[my_id])*block_dimx);
thrust::copy(b.begin() + part_offsets[my_id]*block_dimy, b.begin() + part_offsets[my_id + 1]*block_dimy, b_part.begin());
thrust::copy(x.begin() + part_offsets[my_id]*block_dimx, x.begin() + part_offsets[my_id + 1]*block_dimx, x_part.begin());
cudaCheckError();
}
template <class T_Config_src, class T_Config_dst>
void copyPartition(const INDEX_TYPE my_id, INDEX_TYPE num_part, const int64_t *part_offsets, const Matrix<T_Config_src> &A, Matrix<T_Config_dst> &A_part)
{
if (A.is_matrix_read_partitioned())
{
A_part.addProps(CSR);
if (A.hasProps(DIAG)) { A_part.addProps(DIAG); }
A_part.resize(A.get_num_rows(), A.get_num_cols(), A.get_num_nz(), A.get_block_dimy(), A.get_block_dimx(), 1);
thrust::copy(A.col_indices.begin(), A.col_indices.end(), A_part.col_indices.begin());
thrust::copy(A.row_offsets.begin(), A.row_offsets.end(), A_part.row_offsets.begin());
thrust::copy(A.values.begin(), A.values.end(), A_part.values.begin());
if (A.hasProps(DIAG))
{
/*
thrust::copy(A.values.begin() + A.get_num_nz()*A.get_block_size(), A.values.end(), A_part.values.begin()+A_part.row_offsets[A_part.row_offsets.size()-1]*A.get_block_size());*/
A_part.addProps(DIAG);
}
cudaCheckError();
}
else
{
FatalError("Partitioning mismatch", AMGX_ERR_BAD_PARAMETERS);
}
}
template <class T_Config_src, class T_Config_dst>
void partition(const INDEX_TYPE my_id, INDEX_TYPE num_part, const int64_t *part_offsets, const Matrix<T_Config_src> &A, Matrix<T_Config_dst> &A_part)
{
if (num_part == 0)
{
FatalError("Partitioning scheme is not set", AMGX_ERR_BAD_PARAMETERS);
}
A_part.addProps(CSR);
if (A.hasProps(DIAG)) { A_part.addProps(DIAG); }
int device = -1;
cudaGetDevice( &device );
printf("Processing partition %d/%d size: %ld offset %d nnz %d on device %d\n", my_id + 1, num_part, part_offsets[my_id + 1] - part_offsets[my_id], (int)part_offsets[my_id], (int)(A.row_offsets[part_offsets[my_id + 1]] - A.row_offsets[part_offsets[my_id]]), device);
A_part.resize(part_offsets[my_id + 1] - part_offsets[my_id], A.get_num_cols(), (INDEX_TYPE)A.row_offsets[part_offsets[my_id + 1]] - (INDEX_TYPE)A.row_offsets[part_offsets[my_id]], A.get_block_dimy(), A.get_block_dimx(), 1);
thrust::copy(A.col_indices.begin() + (INDEX_TYPE)A.row_offsets[part_offsets[my_id]], A.col_indices.begin() + (INDEX_TYPE)A.row_offsets[part_offsets[my_id + 1]], A_part.col_indices.begin());
thrust::copy(A.row_offsets.begin() + part_offsets[my_id], A.row_offsets.begin() + part_offsets[my_id + 1] + 1, A_part.row_offsets.begin());
thrust::transform(A_part.row_offsets.begin(), A_part.row_offsets.end(), thrust::constant_iterator<INDEX_TYPE>(A.row_offsets[part_offsets[my_id]]), A_part.row_offsets.begin(), thrust::minus<INDEX_TYPE>());
cudaCheckError();
transfer_values(my_id, num_part, part_offsets, A, A_part);
}
} // end of partitioning routines
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::genRowPartitionsEqual(int partitions, IVector_h &partSize, int n_rows, IVector_h &partitionVec)
{
int i, p;
IVector_h scanPartSize(partitions + 1);
//Initialize partSize, partitionVec by equal partitioning
for (p = 0; p < partitions; p++)
{
uint64_t t = (uint64_t)p * (uint64_t)n_rows / (uint64_t)partitions;
scanPartSize[p] = t;
}
scanPartSize[partitions] = n_rows;
partSize.resize(partitions);
partitionVec.resize(n_rows);
p = 0;
for (i = 0; i < n_rows; i++)
{
if (i >= scanPartSize[p + 1]) { p++; }
partitionVec[i] = p;
}
for (p = 0; p < partitions; p++)
{
partSize[p] = scanPartSize[p + 1] - scanPartSize[p];
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::consolidatePartitions(IVector_h &partSize, IVector_h &partitionVec, int partitions)
{
std::stringstream msg;
int read_partitions = 0;
for (int i = 0; i < partitionVec.size(); i++) { read_partitions = std::max(read_partitions, partitionVec[i]); }
read_partitions++;
if (read_partitions % partitions != 0)
{
FatalError("Only integer number of partitions per rank is supported", AMGX_ERR_IO);
}
if (read_partitions != partitions)
{
msg << "Found " << read_partitions << " performing consolidation\n";
}
int partsPerRank = read_partitions / partitions;
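// Illustrative example: a partition vector written for 4 partitions consolidated onto
// 2 ranks gives partsPerRank = 2, so original partitions {0, 1} are folded onto rank 0
// and {2, 3} onto rank 1 in the loop below.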
partSize.resize(partitions);
thrust::fill(partSize.begin(), partSize.end(), 0);
cudaCheckError();
for (int i = 0; i < partitionVec.size(); i++)
{
int p = partitionVec[i] / partsPerRank;
partitionVec[i] = p;
partSize[p]++;
}
msg << "Read consolidated partition sizes: ";
for (int i = 0; i < partitions; i++)
{
msg << partSize[i] << " ";
}
msg << "\n";
amgx_output(msg.str().c_str(), 0);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::readRowPartitions(const char *fnamec, int num_partitions, IVector_h &partSize, IVector_h &partitionVec)
{
/*
Partition vector format
vector of int - global array of partition ids, mapping each row_id to a partition id
*/
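// Illustrative example: for a 6-row system split across two partitions the file would
// hold the raw ints {0, 1, 0, 1, 0, 1}, i.e. rows 0, 2 and 4 belong to partition 0 and
// rows 1, 3 and 5 to partition 1.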
std::string err, fname(fnamec);
int N;
std::stringstream msg;
FILE *fin_rowpart = fopen(fname.c_str(), "rb");
const int size_of_int = sizeof(int);
char buf[size_of_int];
int c;
int chr_cnt = 0;
while ((c = fgetc(fin_rowpart)) != EOF)
{
buf[chr_cnt] = (char) c;
chr_cnt++;
if (chr_cnt % size_of_int == 0)
{
partitionVec.push_back(int(*buf));
chr_cnt = 0;
}
}
N = partitionVec.size();
msg << "Finished reading partition vector, consisting of " << N << " rows\n";
amgx_output(msg.str().c_str(), 0);
consolidatePartitions(partSize, partitionVec, num_partitions);
//amgx_output(msg.str().c_str(), 0);
fclose(fin_rowpart);
}
// Remap column indices: column indices that correspond to rows
// belonging to the same partition will be consecutive after remapping.
// For instance if partition is [0 1 0 1 0 1], remapping of columns will be:
// 0 -> 0
// 1 -> 3
// 2 -> 1
// 3 -> 4
// 4 -> 2
// 5 -> 5
// In other words:
// 0 - 2 is partition 0.
// 3 - 5 is partition 1.
// This way it is easy to determine if we have an edge to another partition just by using the column index.
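// Tying the example above to the code below (illustrative): partSize = {3, 3} scans to
// scanPartSize = {0, 3, 6}; when old row 2 (partition 0) is visited, partCount[0] is
// already 1, so its new index is scanPartSize[0] + 1 = 1, matching the "2 -> 1" line.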
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::genMapRowPartitions(int rank, const IVector_h &partSize, IVector_h &partitionVec, IVector_h &partRowVec)
{
// partitionVec will contain Column Map on exit
if (partitionVec.size() == 0) { return; }
int num_part = partSize.size();
IVector_h scanPartSize(num_part + 1); // scan of partition sizes
IVector_h partCount(num_part, 0);// partition counters
IVector_h &colMapVec = partitionVec; // map for column indices old_glb_i-> new_glb_i, reusing the same vector
int p;
thrust::inclusive_scan(partSize.begin(), partSize.end(), &scanPartSize[1]);
cudaCheckError();
scanPartSize[0] = 0;
for (int old_glb_i = 0; old_glb_i < partitionVec.size(); old_glb_i++)
{
//printf("partitionVec[%d] = %d\n", old_glb_i, partitionVec[old_glb_i]);
if (partitionVec[old_glb_i] >= num_part)
{
FatalError("Bad partition vector", AMGX_ERR_IO);
}
p = partitionVec[old_glb_i];
if (p == rank) { partRowVec.push_back(old_glb_i); }
int new_loc_i = partCount[p];
int new_glb_i = scanPartSize[p] + new_loc_i;
colMapVec[old_glb_i] = new_glb_i;
partCount[p]++;
}
bool is_err = (partRowVec.size() != scanPartSize[rank + 1] - scanPartSize[rank]);
for (p = 0; p < num_part; p++)
{
is_err = is_err || (partCount[p] != scanPartSize[p + 1] - scanPartSize[p]);
}
std::string err = "Error: reading row offsets";
if (is_err) { FatalError(err, AMGX_ERR_IO); }
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::remapReadColumns(Matrix<TConfig_h> &A, IVector_h &colMapVec)
{
for (int i = 0; i < A.get_num_nz(); i++)
{
A.col_indices[i] = colMapVec[A.col_indices[i]];
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
AMGX_ERROR DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::distributedRead(const char *fnamec, Matrix<TConfig_h> &A, Vector<TConfig_h> &b, Vector<TConfig_h> &x, int allocated_halo_depth, int part, int partitions, IVector_h &partSize, IVector_h &partitionVec, unsigned int props)
{
AMG_Configuration t_amgx_cfg;
AMG_Config *amgx_cfg = t_amgx_cfg.getConfigObject();
// Call the reader, but only read the dimensions of the matrix.
// Matrix A will have its dimensions set, but its content will be empty.
if (AMGX_OK != MatrixIO<TConfig_h>::readSystem(fnamec, A, *amgx_cfg, io_config::SIZE))
{
return AMGX_ERR_IO;
}
int n_rows = A.get_num_rows();
// Discard the matrix.
A.set_initialized(0);
A.resize(0, 0, 0, 1, 1); //Have to do this for proper resize later on
A.set_initialized(1);
// No partition was provided, use naive partitioning.
if (partitionVec.size() == 0)
{
genRowPartitionsEqual(partitions, partSize, n_rows, partitionVec);
}
if (n_rows != partitionVec.size())
{
FatalError("partition vector size does not match with matrix dimensions.", AMGX_ERR_CONFIGURATION);
}
IVector_h partRowVec;
if (partSize.size() == 0)
{
consolidatePartitions(partSize, partitionVec, partitions);
}
genMapRowPartitions(part, partSize, partitionVec, partRowVec); //partitionVec contains the map now
// partitionVec now contains columns remapping information (see remapReadColumns below).
// partRowVec now contains the list of owned rows.
// Call the distributed reader, the system will be read into A, b and x.
// Entries are filtered during reading using partRowVec. Entries
// that do not belong to the current partition are discarded.
if (AMGX_OK != MatrixIO<TConfig_h>::readSystem(fnamec, A, b, x, *amgx_cfg, props, partRowVec))
{
return AMGX_ERR_IO;
}
remapReadColumns(A, partitionVec);
return AMGX_OK;
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
AMGX_ERROR DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::distributedRead(const char *fnamec, Matrix<TConfig_h> &A, Vector<TConfig_h> &b, int allocated_halo_depth, int part, int partitions, IVector_h &partSize, IVector_h &partitionVec, unsigned int props)
{
Vector<TConfig_h> v = Vector<TConfig_h>(0);
if (io_config::hasProps(io_config::RHS, props))
{
return distributedRead(fnamec, A, b, v, allocated_halo_depth, part, partitions, partSize, partitionVec, props);
}
else
{
return distributedRead(fnamec, A, v, b, allocated_halo_depth, part, partitions, partSize, partitionVec, props);
}
}
// Service function to partition matrix and vectors on host, then upload matrix partition to device and compute maps internally without boundary separation
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
AMGX_ERROR DistributedRead<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::distributedReadDeviceInit(const char *fnamec, Matrix<TConfig_h> &Ah_part, Matrix<TConfig_d> &A, Vector<TConfig_h> &bh_part, Vector<TConfig_h> &xh_part, I64Vector_h &part_offsets_h, int allocated_halo_depth, int part, int partitions, IVector_h &partSize, IVector_h &partitionVec, unsigned int props)
{
Matrix<TConfig_h> Ah;
Vector<TConfig_h> bh;
Vector<TConfig_h> xh;
typedef typename VecPrecisionMap<t_vecPrec>::Type ValueTypeB;
Ah_part.setResources(A.getResources());
Ah.setResources(A.getResources());
AMGX_ERROR err = DistributedRead<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::distributedRead(fnamec, Ah, bh, xh, allocated_halo_depth, part, partitions, partSize, partitionVec, props);
if (err != AMGX_OK)
{
return err;
}
//cudaSetDevice(A.getResources()->getDevice(0));
part_offsets_h.resize(partitions + 1);
int64_t num_rows = Ah.get_num_rows();
if (!Ah.is_matrix_read_partitioned())
{
create_part_offsets_equal_rows(partitions, num_rows, part_offsets_h.raw());
partition<TConfig_h, TConfig_h>(part, partitions, part_offsets_h.raw(), Ah, Ah_part);
partition<TConfig_h, TConfig_h>(part, partitions, part_offsets_h.raw(), Ah.get_block_dimx(), Ah.get_block_dimy(), bh, xh, bh_part, xh_part);
}
else
{
Ah_part.swap(Ah);
bh_part.swap(bh);
xh_part.swap(xh);
thrust::inclusive_scan(partSize.begin(), partSize.end(), &part_offsets_h[1]);
cudaCheckError();
part_offsets_h[0] = 0;
}
Ah_part.set_is_matrix_read_partitioned(true);
bh_part.set_is_vector_read_partitioned(true);
xh_part.set_is_vector_read_partitioned(true);
// upload a copy to device
copyPartition<TConfig_h, TConfig_d>(part, partitions, part_offsets_h.raw(), Ah_part, A);
if (xh_part.size() == 0)
{
if (part == 0)
{
printf("Initializing solution vector with zeroes...\n");
}
xh_part.resize(num_rows * A.get_block_dimx());
thrust::fill(xh_part.begin(), xh_part.end(), types::util<ValueTypeB>::get_zero());
}
if (A.manager == NULL )
{
A.manager = new DistributedManager<TConfig_d>(A);
}
else
{
A.setManagerExternal();
}
A.manager->createComms(A.getResources());
// copy part offsets
A.manager->part_offsets_h.resize(partitions + 1);
for (int i = 0; i < partitions + 1; i++)
{
A.manager->part_offsets_h[i] = part_offsets_h[i];
}
A.manager->num_rows_global = A.manager->part_offsets_h[partitions];
A.manager->part_offsets = A.manager->part_offsets_h;
if (partitions > 1)
{
DistributedArranger<TConfig_d> *prep = new DistributedArranger<TConfig_d>;
prep->set_part_offsets(partitions, part_offsets_h.raw());
prep->create_B2L(A, part, 1);
delete prep;
}
return AMGX_OK;
}
template <class Mat>
void getConsolidationFlags( const Mat *A, int *consolidate_flag, int *cuda_ipc_flag)
{
AMG_Config *rsrc_cfg = A->getResources()->getResourcesConfig();
std::string scope;
rsrc_cfg->getParameter<int>("fine_level_consolidation", *consolidate_flag, "default", scope);
rsrc_cfg->getParameter<int>("use_cuda_ipc_consolidation", *cuda_ipc_flag, "default", scope);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
AMGX_ERROR DistributedRead<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::distributedRead(const char *fnamec, Matrix<TConfig_d> &A, Vector<TConfig_d> &b, Vector<TConfig_d> &x, int allocated_halo_depth, int part, int partitions, IVector_h &partSize, IVector_h &partitionVec, unsigned int props)
{
Resources *resources = A.getResources();
cudaSetDevice(resources->getDevice(0));
Matrix<TConfig_h> Ah;
Vector<TConfig_h> bh;
Vector<TConfig_h> xh;
I64Vector_h part_offsets_h;
std::string solver_scope, solver_value;
std::string precond_scope, precond_value;
AlgorithmType algorithm_s, algorithm_p;
resources->getResourcesConfig()->getParameter<std::string>("solver", solver_value, "default", solver_scope);
algorithm_s = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", solver_scope);
resources->getResourcesConfig()->getParameter<std::string>("preconditioner", precond_value, solver_scope, precond_scope);
algorithm_p = resources->getResourcesConfig()->getParameter<AlgorithmType>("algorithm", precond_scope);
bool isClassical = false;
// Detect whether one does CLASSICAL or AGGREGATION
if (algorithm_s == CLASSICAL && algorithm_p == CLASSICAL)
{
if (allocated_halo_depth > 2)
{
FatalError("allocated_halo_depth > 2 not supported in CLASSICAL", AMGX_ERR_BAD_PARAMETERS);
}
isClassical = true;
}
else
{
if (allocated_halo_depth > 1)
{
FatalError("allocated_halo_depth > 1 not supported in AGGREGATION", AMGX_ERR_BAD_PARAMETERS);
}
}
//Matrix<TConfig_d>* Ad = new Matrix<TConfig_d>();
//Ad->setResources(A.getResources());
Matrix<TConfig_d> *Ad;
int consolidate_flag, cuda_ipc_flag;
getConsolidationFlags( &A, &consolidate_flag, &cuda_ipc_flag);
if (consolidate_flag != 0 && partitions > 1 && A.get_allow_boundary_separation())
{
Ad = new Matrix<TConfig_d>;
Ad->setResources(resources);
}
else
{
Ad = &A;
}
if (isClassical && consolidate_flag)
{
FatalError("Fine level consolidation not supported in CLASSICAL", AMGX_ERR_BAD_PARAMETERS);
}
// Reset distributed manager
if (A.manager != NULL )
{
delete A.manager;
A.manager = NULL;
}
A.manager = new DistributedManager<TConfig_d>(A);
A.setManagerExternal();
AMGX_ERROR err = DistributedRead<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::distributedReadDeviceInit(fnamec, Ah, *Ad, bh, xh, part_offsets_h, allocated_halo_depth, part, partitions, partSize, partitionVec, props);
if (err != AMGX_OK)
{
return err;
}
int64_t num_rows = Ah.get_num_rows();
int block_dimx = Ah.get_block_dimx();
int block_dimy = Ah.get_block_dimy();
int block_size = block_dimx * block_dimy;
void *x_host = xh.raw(), *b_host = bh.raw();
int sizeof_v_val = (t_vecPrec == AMGX_vecDouble) ? sizeof(double) : sizeof(float);
//int sizeof_v_val = sizeof(typename VecPrecisionMap<t_vecPrec>::Type);
cudaHostRegister(x_host, num_rows * block_dimx * sizeof_v_val, cudaHostRegisterMapped);
cudaHostRegister(b_host, num_rows * block_dimy * sizeof_v_val, cudaHostRegisterMapped);
cudaCheckError();
if (!isClassical)
{
// AGGREGATION path
if (partitions > 1)
{
if (Ad->get_allow_boundary_separation())
{
// TODO: This can be done without exporting of maps
int *btl_sizes = NULL;
int **btl_maps = NULL;
int *lth_sizes = NULL;
int **lth_maps = NULL;
int *neighbors = NULL;
int num_neighbors = Ad->manager->num_neighbors();
neighbors = (int *)malloc((num_neighbors) * sizeof(int));
Ad->manager->malloc_export_maps(&btl_maps, &btl_sizes, <h_maps, <h_sizes);
Ad->manager->export_neighbors(neighbors);
if (A.manager != NULL )
{
delete A.manager;
}
A.manager = new DistributedManager<TConfig_d>(A, 1, 1, num_neighbors, neighbors);
A.manager->cacheMapsOneRing((int const **) btl_maps, (const int *)btl_sizes, (int const **)lth_maps, (const int *)lth_sizes);
A.setManagerExternal();
A.manager->createComms(A.getResources());
A.manager->setAConsolidationFlags(A);
if (A.manager->isFineLevelConsolidated())
{
A.addProps(CSR);
A.setColsReorderedByColor(false);
A.delProps(COO);
A.delProps(DIAG);
A.setColsReorderedByColor(false);
if (Ah.hasProps(DIAG))
{
A.addProps(DIAG);
}
int nnz = Ah.get_num_nz();
typedef typename MatPrecisionMap<t_matPrec>::Type ValueType;
int *row_ptrs = NULL, *col_indices = NULL;
void *values, *diag_data = NULL;
// Use local column indices now
col_indices = Ad->col_indices.raw();
//row_ptrs = Ad->row_offsets.raw();
// values = Ad->values.raw();
//col_indices = Ad->col_indices.raw();
// row offsets are still global and not reordered
row_ptrs = Ah.row_offsets.raw();
values = Ah.values.raw();
// Do pinning of some buffers since fine level consolidation crashes when only one GPU is used
int sizeof_m_val = sizeof(ValueType);
cudaHostRegister(values, nnz * block_size * sizeof_m_val, cudaHostRegisterMapped);
cudaCheckError();
if (Ah.hasProps(DIAG))
{
//diag_data = (Ad->values.raw() + nnz*block_size);
diag_data = (Ah.values.raw() + nnz * block_size);
cudaHostRegister((void *)diag_data, num_rows * block_size * sizeof_m_val, cudaHostRegisterMapped);
cudaCheckError();
}
/*
cudaHostRegister(col_indices,nnz*sizeof(int),cudaHostRegisterMapped);
cudaCheckError();
*/
cudaHostRegister(row_ptrs, (num_rows + 1)*sizeof(int), cudaHostRegisterMapped);
cudaCheckError();
cudaSetDevice(A.getResources()->getDevice(0));
A.manager->consolidateAndUploadAll(num_rows, nnz, block_dimx, block_dimy, row_ptrs, col_indices, values, diag_data, A);
A.set_initialized(1);
cudaSetDevice(A.getResources()->getDevice(0));
if (diag_data != NULL)
{
cudaHostUnregister(diag_data);
}
cudaHostUnregister(values);
cudaHostUnregister(row_ptrs);
//cudaHostUnregister(col_indices);
cudaCheckError();
delete Ad;
}
else
{
A.manager->createComms(A.getResources());
A.manager->updateMapsReorder();
} // End consolidation check
free_maps_one_ring<int>(num_neighbors, neighbors, btl_sizes, btl_maps, lth_sizes, lth_maps);
A.set_is_matrix_read_partitioned(true);
b.set_is_vector_read_partitioned(true);
x.set_is_vector_read_partitioned(true);
x.setManager(*(A.manager));
b.setManager(*(A.manager));
b.set_block_dimx(1);
b.set_block_dimy(block_dimy);
x.set_block_dimx(1);
x.set_block_dimy(block_dimx);
A.manager->transformAndUploadVector(b, b_host, num_rows, b.get_block_dimy());
A.manager->transformAndUploadVector(x, x_host, num_rows, x.get_block_dimy());
cudaHostUnregister(b_host);
cudaHostUnregister(x_host);
cudaDeviceSynchronize();
return AMGX_OK;
} // end of boundary separation
}
else
{
A.computeDiagonal();
A.set_initialized(1);
A.setView(OWNED);
} // End if partitions>1
}
else
{
// CLASSICAL
/* WARNING: in the classical path, even if a single partition is used it needs
to setup the distributed data structures, because they are later used in the
code. For instance, halo_offsets must be set correctly, otherwise
generateInterpolationMatrix_1x1 -> exchange_halo_2ring -> setup -> do_setup
will fail when accessing them. Therefore, classical path can not be encapsulated
in the if (npartitions > 1) { ... } statement and cl-116816 moves it out. */
if (Ad->get_allow_boundary_separation())
{
A.set_initialized(0);
A.manager->neighbors.resize(0);
A.manager->renumberMatrixOneRing();
A.manager->createOneRingHaloRows();
A.manager->getComms()->set_neighbors(A.manager->num_neighbors());
A.setView(OWNED);
A.set_initialized(1);
A.set_is_matrix_read_partitioned(true);
b.set_is_vector_read_partitioned(true);
x.set_is_vector_read_partitioned(true);
x.setManager(*(A.manager));
b.setManager(*(A.manager));
b.set_block_dimx(1);
b.set_block_dimy(block_dimy);
x.set_block_dimx(1);
x.set_block_dimy(block_dimx);
A.manager->transformAndUploadVector(b, b_host, num_rows, b.get_block_dimy());
A.manager->transformAndUploadVector(x, x_host, num_rows, x.get_block_dimy());
cudaHostUnregister(b_host);
cudaHostUnregister(x_host);
cudaDeviceSynchronize();
return AMGX_OK;
}
}
// just copy remaining data to device
copyPartition<TConfig_h, TConfig_d>(part, partitions, part_offsets_h.raw(), Ah.get_block_dimx(), Ah.get_block_dimy(), bh, xh, b, x);
cudaHostUnregister(b_host);
cudaHostUnregister(x_host);
return AMGX_OK;
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
AMGX_ERROR DistributedRead<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::distributedRead(const char *fnamec, Matrix<TConfig_d> &A, Vector<TConfig_d> &b, int allocated_halo_depth, int part, int partitions, IVector_h &partSize, IVector_h &partitionVec, unsigned int props)
{
Vector<TConfig_d> v = Vector<TConfig_d>(0);
if (io_config::hasProps(io_config::RHS, props))
{
return distributedRead(fnamec, A, b, v, allocated_halo_depth, part, partitions, partSize, partitionVec, props);
}
else
{
return distributedRead(fnamec, A, v, b, allocated_halo_depth, part, partitions, partSize, partitionVec, props);
}
}
/****************************************
* Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class DistributedRead<TemplateMode<CASE>::Type >;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
|
8dcd25f8156db3aecd1cc124425a0627a9ea8218.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffeine/im2col.hpp"
#include "common/cuda.hpp"
namespace caffe {
__global__ void im2col_gpu_kernel(const int n, const DTYPE* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
DTYPE* data_col) {
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
DTYPE* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const DTYPE* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(const DTYPE* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
DTYPE* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
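// Worked size example (illustrative): a 5x5 input, 3x3 kernel, pad 1, stride 1 gives
// height_col = width_col = (5 + 2*1 - 3)/1 + 1 = 5, so num_kernels = channels * 25
// threads are launched, each copying one 3x3 patch of its input channel into one
// column of data_col.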
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
template <bool add>
__global__ void col2im_gpu_kernel(const int n, const DTYPE* data_col,
const int height, const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
DTYPE* data_im) {
CUDA_KERNEL_LOOP(index, n) {
DTYPE val = 0;
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * patch_w
+ (w - w_col * stride_w);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
*/
// equivalent implementation
int offset =
(c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
int coeff_w_col = (1 - stride_w * height_col * width_col);
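// Equivalence sketch: substituting
// c_col = c * patch_h * patch_w + (h - h_col * stride_h) * patch_w + (w - w_col * stride_w)
// into (c_col * height_col + h_col) * width_col + w_col and collecting the h_col and
// w_col terms gives offset + h_col * coeff_h_col + w_col * coeff_w_col, so the loop
// below reads exactly the same data_col entries without recomputing c_col each time.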
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
if (add) {
data_im[index] += val;
} else {
data_im[index] = val;
}
}
}
void col2im_gpu(const DTYPE* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, DTYPE* data_im, bool add) {
int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
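// In other words, each thread owns one element of data_im and gathers (sums) every
// data_col entry that was produced from it, instead of having threads scatter-add
// into data_im, which would require atomics.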
// NOLINT_NEXT_LINE(whitespace/operators)
if (add) {
hipLaunchKernelGGL(( col2im_gpu_kernel<true>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, patch_h, patch_w,
pad_h, pad_w, stride_h, stride_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
} else {
hipLaunchKernelGGL(( col2im_gpu_kernel<false>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, patch_h, patch_w,
pad_h, pad_w, stride_h, stride_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
}
} // namespace caffe
| 8dcd25f8156db3aecd1cc124425a0627a9ea8218.cu | #include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffeine/im2col.hpp"
#include "common/cuda.hpp"
namespace caffe {
__global__ void im2col_gpu_kernel(const int n, const DTYPE* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
DTYPE* data_col) {
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
DTYPE* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const DTYPE* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(const DTYPE* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
DTYPE* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
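// Worked size example (illustrative): a 5x5 input, 3x3 kernel, pad 1, stride 1 gives
// height_col = width_col = (5 + 2*1 - 3)/1 + 1 = 5, so num_kernels = channels * 25
// threads are launched, each copying one 3x3 patch of its input channel into one
// column of data_col.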
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
template <bool add>
__global__ void col2im_gpu_kernel(const int n, const DTYPE* data_col,
const int height, const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
DTYPE* data_im) {
CUDA_KERNEL_LOOP(index, n) {
DTYPE val = 0;
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * patch_h * patch_w + (h - h_col * stride_h) * patch_w
+ (w - w_col * stride_w);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
*/
// equivalent implementation
int offset =
(c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
int coeff_w_col = (1 - stride_w * height_col * width_col);
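// Equivalence sketch: substituting
// c_col = c * patch_h * patch_w + (h - h_col * stride_h) * patch_w + (w - w_col * stride_w)
// into (c_col * height_col + h_col) * width_col + w_col and collecting the h_col and
// w_col terms gives offset + h_col * coeff_h_col + w_col * coeff_w_col, so the loop
// below reads exactly the same data_col entries without recomputing c_col each time.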
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
if (add) {
data_im[index] += val;
} else {
data_im[index] = val;
}
}
}
void col2im_gpu(const DTYPE* data_col, const int channels,
const int height, const int width, const int patch_h, const int patch_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, DTYPE* data_im, bool add) {
int height_col = (height + 2 * pad_h - patch_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - patch_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
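// In other words, each thread owns one element of data_im and gathers (sums) every
// data_col entry that was produced from it, instead of having threads scatter-add
// into data_im, which would require atomics.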
// NOLINT_NEXT_LINE(whitespace/operators)
if (add) {
col2im_gpu_kernel<true><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, patch_h, patch_w,
pad_h, pad_w, stride_h, stride_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
} else {
col2im_gpu_kernel<false><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, patch_h, patch_w,
pad_h, pad_w, stride_h, stride_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
}
} // namespace caffe
|
c68d0eb4ca2e17ca0ab84dae8ba937b1fa65f65b.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "deform_im2col_hip.cuh"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// #include <THH/THH.h>
// #include <THH/THHAtomics.cuh>
// #include <THH/THHDeviceUtils.cuh>
// extern THCState *state;
// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
at::Tensor
deform_conv_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int group,
const int deformable_group,
const int im2col_step)
{
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
const int im2col_step_ = ::min(batch, im2col_step);
AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_);
AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
"channels(%d) and channels_out(%d) must be divisible by group(%d)", channels, channels_out, group);
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == (channels_kernel * group),
"Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto output = at::empty({batch * height_out * width_out, channels_out}, input.options());
// prepare group weight and bias
auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
auto bias_g = bias.view({group, channels_out/group});
// define alias for easy use
const int batch_n = im2col_step_;
const int per_input_size = channels * height * width;
const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
auto output_n = output.view({batch/im2col_step_, batch_n * height_out * width_out, channels_out});
for (int n = 0; n < batch/im2col_step_; ++n)
{
auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * height_out * width_out}, input.options());
AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_forward_cuda", ([&] {
deformable_im2col_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>());
}));
// auto columns_m = columns.t();
// auto weight_m = weight.view({channels_out, channels_kernel * kernel_h * kernel_w}).t();
// output = at::addmm(bias, columns_m, weight_m);
auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});
auto output_g = output_n.select(0, n).view({batch_n * height_out * width_out, group, channels_out/group});
for (int g = 0; g < group; ++g)
{
auto columns_gm = columns_g.select(0, g).t();
auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
auto output_m = at::addmm(bias_g.select(0, g), columns_gm, weight_gm);
output_g.select(1, g) = output_m.view({batch_n * height_out * width_out, channels_out/group});
}
}
output = output.view({batch, height_out, width_out, channels_out}).permute({0, 3, 1, 2}).contiguous();
return output;
}
std::vector<at::Tensor> deform_conv_cuda_backward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &grad_output,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int group,
const int deformable_group,
const int im2col_step)
{
AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
const int batch_ = grad_output.size(0);
const int channels_out_ = grad_output.size(1);
const int height_out_ = grad_output.size(2);
const int width_out_ = grad_output.size(3);
const int im2col_step_ = ::min(im2col_step, batch);
AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_);
AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
"channels(%d) and channels_out(%d) must be divisible by group(%d)", channels, channels_out, group);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == (channels_kernel * group),
"Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
AT_ASSERTM(batch == batch_,
"Input shape and grad_out batch wont match: (%d vs %d).", batch, batch_);
AT_ASSERTM(channels_out == channels_out_,
"Input shape and grad_out channels_out wont match: (%d vs %d).", channels_out, channels_out_);
AT_ASSERTM(height_out == height_out_ && width_out == width_out_,
"Input shape and grad_out shape wont match: (%d x %d vs %d x %d).", height_out, height_out_, width_out, width_out_);
auto grad_input = at::zeros_like(input);
auto grad_offset = at::zeros_like(offset);
auto grad_weight = at::zeros_like(weight);
auto grad_bias = at::zeros_like(bias);
// auto grad_output_m = grad_output.permute({1, 0, 2, 3}).contiguous().view({channels_out, batch * height_out * width_out});
// auto weight_m = weight.view({channels_out, channels_kernel * kernel_h * kernel_w}).t();
// columns = at::mm(weight_m, grad_output_m);
// prepare group weight and bias
auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
auto grad_weight_g = grad_weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
auto grad_bias_g = grad_bias.view({group, channels_out/group});
const int batch_n = im2col_step_;
const int per_input_size = channels * height * width;
const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, channels_out, height_out, width_out});
for (int n = 0; n < batch/im2col_step_; ++n)
{
auto grad_output_g = grad_output_n.select(0, n).view({batch_n, group, channels_out/group, height_out, width_out});
auto ones = at::ones({batch_n * height_out * width_out}, input.options());
auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * 1 * height_out * width_out}, input.options());
auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});
for (int g = 0; g < group; ++g)
{
auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out});
auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
columns_g.select(0, g) = at::mm(weight_gm, grad_output_gm);
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_backward_cuda", ([&] {
deformable_col2im_coord_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
columns.data<scalar_t>(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_offset.data<scalar_t>() + n * im2col_step_ * per_offset_size);
// gradient w.r.t. input data
deformable_col2im_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
columns.data<scalar_t>(),
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_input.data<scalar_t>() + n * im2col_step_ * per_input_size);
// gradient w.r.t. weight, dWeight should accumulate across the batch and group
deformable_im2col_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
columns.data<scalar_t>());
}));
// auto grad_output_m = grad_output.permute({1, 0, 2, 3}).contiguous().view({channels_out, batch * height_out * width_out});
// grad_weight = at::mm(grad_output_m, columns.t()).view_as(weight);
// grad_bias = at::mv(grad_output_m, ones);
// auto grad_output_g = grad_output.view({batch, group, channels_out/group, height_out, width_out});
// auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch * height_out * width_out});
for (int g = 0; g < group; ++g)
{
auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out});
auto columns_gm = columns_g.select(0, g).t();
auto grad_weight_gm = grad_weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w});
auto grad_bias_gm = grad_bias_g.select(0, g);
grad_weight_g.select(0, g) = at::addmm(grad_weight_gm, grad_output_gm, columns_gm).view_as(grad_weight_g.select(0, g));
grad_bias_g.select(0, g) = at::addmv(grad_bias_gm, grad_output_gm, ones);
}
}
return {
grad_input, grad_offset, grad_weight, grad_bias
};
}
| c68d0eb4ca2e17ca0ab84dae8ba937b1fa65f65b.cu | #include <vector>
#include "deform_im2col_cuda.cuh"
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
// #include <THC/THC.h>
// #include <THC/THCAtomics.cuh>
// #include <THC/THCDeviceUtils.cuh>
// extern THCState *state;
// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
at::Tensor
deform_conv_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int group,
const int deformable_group,
const int im2col_step)
{
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
const int im2col_step_ = std::min(batch, im2col_step);
AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_);
AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
"channels(%d) and channels_out(%d) must be divisible by group(%d)", channels, channels_out, group);
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == (channels_kernel * group),
"Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto output = at::empty({batch * height_out * width_out, channels_out}, input.options());
// prepare group weight and bias
auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
auto bias_g = bias.view({group, channels_out/group});
// define alias for easy use
const int batch_n = im2col_step_;
const int per_input_size = channels * height * width;
const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
auto output_n = output.view({batch/im2col_step_, batch_n * height_out * width_out, channels_out});
for (int n = 0; n < batch/im2col_step_; ++n)
{
auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * height_out * width_out}, input.options());
AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_forward_cuda", ([&] {
deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>());
}));
// auto columns_m = columns.t();
// auto weight_m = weight.view({channels_out, channels_kernel * kernel_h * kernel_w}).t();
// output = at::addmm(bias, columns_m, weight_m);
auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});
auto output_g = output_n.select(0, n).view({batch_n * height_out * width_out, group, channels_out/group});
for (int g = 0; g < group; ++g)
{
auto columns_gm = columns_g.select(0, g).t();
auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
auto output_m = at::addmm(bias_g.select(0, g), columns_gm, weight_gm);
output_g.select(1, g) = output_m.view({batch_n * height_out * width_out, channels_out/group});
}
}
output = output.view({batch, height_out, width_out, channels_out}).permute({0, 3, 1, 2}).contiguous();
return output;
}
std::vector<at::Tensor> deform_conv_cuda_backward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &grad_output,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int group,
const int deformable_group,
const int im2col_step)
{
AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
const int batch_ = grad_output.size(0);
const int channels_out_ = grad_output.size(1);
const int height_out_ = grad_output.size(2);
const int width_out_ = grad_output.size(3);
const int im2col_step_ = std::min(im2col_step, batch);
AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_);
AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
"channels(%d) and channels_out(%d) must be divisible by group(%d)", channels, channels_out, group);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == (channels_kernel * group),
"Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
AT_ASSERTM(batch == batch_,
"Input shape and grad_out batch wont match: (%d vs %d).", batch, batch_);
AT_ASSERTM(channels_out == channels_out_,
"Input shape and grad_out channels_out wont match: (%d vs %d).", channels_out, channels_out_);
AT_ASSERTM(height_out == height_out_ && width_out == width_out_,
"Input shape and grad_out shape wont match: (%d x %d vs %d x %d).", height_out, height_out_, width_out, width_out_);
auto grad_input = at::zeros_like(input);
auto grad_offset = at::zeros_like(offset);
auto grad_weight = at::zeros_like(weight);
auto grad_bias = at::zeros_like(bias);
// auto grad_output_m = grad_output.permute({1, 0, 2, 3}).contiguous().view({channels_out, batch * height_out * width_out});
// auto weight_m = weight.view({channels_out, channels_kernel * kernel_h * kernel_w}).t();
// columns = at::mm(weight_m, grad_output_m);
// prepare group weight and bias
auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
auto grad_weight_g = grad_weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
auto grad_bias_g = grad_bias.view({group, channels_out/group});
const int batch_n = im2col_step_;
const int per_input_size = channels * height * width;
const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, channels_out, height_out, width_out});
for (int n = 0; n < batch/im2col_step_; ++n)
{
auto grad_output_g = grad_output_n.select(0, n).view({batch_n, group, channels_out/group, height_out, width_out});
auto ones = at::ones({batch_n * height_out * width_out}, input.options());
auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * 1 * height_out * width_out}, input.options());
auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});
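// Per group: columns_g = W_g^T * grad_output_g, i.e. the gradient w.r.t. the im2col buffer.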
for (int g = 0; g < group; ++g)
{
auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out});
auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
columns_g.select(0, g) = at::mm(weight_gm, grad_output_gm);
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_backward_cuda", ([&] {
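// gradient w.r.t. the sampling offsets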
deformable_col2im_coord_cuda(at::cuda::getCurrentCUDAStream(),
columns.data<scalar_t>(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_offset.data<scalar_t>() + n * im2col_step_ * per_offset_size);
// gradient w.r.t. input data
deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),
columns.data<scalar_t>(),
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_input.data<scalar_t>() + n * im2col_step_ * per_input_size);
// gradient w.r.t. weight, dWeight should accumulate across the batch and group
deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
columns.data<scalar_t>());
}));
// auto grad_output_m = grad_output.permute({1, 0, 2, 3}).contiguous().view({channels_out, batch * height_out * width_out});
// grad_weight = at::mm(grad_output_m, columns.t()).view_as(weight);
// grad_bias = at::mv(grad_output_m, ones);
// auto grad_output_g = grad_output.view({batch, group, channels_out/group, height_out, width_out});
// auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch * height_out * width_out});
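// Accumulate per-group gradients: grad_weight_g += grad_output_g * columns_g^T, grad_bias_g += grad_output_g * ones.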
for (int g = 0; g < group; ++g)
{
auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out});
auto columns_gm = columns_g.select(0, g).t();
auto grad_weight_gm = grad_weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w});
auto grad_bias_gm = grad_bias_g.select(0, g);
grad_weight_g.select(0, g) = at::addmm(grad_weight_gm, grad_output_gm, columns_gm).view_as(grad_weight_g.select(0, g));
grad_bias_g.select(0, g) = at::addmv(grad_bias_gm, grad_output_gm, ones);
}
}
return {
grad_input, grad_offset, grad_weight, grad_bias
};
}
|
b2938c99fc5b2156509ef6994f5365e786d42e59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
using namespace std;
static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number)
{
if(err!=hipSuccess)
{
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call(call,msg,__FILE__,__LINE__)
// inline double seconds()
// {
// struct timeval tp;
// struct timezone tzp;
// int i = gettimeofday(&tp, &tzp);
// return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
// }
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(float *a, float *b, float *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
hipEvent_t start, stop;
SAFE_CALL(hipEventCreate(&start), "Error creating start event");
SAFE_CALL(hipEventCreate(&stop), "Error creating stop event");
// Size of vectors
int n = 1<<23;
// Host input vectors
float *h_a;
float *h_b;
//Host output vector
float *h_c;
// Device input vectors
float *d_a;
float *d_b;
//Device output vector
float *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(float);
// Allocate memory for each vector on host
h_a = (float*)malloc(bytes);
h_b = (float*)malloc(bytes);
h_c = (float*)malloc(bytes);
// Allocate memory for each vector on GPU
SAFE_CALL(hipMalloc(&d_a, bytes), "Error allocating da");
SAFE_CALL(hipMalloc(&d_b, bytes), "Error allocating db");
SAFE_CALL(hipMalloc(&d_c, bytes), "Error allocating dc");
// Initialize vectors on host
for(int i = 0; i < n; i++ ) {
h_a[i] = 1 ;
h_b[i] = 1 ;
}
// Copy host vectors to device
SAFE_CALL(hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice), "Error copying ha -> da");
SAFE_CALL(hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice), "Error copying hb -> db");
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
printf("Gridsize: %d Blocksize: %d\n", gridSize, blockSize);
auto start_cpu = chrono::high_resolution_clock::now();
SAFE_CALL(hipEventRecord(start, 0), "Error recording event");
// Execute the kernel
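// hipLaunchKernelGGL arguments: kernel, grid dim, block dim, dynamic shared memory bytes, stream, then the kernel arguments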
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
SAFE_CALL(hipDeviceSynchronize(),"Kernel Launch Failed");
auto end_cpu = chrono::high_resolution_clock::now();
SAFE_CALL(hipEventRecord(stop, 0), "Error recording event stop");
SAFE_CALL(hipEventSynchronize(stop), "Error synchronizing events");
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
float elapsedTime;
SAFE_CALL(hipEventElapsedTime(&elapsedTime, start, stop), "Error calculating elapsed time");
printf("Time spent for %d elements: %.5f ms; %f\n",n, elapsedTime, duration_ms.count());
// Copy array back to host
SAFE_CALL(hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost ), "Error copying dc -> hc");
// Sum up vector c and print result divided by n, this should equal 1 within error
float sum = 0;
for(int i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f \n", sum/n);
hipEventDestroy(start);
hipEventDestroy(stop);
// Release device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
| b2938c99fc5b2156509ef6994f5365e786d42e59.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
using namespace std;
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
if(err!=cudaSuccess)
{
fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call,msg) _safe_cuda_call(call,msg,__FILE__,__LINE__)
// inline double seconds()
// {
// struct timeval tp;
// struct timezone tzp;
// int i = gettimeofday(&tp, &tzp);
// return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
// }
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(float *a, float *b, float *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
cudaEvent_t start, stop;
SAFE_CALL(cudaEventCreate(&start), "Error creating start event");
SAFE_CALL(cudaEventCreate(&stop), "Error creating stop event");
// Size of vectors
int n = 1<<23;
// Host input vectors
float *h_a;
float *h_b;
//Host output vector
float *h_c;
// Device input vectors
float *d_a;
float *d_b;
//Device output vector
float *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(float);
// Allocate memory for each vector on host
h_a = (float*)malloc(bytes);
h_b = (float*)malloc(bytes);
h_c = (float*)malloc(bytes);
// Allocate memory for each vector on GPU
SAFE_CALL(cudaMalloc(&d_a, bytes), "Error allocating da");
SAFE_CALL(cudaMalloc(&d_b, bytes), "Error allocating db");
SAFE_CALL(cudaMalloc(&d_c, bytes), "Error allocating dc");
// Initialize vectors on host
for(int i = 0; i < n; i++ ) {
h_a[i] = 1 ;
h_b[i] = 1 ;
}
// Copy host vectors to device
SAFE_CALL(cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice), "Error copying ha -> da");
SAFE_CALL(cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice), "Error copying hb -> db");
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
printf("Gridsize: %d Blocksize: %d\n", gridSize, blockSize);
auto start_cpu = chrono::high_resolution_clock::now();
SAFE_CALL(cudaEventRecord(start, 0), "Error recording event");
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
auto end_cpu = chrono::high_resolution_clock::now();
SAFE_CALL(cudaEventRecord(stop, 0), "Error recording event stop");
SAFE_CALL(cudaEventSynchronize(stop), "Error synchronizing events");
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
float elapsedTime;
SAFE_CALL(cudaEventElapsedTime(&elapsedTime, start, stop), "Error calculating elapsed time");
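// elapsedTime is GPU-side time between the two recorded events; duration_ms is host wall-clock time including the device synchronize.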
printf("Time spent for %d elements: %.5f ms; %f\n",n, elapsedTime, duration_ms.count());
// Copy array back to host
SAFE_CALL(cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost ), "Error copying dc -> hc");
// Sum up vector c and print result divided by n, this should equal 1 within error
float sum = 0;
for(int i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f \n", sum/n);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
d4a1c786b1aae914cb9a8b9947b7a597a322c28d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* normalizedCrossCorrelation.cu
*
* Microbenchmark for normalized cross correlation, a template-
* matching algorithm for computer vision.
*
* Build with: nvcc -I ../chLib <options> normalizedCrossCorrelation.cu ..\chLib\pgm.cu
*
* Make sure to include pgm.cu for the image file I/O support.
*
* To avoid warnings about double precision support, specify the
* target gpu-architecture, e.g.:
* nvcc --gpu-architecture sm_13 -I ../chLib <options> normalizedCrossCorrelation.cu pgm.cu
*
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <chError.h>
#include <chCommandLine.h>
#include <chAssert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include "pgm.h"
texture<unsigned char, 2> texImage;
texture<unsigned char, 2> texTemplate;
const int maxTemplatePixels = 3072;
__constant__ int g_xOffset[maxTemplatePixels];
__constant__ int g_yOffset[maxTemplatePixels];
__constant__ unsigned char g_Tpix[maxTemplatePixels];
__constant__ float g_cPixels, g_SumT, g_fDenomExp;
unsigned int g_cpuSumT, g_cpuSumTSq;
const float fThreshold = 1e-3f;
#define INTCEIL(a,b) ( ((a)+(b)-1) / (b) )
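// Normalized cross-correlation coefficient:
// gamma = (N*Sum(IT) - Sum(I)*Sum(T)) / sqrt( (N*Sum(I^2) - Sum(I)^2) * (N*Sum(T^2) - Sum(T)^2) )
// fDenomExp carries the precomputed template term N*Sum(T^2) - Sum(T)^2.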
__device__ __host__ inline float
CorrelationValue( float SumI, float SumISq, float SumIT, float SumT, float cPixels, float fDenomExp )
{
float Numerator = cPixels*SumIT - SumI*SumT;
float Denominator = rsqrtf( (cPixels*SumISq - SumI*SumI)*fDenomExp );
return Numerator * Denominator;
}
#include "corrTexTexSums.cuh"
#include "corrTexTex.cuh"
#include "corrTexConstantSums.cuh"
#include "corrTexConstant.cuh"
extern __shared__ unsigned char LocalBlock[];
#include "corrSharedSMSums.cuh"
#include "corrSharedSM.cuh"
#include "corrSharedSums.cuh"
#include "corrShared.cuh"
#include "corrShared4Sums.cuh"
#include "corrShared4.cuh"
int poffsetx[maxTemplatePixels];
int poffsety[maxTemplatePixels];
hipError_t
CopyToTemplate(
unsigned char *img, size_t imgPitch,
int xTemplate, int yTemplate,
int wTemplate, int hTemplate,
int OffsetX, int OffsetY
)
{
hipError_t status;
unsigned char pixels[maxTemplatePixels];
int inx = 0;
int SumT = 0;
int SumTSq = 0;
int cPixels = wTemplate*hTemplate;
size_t sizeOffsets = cPixels*sizeof(int);
float fSumT, fDenomExp, fcPixels;
cuda(Memcpy2D(
pixels, wTemplate,
img+yTemplate*imgPitch+xTemplate, imgPitch,
wTemplate, hTemplate,
hipMemcpyDeviceToHost ) );
cuda(MemcpyToSymbol( g_Tpix, pixels, cPixels ) );
for ( int i = OffsetY; i < OffsetY+hTemplate; i++ ) {
for ( int j = OffsetX; j < OffsetX+wTemplate; j++) {
SumT += pixels[inx];
SumTSq += pixels[inx]*pixels[inx];
poffsetx[inx] = j;
poffsety[inx] = i;
inx += 1;
}
}
g_cpuSumT = SumT;
g_cpuSumTSq = SumTSq;
cuda(MemcpyToSymbol(g_xOffset, poffsetx, sizeOffsets) );
cuda(MemcpyToSymbol(g_yOffset, poffsety, sizeOffsets) );
fSumT = (float) SumT;
cuda(MemcpyToSymbol(g_SumT, &fSumT, sizeof(float)) );
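// Template half of the NCC denominator, computed in double to avoid intermediate integer overflow.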
fDenomExp = float( (double)cPixels*SumTSq - (double) SumT*SumT);
cuda(MemcpyToSymbol(g_fDenomExp, &fDenomExp, sizeof(float)) );
fcPixels = (float) cPixels;
cuda(MemcpyToSymbol(g_cPixels, &fcPixels, sizeof(float)) );
Error:
return status;
}
int
bCompareCorrValues( const float *pBase0,
const float *pBase1,
int w, int h )
{
for ( int j = 0; j < h; j++ ) {
float *pf0 = (float *) ((char *) pBase0+j*w*sizeof(float));
float *pf1 = (float *) ((char *) pBase1+j*w*sizeof(float));
for ( int i = 0; i < w; i++ ) {
if ( fabsf(pf0[i]-pf1[i]) > fThreshold ) {
printf( "Mismatch pf0[%d] = %.5f, pf1[%d] = %.5f\n", i, pf0[i], i, pf1[i] );
fflush( stdout );
//CH_ASSERT(0);
return 1;
}
}
}
return 0;
}
int
bCompareSums( const int *pBaseI0, const int *pBaseISq0, const int *pBaseIT0,
const int *pBaseI1, const int *pBaseISq1, const int *pBaseIT1,
int w, int h )
{
for ( int j = 0; j < h; j++ ) {
const int *pi0 = (const int *) ((char *) pBaseI0+j*w*sizeof(int));
const int *pi1 = (const int *) ((char *) pBaseI1+j*w*sizeof(int));
const int *pisq0 = (const int *) ((char *) pBaseISq0+j*w*sizeof(int));
const int *pisq1 = (const int *) ((char *) pBaseISq1+j*w*sizeof(int));
const int *pit0 = (const int *) ((char *) pBaseIT0+j*w*sizeof(int));
const int *pit1 = (const int *) ((char *) pBaseIT1+j*w*sizeof(int));
for ( int i = 0; i < w; i++ ) {
if ( pi0[i] != pi1[i] ||
pisq0[i] != pisq1[i] ||
pit0[i] != pit1[i] ) {
printf( "Mismatch pi[%d] = %d, reference = %d\n", i, pi0[i], pi1[i] );
printf( "Mismatch pisq[%d] = %d, reference = %d\n", i, pisq0[i], pisq1[i] );
printf( "Mismatch pit[%d] = %d, reference = %d\n", i, pit0[i], pit1[i] );
fflush( stdout );
//CH_ASSERT(0);
return 1;
}
}
}
return 0;
}
unsigned char
ReadPixel( unsigned char *base, int pitch, int w, int h, int x, int y )
{
if ( x < 0 ) x = 0;
if ( x >= w ) x = w-1;
if ( y < 0 ) y = 0;
if ( y >= h ) y = h-1;
return base[y*pitch+x];
}
void
corrCPU( float *pCorr,
int *_pI, int *_pISq, int *_pIT,
size_t CorrPitch,
int cPixels,
int xTemplate, int yTemplate,
int w, int h,
unsigned char *img, int imgPitch,
unsigned char *tmp, int tmpPitch )
{
for ( int row = 0; row < h; row += 1 ) {
float *pOut = (float *) (((char *) pCorr)+row*CorrPitch);
int *pI = (int *) (((char *) _pI)+row*CorrPitch);
int *pISq = (int *) (((char *) _pISq)+row*CorrPitch);
int *pIT = (int *) (((char *) _pIT)+row*CorrPitch);
for ( int col = 0; col < w; col += 1 ) {
int SumI = 0;
int SumT = 0;
int SumISq = 0;
int SumTSq = 0;
int SumIT = 0;
for ( int j = 0; j < cPixels; j++ ) {
unsigned char I = ReadPixel( img, imgPitch, w, h, col+poffsetx[j], row+poffsety[j] );
unsigned char T = ReadPixel( tmp, tmpPitch, w, h, xTemplate+poffsetx[j], yTemplate+poffsety[j] );
SumI += I;
SumT += T;
SumISq += I*I;
SumTSq += T*T;
SumIT += I*T;
}
float fDenomExp = float((double) cPixels*SumTSq - (double) SumT*SumT);
pI[col] = SumI;
pISq[col] = SumISq;
pIT[col] = SumIT;
pOut[col] = CorrelationValue( (float) SumI, (float) SumISq, (float) SumIT, (float) SumT, (float) cPixels, fDenomExp );
}
}
}
bool
TestCorrelation(
double *pixelsPerSecond, // passbacks to report performance
double *templatePixelsPerSecond, //
int xOffset, int yOffset, // offset into image
int w, int h, // width and height of output
const float *hrefCorr, // host reference data
const int *hrefSumI,
const int *hrefSumISq,
const int *hrefSumIT,
int xTemplate, int yTemplate, // reference point in template image
int wTemplate, int hTemplate,
int wTile, // width of image tile
int sharedPitch, int sharedMem,
dim3 threads, dim3 blocks,
void (*pfnCorrelationSums)(
float *dCorr, int CorrPitch,
int *dSumI, int *dSumISq, int *dSumIT,
int wTile,
int wTemplate, int hTemplate,
float cPixels,
float fDenomExp,
int sharedPitch,
int xOffset, int yOffset,
int xTemplate, int yTemplate,
int xUL, int yUL, int w, int h,
dim3 threads, dim3 blocks,
int sharedMem ),
void (*pfnCorrelation)(
float *dCorr, int CorrPitch,
int wTile,
int wTemplate, int hTemplate,
float cPixels,
float fDenomExp,
int sharedPitch,
int xOffset, int yOffset,
int xTemplate, int yTemplate,
int xUL, int yUL, int w, int h,
dim3 threads, dim3 blocks,
int sharedMem ),
bool bPrintNeighborhood = false,
int cIterations = 1,
const char *outputFilename = NULL
)
{
hipError_t status;
bool ret = false;
size_t CorrPitch;
float cPixels = (float) wTemplate*hTemplate;
float fDenomExp = float((double) cPixels*g_cpuSumTSq - (double) g_cpuSumT*g_cpuSumT);
float *hCorr = NULL, *dCorr = NULL;
int *hSumI = NULL, *dSumI = NULL;
int *hSumISq = NULL, *dSumISq = NULL;
int *hSumIT = NULL, *dSumIT = NULL;
hipEvent_t start = 0, stop = 0;
hCorr = (float *) malloc( w*sizeof(float)*h );
hSumI = (int *) malloc( w*sizeof(int)*h );
hSumISq = (int *) malloc( w*sizeof(int)*h );
hSumIT = (int *) malloc( w*sizeof(int)*h );
if ( NULL == hCorr || NULL == hSumI || NULL == hSumISq || NULL == hSumIT )
goto Error;
cuda(MallocPitch( (void **) &dCorr, &CorrPitch, w*sizeof(float), h ) );
cuda(MallocPitch( (void **) &dSumI, &CorrPitch, w*sizeof(int), h ) );
cuda(MallocPitch( (void **) &dSumISq, &CorrPitch, w*sizeof(int), h ) );
cuda(MallocPitch( (void **) &dSumIT, &CorrPitch, w*sizeof(int), h ) );
cuda(Memset( dCorr, 0, CorrPitch*h ) );
cuda(Memset( dSumI, 0, CorrPitch*h ) );
cuda(Memset( dSumISq, 0, CorrPitch*h ) );
cuda(Memset( dSumIT, 0, CorrPitch*h ) );
cuda(EventCreate( &start, 0 ) );
cuda(EventCreate( &stop, 0 ) );
pfnCorrelationSums(
dCorr, CorrPitch,
dSumI, dSumISq, dSumIT,
wTile,
wTemplate, hTemplate,
cPixels, fDenomExp,
sharedPitch,
xOffset, yOffset,
xTemplate, yTemplate,
0, 0, w, h,
threads, blocks, sharedMem );
cuda(Memcpy2D( hSumI, w*sizeof(int), dSumI, CorrPitch, w*sizeof(int), h, hipMemcpyDeviceToHost ) );
cuda(Memcpy2D( hSumISq, w*sizeof(int), dSumISq, CorrPitch, w*sizeof(int), h, hipMemcpyDeviceToHost ) );
cuda(Memcpy2D( hSumIT, w*sizeof(int), dSumIT, CorrPitch, w*sizeof(int), h, hipMemcpyDeviceToHost ) );
if ( bCompareSums( hSumI, hSumISq, hSumIT,
hrefSumI, hrefSumISq, hrefSumIT,
w, h ) ) {
//CH_ASSERT(0);
printf( "Sums miscompare\n" );
goto Error;
}
cuda(Memcpy2D( hCorr, w*sizeof(float), dCorr, CorrPitch, w*sizeof(float), h, hipMemcpyDeviceToHost ) );
if ( bCompareCorrValues( hrefCorr, hCorr, w, h ) ) {
//CH_ASSERT(0);
printf( "Correlation coefficients generated by sums kernel mismatch\n" );
goto Error;
}
cuda(Memset2D( dCorr, CorrPitch, 0, w*sizeof(float), h ) );
cuda(DeviceSynchronize() );
cuda(EventRecord( start, 0 ) );
for ( int i = 0; i < cIterations; i++ ) {
pfnCorrelation(
dCorr, CorrPitch,
wTile,
wTemplate, hTemplate,
cPixels, fDenomExp,
sharedPitch,
xOffset, yOffset,
xTemplate, yTemplate,
0, 0, w, h,
threads, blocks, sharedMem );
}
cuda(EventRecord( stop, 0 ) );
cuda(Memcpy2D( hCorr, w*sizeof(float), dCorr, CorrPitch, w*sizeof(float), h, hipMemcpyDeviceToHost ) );
if ( bCompareCorrValues( hrefCorr, hCorr, w, h ) ) {
CH_ASSERT(0);
printf( "Correlation coefficients generated by coefficient-only kernel mismatch\n" );
goto Error;
}
{
float ms;
cuda(EventElapsedTime( &ms, start, stop ) );
*pixelsPerSecond = (double) w*h*cIterations*1000.0 / ms;
*templatePixelsPerSecond = *pixelsPerSecond*wTemplate*hTemplate;
}
if ( bPrintNeighborhood ) {
printf( "\nNeighborhood around template:\n" );
for ( int VertOffset = -4; VertOffset <= 4; VertOffset++ ) {
const float *py = hrefCorr+w*(VertOffset+yTemplate);
for ( int HorzOffset = -4; HorzOffset <= 4; HorzOffset++ ) {
printf( "%6.2f", py[xTemplate+HorzOffset] );
}
printf("\n");
}
}
if ( outputFilename ) {
unsigned char *correlationValues = (unsigned char *) malloc( w*h );
if ( ! correlationValues ) {
status = hipErrorMemoryAllocation;
goto Error;
}
for ( int row = 0; row < h; row++ ) {
for ( int col = 0; col < w; col++ ) {
int index = row*w+col;
float value = hCorr[index] < 0.0f ? 0.0f : logf( 1.0f+hCorr[index] )/logf(2.0f);
if ( value < 0.5f ) value = 0.0f;
value = 2.0f * (value - 0.5f);
correlationValues[index] = (unsigned char) (255.0f*value+0.5f);
}
}
if ( 0 != pgmSave( outputFilename, correlationValues, w, h ) ) {
status = hipErrorUnknown;
goto Error;
}
free( correlationValues );
}
ret = true;
Error:
hipEventDestroy( start );
hipEventDestroy( stop );
free( hCorr );
free( hSumI );
free( hSumISq );
free( hSumIT );
if ( dCorr ) hipFree( dCorr );
if ( dSumI ) hipFree( dSumI );
if ( dSumISq ) hipFree( dSumISq );
if ( dSumIT ) hipFree( dSumIT );
return ret;
}
int
main(int argc, char *argv[])
{
int ret = 1;
hipError_t status;
unsigned char *hidata = NULL;
unsigned char *didata = NULL;
float *hoCorrCPU = NULL;
int *hoCorrCPUI = NULL;
int *hoCorrCPUISq = NULL;
int *hoCorrCPUIT = NULL;
unsigned int HostPitch, DevicePitch;
int w, h;
int wTemplate = 52;
int hTemplate = 52;
int xOffset, yOffset;
int xTemplate = 210;
int yTemplate = 148;
int wTile;
dim3 threads;
dim3 blocks;
int sharedPitch;
int sharedMem;
char defaultInputFilename[] = "coins.pgm";
char *inputFilename = defaultInputFilename;
char *outputFilename = NULL;
hipArray *pArrayImage = NULL;
hipArray *pArrayTemplate = NULL;
hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned char>();
if ( chCommandLineGetBool( "help", argc, argv ) ) {
printf( "Usage:\n" );
printf( " --input <filename>: specify input filename (must be PGM)\n" );
printf( " --output <filename>: Write PGM of correlation values (0..255) to <filename>.\n" );
printf( " --padWidth <value>: pad input image width to specified value\n" );
printf( " --padHeight <value>: pad input image height to specified value\n" );
printf( " --xTemplate <value>: X coordinate of upper left corner of template\n" );
printf( " --yTemplate <value>: Y coordinate of upper left corner of template\n" );
printf( " --wTemplate <value>: Width of template\n" );
printf( " --hTemplate <value>: Height of template\n" );
printf( "\nDefault values are coins.pgm, no output file or padding, and template of the dime in the\n" );
printf("lower right corner of coins.pgm: xTemplate=210, yTemplate=148, wTemplate=hTemplate=52\n" );
return 0;
}
cuda(SetDeviceFlags( hipDeviceMapHost ) );
cuda(DeviceSetCacheConfig( hipFuncCachePreferShared ) );
if ( chCommandLineGet( &inputFilename, "input", argc, argv ) ) {
printf( "Reading from image file %s\n", inputFilename );
}
chCommandLineGet( &outputFilename, "output", argc, argv );
{
int padWidth = 0;
int padHeight = 0;
if ( chCommandLineGet( &padWidth, "padWidth", argc, argv ) ) {
if ( ! chCommandLineGet( &padHeight, "padHeight", argc, argv ) ) {
printf( "Must specify both --padWidth and --padHeight\n" );
goto Error;
}
}
else {
if ( chCommandLineGet( &padHeight, "padHeight", argc, argv ) ) {
printf( "Must specify both --padWidth and --padHeight\n" );
goto Error;
}
}
if ( pgmLoad(inputFilename, &hidata, &HostPitch, &didata, &DevicePitch, &w, &h, padWidth, padHeight) )
goto Error;
}
chCommandLineGet( &xTemplate, "xTemplate", argc, argv );
chCommandLineGet( &yTemplate, "yTemplate", argc, argv );
chCommandLineGet( &wTemplate, "wTemplate", argc, argv );
chCommandLineGet( &hTemplate, "hTemplate", argc, argv );
xOffset = -wTemplate/2;
yOffset = -wTemplate/2;
hoCorrCPU = (float *) malloc(w*h*sizeof(float)); if ( ! hoCorrCPU ) return 1;
hoCorrCPUI = (int *) malloc(w*h*sizeof(int)); if ( ! hoCorrCPUI ) return 1;
hoCorrCPUISq = (int *) malloc(w*h*sizeof(int)); if ( ! hoCorrCPUISq ) return 1;
hoCorrCPUIT = (int *) malloc(w*h*sizeof(int)); if ( ! hoCorrCPUIT ) return 1;
if ( NULL == hoCorrCPU ||
NULL == hoCorrCPUI ||
NULL == hoCorrCPUISq ||
NULL == hoCorrCPUIT )
goto Error;
cuda(MallocArray( &pArrayImage, &desc, w, h ) );
cuda(MallocArray( &pArrayTemplate, &desc, w, h ) );
cuda(MemcpyToArray( pArrayImage, 0, 0, hidata, w*h, hipMemcpyHostToDevice ) );
cuda(Memcpy2DArrayToArray( pArrayTemplate, 0, 0, pArrayImage, 0, 0, w, h, hipMemcpyDeviceToDevice ) );
cuda(BindTextureToArray( texImage, pArrayImage ) );
cuda(BindTextureToArray( texTemplate, pArrayTemplate ) );
CopyToTemplate( didata, DevicePitch,
xTemplate, yTemplate,
wTemplate, hTemplate,
xOffset, yOffset );
corrCPU( hoCorrCPU, hoCorrCPUI, hoCorrCPUISq, hoCorrCPUIT,
w*sizeof(float), wTemplate*hTemplate, xTemplate-xOffset, yTemplate-yOffset, w, h,
hidata, HostPitch, hidata, HostPitch );
// height of thread block must be >= hTemplate
wTile = 32;
threads = dim3(32,8);
blocks = dim3(w/wTile+(0!=w%wTile),h/threads.y+(0!=h%threads.y));
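// Round the shared-memory row pitch up to a multiple of 64 bytes.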
sharedPitch = ~63&(wTile+wTemplate+63);
sharedMem = sharedPitch*(threads.y+hTemplate);
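// Runs the *Sums kernel once to validate the partial sums against the CPU reference,
// then times cIterations of the coefficient-only kernel.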
#define TEST_VECTOR( baseName, bPrintNeighborhood, cIterations, outfile ) \
{ \
double pixelsPerSecond; \
double templatePixelsPerSecond; \
if ( ! TestCorrelation( &pixelsPerSecond, \
&templatePixelsPerSecond, \
xOffset, yOffset, \
w, h, \
hoCorrCPU, \
hoCorrCPUI, \
hoCorrCPUISq, \
hoCorrCPUIT, \
xTemplate-xOffset, yTemplate-yOffset, \
wTemplate, hTemplate, \
wTile, sharedPitch, sharedMem, \
threads, blocks, \
baseName##Sums, \
baseName, \
bPrintNeighborhood, cIterations, outfile ) ) { \
printf( "Error\n" ); \
} \
printf( "%s: %.2f Mpix/s\t%.2fGtpix/s\n", \
#baseName, pixelsPerSecond/1e6, templatePixelsPerSecond/1e9 ); \
}
TEST_VECTOR( corrShared, false, 100, NULL );
// height of thread block must be >= hTemplate
wTile = 32;
threads = dim3(32,8);
blocks = dim3(w/wTile+(0!=w%wTile),h/threads.y+(0!=h%threads.y));
sharedPitch = ~63&(((wTile+wTemplate)+63));
sharedMem = sharedPitch*(threads.y+hTemplate);
TEST_VECTOR( corrSharedSM, false, 100, NULL );
TEST_VECTOR( corrShared4, false, 100, NULL );
// set up blocking parameters for 2D tex-constant formulation
threads.x = 32; threads.y = 16; threads.z = 1;
blocks.x = INTCEIL(w,threads.x); blocks.y = INTCEIL(h,threads.y); blocks.z = 1;
TEST_VECTOR( corrTexConstant, false, 100, NULL );
if ( outputFilename ) {
printf( "Writing graymap of correlation values to %s\n", outputFilename );
}
// set up blocking parameters for 2D tex-tex formulation
threads.x = 16; threads.y = 8; threads.z = 1;
blocks.x = INTCEIL(w,threads.x); blocks.y = INTCEIL(h,threads.y); blocks.z = 1;
TEST_VECTOR( corrTexTex, false, 100, outputFilename );
ret = 0;
Error:
free( hoCorrCPU );
free( hoCorrCPUI );
free( hoCorrCPUISq );
free( hoCorrCPUIT );
free( hidata );
hipFree(didata);
hipFreeArray(pArrayImage);
hipFreeArray(pArrayTemplate);
return ret;
}
| d4a1c786b1aae914cb9a8b9947b7a597a322c28d.cu | /*
*
* normalizedCrossCorrelation.cu
*
* Microbenchmark for normalized cross correlation, a template-
* matching algorithm for computer vision.
*
* Build with: nvcc -I ../chLib <options> normalizedCrossCorrelation.cu ..\chLib\pgm.cu
*
* Make sure to include pgm.cu for the image file I/O support.
*
* To avoid warnings about double precision support, specify the
* target gpu-architecture, e.g.:
* nvcc --gpu-architecture sm_13 -I ../chLib <options> normalizedCrossCorrelation.cu pgm.cu
*
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <chError.h>
#include <chCommandLine.h>
#include <chAssert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include "pgm.h"
texture<unsigned char, 2> texImage;
texture<unsigned char, 2> texTemplate;
const int maxTemplatePixels = 3072;
__constant__ int g_xOffset[maxTemplatePixels];
__constant__ int g_yOffset[maxTemplatePixels];
__constant__ unsigned char g_Tpix[maxTemplatePixels];
__constant__ float g_cPixels, g_SumT, g_fDenomExp;
unsigned int g_cpuSumT, g_cpuSumTSq;
const float fThreshold = 1e-3f;
#define INTCEIL(a,b) ( ((a)+(b)-1) / (b) )
__device__ __host__ inline float
CorrelationValue( float SumI, float SumISq, float SumIT, float SumT, float cPixels, float fDenomExp )
{
float Numerator = cPixels*SumIT - SumI*SumT;
float Denominator = rsqrtf( (cPixels*SumISq - SumI*SumI)*fDenomExp );
return Numerator * Denominator;
}
#include "corrTexTexSums.cuh"
#include "corrTexTex.cuh"
#include "corrTexConstantSums.cuh"
#include "corrTexConstant.cuh"
extern __shared__ unsigned char LocalBlock[];
#include "corrSharedSMSums.cuh"
#include "corrSharedSM.cuh"
#include "corrSharedSums.cuh"
#include "corrShared.cuh"
#include "corrShared4Sums.cuh"
#include "corrShared4.cuh"
int poffsetx[maxTemplatePixels];
int poffsety[maxTemplatePixels];
cudaError_t
CopyToTemplate(
unsigned char *img, size_t imgPitch,
int xTemplate, int yTemplate,
int wTemplate, int hTemplate,
int OffsetX, int OffsetY
)
{
cudaError_t status;
unsigned char pixels[maxTemplatePixels];
int inx = 0;
int SumT = 0;
int SumTSq = 0;
int cPixels = wTemplate*hTemplate;
size_t sizeOffsets = cPixels*sizeof(int);
float fSumT, fDenomExp, fcPixels;
cuda(Memcpy2D(
pixels, wTemplate,
img+yTemplate*imgPitch+xTemplate, imgPitch,
wTemplate, hTemplate,
cudaMemcpyDeviceToHost ) );
cuda(MemcpyToSymbol( g_Tpix, pixels, cPixels ) );
for ( int i = OffsetY; i < OffsetY+hTemplate; i++ ) {
for ( int j = OffsetX; j < OffsetX+wTemplate; j++) {
SumT += pixels[inx];
SumTSq += pixels[inx]*pixels[inx];
poffsetx[inx] = j;
poffsety[inx] = i;
inx += 1;
}
}
g_cpuSumT = SumT;
g_cpuSumTSq = SumTSq;
cuda(MemcpyToSymbol(g_xOffset, poffsetx, sizeOffsets) );
cuda(MemcpyToSymbol(g_yOffset, poffsety, sizeOffsets) );
fSumT = (float) SumT;
cuda(MemcpyToSymbol(g_SumT, &fSumT, sizeof(float)) );
fDenomExp = float( (double)cPixels*SumTSq - (double) SumT*SumT);
cuda(MemcpyToSymbol(g_fDenomExp, &fDenomExp, sizeof(float)) );
fcPixels = (float) cPixels;
cuda(MemcpyToSymbol(g_cPixels, &fcPixels, sizeof(float)) );
Error:
return status;
}
int
bCompareCorrValues( const float *pBase0,
const float *pBase1,
int w, int h )
{
for ( int j = 0; j < h; j++ ) {
float *pf0 = (float *) ((char *) pBase0+j*w*sizeof(float));
float *pf1 = (float *) ((char *) pBase1+j*w*sizeof(float));
for ( int i = 0; i < w; i++ ) {
if ( fabsf(pf0[i]-pf1[i]) > fThreshold ) {
printf( "Mismatch pf0[%d] = %.5f, pf1[%d] = %.5f\n", i, pf0[i], i, pf1[i] );
fflush( stdout );
//CH_ASSERT(0);
return 1;
}
}
}
return 0;
}
int
bCompareSums( const int *pBaseI0, const int *pBaseISq0, const int *pBaseIT0,
const int *pBaseI1, const int *pBaseISq1, const int *pBaseIT1,
int w, int h )
{
for ( int j = 0; j < h; j++ ) {
const int *pi0 = (const int *) ((char *) pBaseI0+j*w*sizeof(int));
const int *pi1 = (const int *) ((char *) pBaseI1+j*w*sizeof(int));
const int *pisq0 = (const int *) ((char *) pBaseISq0+j*w*sizeof(int));
const int *pisq1 = (const int *) ((char *) pBaseISq1+j*w*sizeof(int));
const int *pit0 = (const int *) ((char *) pBaseIT0+j*w*sizeof(int));
const int *pit1 = (const int *) ((char *) pBaseIT1+j*w*sizeof(int));
for ( int i = 0; i < w; i++ ) {
if ( pi0[i] != pi1[i] ||
pisq0[i] != pisq1[i] ||
pit0[i] != pit1[i] ) {
printf( "Mismatch pi[%d] = %d, reference = %d\n", i, pi0[i], pi1[i] );
printf( "Mismatch pisq[%d] = %d, reference = %d\n", i, pisq0[i], pisq1[i] );
printf( "Mismatch pit[%d] = %d, reference = %d\n", i, pit0[i], pit1[i] );
fflush( stdout );
//CH_ASSERT(0);
return 1;
}
}
}
return 0;
}
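// Clamp out-of-range coordinates to the image border (clamp-to-edge).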
unsigned char
ReadPixel( unsigned char *base, int pitch, int w, int h, int x, int y )
{
if ( x < 0 ) x = 0;
if ( x >= w ) x = w-1;
if ( y < 0 ) y = 0;
if ( y >= h ) y = h-1;
return base[y*pitch+x];
}
void
corrCPU( float *pCorr,
int *_pI, int *_pISq, int *_pIT,
size_t CorrPitch,
int cPixels,
int xTemplate, int yTemplate,
int w, int h,
unsigned char *img, int imgPitch,
unsigned char *tmp, int tmpPitch )
{
for ( int row = 0; row < h; row += 1 ) {
float *pOut = (float *) (((char *) pCorr)+row*CorrPitch);
int *pI = (int *) (((char *) _pI)+row*CorrPitch);
int *pISq = (int *) (((char *) _pISq)+row*CorrPitch);
int *pIT = (int *) (((char *) _pIT)+row*CorrPitch);
for ( int col = 0; col < w; col += 1 ) {
int SumI = 0;
int SumT = 0;
int SumISq = 0;
int SumTSq = 0;
int SumIT = 0;
for ( int j = 0; j < cPixels; j++ ) {
unsigned char I = ReadPixel( img, imgPitch, w, h, col+poffsetx[j], row+poffsety[j] );
unsigned char T = ReadPixel( tmp, tmpPitch, w, h, xTemplate+poffsetx[j], yTemplate+poffsety[j] );
SumI += I;
SumT += T;
SumISq += I*I;
SumTSq += T*T;
SumIT += I*T;
}
float fDenomExp = float((double) cPixels*SumTSq - (double) SumT*SumT);
pI[col] = SumI;
pISq[col] = SumISq;
pIT[col] = SumIT;
pOut[col] = CorrelationValue( (float) SumI, (float) SumISq, (float) SumIT, (float) SumT, (float) cPixels, fDenomExp );
}
}
}
bool
TestCorrelation(
double *pixelsPerSecond, // passbacks to report performance
double *templatePixelsPerSecond, //
int xOffset, int yOffset, // offset into image
int w, int h, // width and height of output
const float *hrefCorr, // host reference data
const int *hrefSumI,
const int *hrefSumISq,
const int *hrefSumIT,
int xTemplate, int yTemplate, // reference point in template image
int wTemplate, int hTemplate,
int wTile, // width of image tile
int sharedPitch, int sharedMem,
dim3 threads, dim3 blocks,
void (*pfnCorrelationSums)(
float *dCorr, int CorrPitch,
int *dSumI, int *dSumISq, int *dSumIT,
int wTile,
int wTemplate, int hTemplate,
float cPixels,
float fDenomExp,
int sharedPitch,
int xOffset, int yOffset,
int xTemplate, int yTemplate,
int xUL, int yUL, int w, int h,
dim3 threads, dim3 blocks,
int sharedMem ),
void (*pfnCorrelation)(
float *dCorr, int CorrPitch,
int wTile,
int wTemplate, int hTemplate,
float cPixels,
float fDenomExp,
int sharedPitch,
int xOffset, int yOffset,
int xTemplate, int yTemplate,
int xUL, int yUL, int w, int h,
dim3 threads, dim3 blocks,
int sharedMem ),
bool bPrintNeighborhood = false,
int cIterations = 1,
const char *outputFilename = NULL
)
{
cudaError_t status;
bool ret = false;
size_t CorrPitch;
float cPixels = (float) wTemplate*hTemplate;
float fDenomExp = float((double) cPixels*g_cpuSumTSq - (double) g_cpuSumT*g_cpuSumT);
float *hCorr = NULL, *dCorr = NULL;
int *hSumI = NULL, *dSumI = NULL;
int *hSumISq = NULL, *dSumISq = NULL;
int *hSumIT = NULL, *dSumIT = NULL;
cudaEvent_t start = 0, stop = 0;
hCorr = (float *) malloc( w*sizeof(float)*h );
hSumI = (int *) malloc( w*sizeof(int)*h );
hSumISq = (int *) malloc( w*sizeof(int)*h );
hSumIT = (int *) malloc( w*sizeof(int)*h );
if ( NULL == hCorr || NULL == hSumI || NULL == hSumISq || NULL == hSumIT )
goto Error;
cuda(MallocPitch( (void **) &dCorr, &CorrPitch, w*sizeof(float), h ) );
cuda(MallocPitch( (void **) &dSumI, &CorrPitch, w*sizeof(int), h ) );
cuda(MallocPitch( (void **) &dSumISq, &CorrPitch, w*sizeof(int), h ) );
cuda(MallocPitch( (void **) &dSumIT, &CorrPitch, w*sizeof(int), h ) );
cuda(Memset( dCorr, 0, CorrPitch*h ) );
cuda(Memset( dSumI, 0, CorrPitch*h ) );
cuda(Memset( dSumISq, 0, CorrPitch*h ) );
cuda(Memset( dSumIT, 0, CorrPitch*h ) );
cuda(EventCreate( &start, 0 ) );
cuda(EventCreate( &stop, 0 ) );
pfnCorrelationSums(
dCorr, CorrPitch,
dSumI, dSumISq, dSumIT,
wTile,
wTemplate, hTemplate,
cPixels, fDenomExp,
sharedPitch,
xOffset, yOffset,
xTemplate, yTemplate,
0, 0, w, h,
threads, blocks, sharedMem );
cuda(Memcpy2D( hSumI, w*sizeof(int), dSumI, CorrPitch, w*sizeof(int), h, cudaMemcpyDeviceToHost ) );
cuda(Memcpy2D( hSumISq, w*sizeof(int), dSumISq, CorrPitch, w*sizeof(int), h, cudaMemcpyDeviceToHost ) );
cuda(Memcpy2D( hSumIT, w*sizeof(int), dSumIT, CorrPitch, w*sizeof(int), h, cudaMemcpyDeviceToHost ) );
if ( bCompareSums( hSumI, hSumISq, hSumIT,
hrefSumI, hrefSumISq, hrefSumIT,
w, h ) ) {
//CH_ASSERT(0);
printf( "Sums miscompare\n" );
goto Error;
}
cuda(Memcpy2D( hCorr, w*sizeof(float), dCorr, CorrPitch, w*sizeof(float), h, cudaMemcpyDeviceToHost ) );
if ( bCompareCorrValues( hrefCorr, hCorr, w, h ) ) {
//CH_ASSERT(0);
printf( "Correlation coefficients generated by sums kernel mismatch\n" );
goto Error;
}
cuda(Memset2D( dCorr, CorrPitch, 0, w*sizeof(float), h ) );
cuda(DeviceSynchronize() );
cuda(EventRecord( start, 0 ) );
for ( int i = 0; i < cIterations; i++ ) {
pfnCorrelation(
dCorr, CorrPitch,
wTile,
wTemplate, hTemplate,
cPixels, fDenomExp,
sharedPitch,
xOffset, yOffset,
xTemplate, yTemplate,
0, 0, w, h,
threads, blocks, sharedMem );
}
cuda(EventRecord( stop, 0 ) );
cuda(Memcpy2D( hCorr, w*sizeof(float), dCorr, CorrPitch, w*sizeof(float), h, cudaMemcpyDeviceToHost ) );
if ( bCompareCorrValues( hrefCorr, hCorr, w, h ) ) {
CH_ASSERT(0);
printf( "Correlation coefficients generated by coefficient-only kernel mismatch\n" );
goto Error;
}
{
float ms;
cuda(EventElapsedTime( &ms, start, stop ) );
*pixelsPerSecond = (double) w*h*cIterations*1000.0 / ms;
*templatePixelsPerSecond = *pixelsPerSecond*wTemplate*hTemplate;
}
if ( bPrintNeighborhood ) {
printf( "\nNeighborhood around template:\n" );
for ( int VertOffset = -4; VertOffset <= 4; VertOffset++ ) {
const float *py = hrefCorr+w*(VertOffset+yTemplate);
for ( int HorzOffset = -4; HorzOffset <= 4; HorzOffset++ ) {
printf( "%6.2f", py[xTemplate+HorzOffset] );
}
printf("\n");
}
}
if ( outputFilename ) {
unsigned char *correlationValues = (unsigned char *) malloc( w*h );
if ( ! correlationValues ) {
status = cudaErrorMemoryAllocation;
goto Error;
}
for ( int row = 0; row < h; row++ ) {
for ( int col = 0; col < w; col++ ) {
int index = row*w+col;
float value = hCorr[index] < 0.0f ? 0.0f : logf( 1.0f+hCorr[index] )/logf(2.0f);
if ( value < 0.5f ) value = 0.0f;
value = 2.0f * (value - 0.5f);
correlationValues[index] = (unsigned char) (255.0f*value+0.5f);
}
}
if ( 0 != pgmSave( outputFilename, correlationValues, w, h ) ) {
status = cudaErrorUnknown;
goto Error;
}
free( correlationValues );
}
ret = true;
Error:
cudaEventDestroy( start );
cudaEventDestroy( stop );
free( hCorr );
free( hSumI );
free( hSumISq );
free( hSumIT );
if ( dCorr ) cudaFree( dCorr );
if ( dSumI ) cudaFree( dSumI );
if ( dSumISq ) cudaFree( dSumISq );
if ( dSumIT ) cudaFree( dSumIT );
return ret;
}
int
main(int argc, char *argv[])
{
int ret = 1;
cudaError_t status;
unsigned char *hidata = NULL;
unsigned char *didata = NULL;
float *hoCorrCPU = NULL;
int *hoCorrCPUI = NULL;
int *hoCorrCPUISq = NULL;
int *hoCorrCPUIT = NULL;
unsigned int HostPitch, DevicePitch;
int w, h;
int wTemplate = 52;
int hTemplate = 52;
int xOffset, yOffset;
int xTemplate = 210;
int yTemplate = 148;
int wTile;
dim3 threads;
dim3 blocks;
int sharedPitch;
int sharedMem;
char defaultInputFilename[] = "coins.pgm";
char *inputFilename = defaultInputFilename;
char *outputFilename = NULL;
cudaArray *pArrayImage = NULL;
cudaArray *pArrayTemplate = NULL;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
if ( chCommandLineGetBool( "help", argc, argv ) ) {
printf( "Usage:\n" );
printf( " --input <filename>: specify input filename (must be PGM)\n" );
printf( " --output <filename>: Write PGM of correlation values (0..255) to <filename>.\n" );
printf( " --padWidth <value>: pad input image width to specified value\n" );
printf( " --padHeight <value>: pad input image height to specified value\n" );
printf( " --xTemplate <value>: X coordinate of upper left corner of template\n" );
printf( " --yTemplate <value>: Y coordinate of upper left corner of template\n" );
printf( " --wTemplate <value>: Width of template\n" );
printf( " --hTemplate <value>: Height of template\n" );
printf( "\nDefault values are coins.pgm, no output file or padding, and template of the dime in the\n" );
printf("lower right corner of coins.pgm: xTemplate=210, yTemplate=148, wTemplate=hTemplate=52\n" );
return 0;
}
cuda(SetDeviceFlags( cudaDeviceMapHost ) );
cuda(DeviceSetCacheConfig( cudaFuncCachePreferShared ) );
if ( chCommandLineGet( &inputFilename, "input", argc, argv ) ) {
printf( "Reading from image file %s\n", inputFilename );
}
chCommandLineGet( &outputFilename, "output", argc, argv );
{
int padWidth = 0;
int padHeight = 0;
if ( chCommandLineGet( &padWidth, "padWidth", argc, argv ) ) {
if ( ! chCommandLineGet( &padHeight, "padHeight", argc, argv ) ) {
printf( "Must specify both --padWidth and --padHeight\n" );
goto Error;
}
}
else {
if ( chCommandLineGet( &padHeight, "padHeight", argc, argv ) ) {
printf( "Must specify both --padWidth and --padHeight\n" );
goto Error;
}
}
if ( pgmLoad(inputFilename, &hidata, &HostPitch, &didata, &DevicePitch, &w, &h, padWidth, padHeight) )
goto Error;
}
chCommandLineGet( &xTemplate, "xTemplate", argc, argv );
chCommandLineGet( &yTemplate, "yTemplate", argc, argv );
chCommandLineGet( &wTemplate, "wTemplate", argc, argv );
chCommandLineGet( &hTemplate, "hTemplate", argc, argv );
xOffset = -wTemplate/2;
yOffset = -wTemplate/2;
hoCorrCPU = (float *) malloc(w*h*sizeof(float)); if ( ! hoCorrCPU ) return 1;
hoCorrCPUI = (int *) malloc(w*h*sizeof(int)); if ( ! hoCorrCPUI ) return 1;
hoCorrCPUISq = (int *) malloc(w*h*sizeof(int)); if ( ! hoCorrCPUISq ) return 1;
hoCorrCPUIT = (int *) malloc(w*h*sizeof(int)); if ( ! hoCorrCPUIT ) return 1;
if ( NULL == hoCorrCPU ||
NULL == hoCorrCPUI ||
NULL == hoCorrCPUISq ||
NULL == hoCorrCPUIT )
goto Error;
cuda(MallocArray( &pArrayImage, &desc, w, h ) );
cuda(MallocArray( &pArrayTemplate, &desc, w, h ) );
cuda(MemcpyToArray( pArrayImage, 0, 0, hidata, w*h, cudaMemcpyHostToDevice ) );
cuda(Memcpy2DArrayToArray( pArrayTemplate, 0, 0, pArrayImage, 0, 0, w, h, cudaMemcpyDeviceToDevice ) );
cuda(BindTextureToArray( texImage, pArrayImage ) );
cuda(BindTextureToArray( texTemplate, pArrayTemplate ) );
CopyToTemplate( didata, DevicePitch,
xTemplate, yTemplate,
wTemplate, hTemplate,
xOffset, yOffset );
corrCPU( hoCorrCPU, hoCorrCPUI, hoCorrCPUISq, hoCorrCPUIT,
w*sizeof(float), wTemplate*hTemplate, xTemplate-xOffset, yTemplate-yOffset, w, h,
hidata, HostPitch, hidata, HostPitch );
// height of thread block must be >= hTemplate
wTile = 32;
threads = dim3(32,8);
blocks = dim3(w/wTile+(0!=w%wTile),h/threads.y+(0!=h%threads.y));
sharedPitch = ~63&(wTile+wTemplate+63);
sharedMem = sharedPitch*(threads.y+hTemplate);
#define TEST_VECTOR( baseName, bPrintNeighborhood, cIterations, outfile ) \
{ \
double pixelsPerSecond; \
double templatePixelsPerSecond; \
if ( ! TestCorrelation( &pixelsPerSecond, \
&templatePixelsPerSecond, \
xOffset, yOffset, \
w, h, \
hoCorrCPU, \
hoCorrCPUI, \
hoCorrCPUISq, \
hoCorrCPUIT, \
xTemplate-xOffset, yTemplate-yOffset, \
wTemplate, hTemplate, \
wTile, sharedPitch, sharedMem, \
threads, blocks, \
baseName##Sums, \
baseName, \
bPrintNeighborhood, cIterations, outfile ) ) { \
printf( "Error\n" ); \
} \
printf( "%s: %.2f Mpix/s\t%.2fGtpix/s\n", \
#baseName, pixelsPerSecond/1e6, templatePixelsPerSecond/1e9 ); \
}
TEST_VECTOR( corrShared, false, 100, NULL );
// height of thread block must be >= hTemplate
wTile = 32;
threads = dim3(32,8);
blocks = dim3(w/wTile+(0!=w%wTile),h/threads.y+(0!=h%threads.y));
sharedPitch = ~63&(((wTile+wTemplate)+63));
sharedMem = sharedPitch*(threads.y+hTemplate);
TEST_VECTOR( corrSharedSM, false, 100, NULL );
TEST_VECTOR( corrShared4, false, 100, NULL );
// set up blocking parameters for 2D tex-constant formulation
threads.x = 32; threads.y = 16; threads.z = 1;
blocks.x = INTCEIL(w,threads.x); blocks.y = INTCEIL(h,threads.y); blocks.z = 1;
TEST_VECTOR( corrTexConstant, false, 100, NULL );
if ( outputFilename ) {
printf( "Writing graymap of correlation values to %s\n", outputFilename );
}
// set up blocking parameters for 2D tex-tex formulation
threads.x = 16; threads.y = 8; threads.z = 1;
blocks.x = INTCEIL(w,threads.x); blocks.y = INTCEIL(h,threads.y); blocks.z = 1;
TEST_VECTOR( corrTexTex, false, 100, outputFilename );
ret = 0;
Error:
free( hoCorrCPU );
free( hoCorrCPUI );
free( hoCorrCPUISq );
free( hoCorrCPUIT );
free( hidata );
cudaFree(didata);
cudaFreeArray(pArrayImage);
cudaFreeArray(pArrayTemplate);
return ret;
}
|
02a1c3ce8d794a4b948a220173d446a8717a3791.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "MultiHashGraph.cuh"
#include <algorithm>
#include <unistd.h>
// #define RAND_KEYS
// #define PRINT_KEYS
// #define BUILD_TEST
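// Thrust functor: maps an index to a uniformly distributed random key in [lo, hi].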
struct prg {
hkey_t lo, hi;
__host__ __device__ prg(hkey_t _lo=0, hkey_t _hi=0) : lo(_lo), hi(_hi) {};
__host__ __device__ hkey_t operator()(index_t index) const {
thrust::default_random_engine rng(index);
thrust::uniform_int_distribution<hkey_t> dist(lo, hi);
rng.discard(index);
return dist(rng);
}
};
// A recursive binary search function. It returns the location of x in the given array arr[l..r] if x is present;
// otherwise it returns the bin id of the smallest value larger than x.
int64_t binarySearch(hkey_t *bins, int32_t l, int64_t r, int32_t x) {
if (r >= l) {
int64_t mid = l + (r - l) / 2;
// If the element is present at the middle itself
if (bins[mid] == x)
return mid;
// If element is smaller than mid, then it can only be present in left subarray
if (bins[mid] > x)
return binarySearch(bins, l, mid - 1, x);
// Else the element can only be present in right subarray
return binarySearch(bins, mid + 1, r, x);
}
// We reach here when element is not present in array and return the bin id
// of the smallest value greater than x
return l;
}
void enablePeerAccess(uint32_t gpuCount) {
// Enable P2P access between each pair of GPUs.
for (index_t j = 0; j < gpuCount; j++) {
hipSetDevice(j);
for (index_t i = 0; i < gpuCount; i++) {
if (j != i) {
int isCapable;
hipDeviceCanAccessPeer(&isCapable, j, i);
if (isCapable == 1) {
hipError_t err = hipDeviceEnablePeerAccess(i, 0);
if (err == hipErrorPeerAccessAlreadyEnabled) {
hipGetLastError();
}
}
}
}
}
}
void generateInput(inputData *h_dVals, index_t countSize, index_t maxkey, uint32_t gpuCount,
index_t seed) {
std::cout << "generating input" << std::endl;
index_t avgKeyCount = ::ceil(countSize / ((double) gpuCount));
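// Partition the keys evenly across the GPUs; the last chunk is clamped to countSize.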
for (index_t i = 0; i < gpuCount; i++) {
hipSetDevice(i);
index_t lo = avgKeyCount * i;
index_t hi = avgKeyCount * (i + 1);
hi = std::min(hi, countSize);
index_t keyCount = hi - lo;
hipMalloc(&h_dVals[i].d_keys, keyCount * sizeof(hkey_t));
hipMalloc(&h_dVals[i].d_hash, keyCount * sizeof(HashKey));
// RMM_ALLOC(&h_dVals[i].d_keys, keyCount * sizeof(hkey_t), 0);
// RMM_ALLOC(&h_dVals[i].d_hash, keyCount * sizeof(HashKey), 0);
#ifdef RAND_KEYS
// Randomly generate input keys on each device.
thrust::counting_iterator<index_t> index_sequence_begin(seed);
thrust::transform(thrust::device, index_sequence_begin, index_sequence_begin + keyCount,
h_dVals[i].d_keys, prg(0, maxkey - 1));
#else
hkey_t *h_tmpKeys = new hkey_t[keyCount]();
for (index_t j = lo; j < hi; j++) {
h_tmpKeys[j - lo] = j;
}
hipMemcpy(h_dVals[i].d_keys, h_tmpKeys, keyCount * sizeof(hkey_t), hipMemcpyHostToDevice);
#endif
h_dVals[i].len = keyCount;
#ifdef PRINT_KEYS
std::cout << "keys gpu " << i << std::endl;
thrust::device_ptr<hkey_t> td_keys = thrust::device_pointer_cast(h_dVals[i].d_keys);
for (uint32_t j = 0; j < keyCount; j++) {
std::cout << *(td_keys + j) << " ";
}
std::cout << std::endl;
#endif
seed += keyCount;
}
std::cout << "done generating input" << std::endl;
}
int main(int argc, char **argv) {
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
std::cout << "deviceCount: " << deviceCount << std::endl;
char hostname[HOST_NAME_MAX];
gethostname(hostname, HOST_NAME_MAX);
std::cout << "hostname: " << hostname << std::endl;
index_t countSizeA = 1L << 24;
index_t maxkey = 1L << 26;
uint32_t binCount = 16000;
uint32_t gpuCount = 4;
index_t lrbBins = -1;
bool checkCorrectness = false;
bool buildTest = false;
index_t countSizeB = 1L << 22;
if (argc >= 2 && argc < 9) {
std::cerr << "Please specify all arguments.\n";
return 1;
}
if (argc >= 3) {
index_t size = strtoull(argv[1], NULL, 0);
countSizeA = size;
index_t key = strtoull(argv[2], NULL, 0);
maxkey = key;
binCount = atoi(argv[3]);
gpuCount = atoi(argv[4]);
lrbBins = strtoull(argv[5], NULL, 0);
// char *correctnessFlag = atoi(argv[5]);
// if (correctnessFlag > 0) {
if (!strcmp(argv[6], "check")) {
checkCorrectness = true;
}
countSizeB = strtoull(argv[7], NULL, 0);
if (!strcmp(argv[8], "build")) {
buildTest = true;
}
}
index_t tableSize = maxkey;
std::cout << "countSizeA: " << countSizeA << std::endl;
std::cout << "maxkey: " << maxkey << std::endl;
// rmm_mgpu_context_t contextA;
// rmm_mgpu_context_t contextB;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float buildTime = 0.0f; // milliseoncds
// enablePeerAccess(gpuCount);
// rmmOptions_t rmmO;
// rmmO.initial_pool_size = 1L << 60;
// rmmO.allocation_mode = PoolAllocation;
// rmmO.enable_logging = false;
// rmmO.num_devices = 16;
// int *devices = (int *)malloc(gpuCount * sizeof(int));
// for (index_t i = 0; i < gpuCount; i++) {
// devices[i] = i;
// }
//
// rmmO.devices = devices;
// rmmInitialize(&rmmO);
if (buildTest) {
inputData *h_dVals = new inputData[gpuCount]();
generateInput(h_dVals, countSizeA, maxkey, gpuCount, 0);
// MultiHashGraph mhg(h_dVals, countSizeA, maxkey, contextA, tableSize, binCount, lrbBins, gpuCount);
MultiHashGraph mhg(h_dVals, countSizeA, maxkey, tableSize, binCount, lrbBins, gpuCount);
omp_set_num_threads(gpuCount);
#ifdef CUDA_PROFILE
hipProfilerStart();
#endif
hipSetDevice(0);
hipEventRecord(start);
#pragma omp parallel
{
index_t tid = omp_get_thread_num();
mhg.build(true, tid);
} // pragma
hipSetDevice(0);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&buildTime, start, stop);
#ifdef CUDA_PROFILE
hipProfilerStop();
CHECK_ERROR("end of build");
#endif
std::cout << "multi buildTable() time: " << (buildTime / 1000.0) << "\n"; // seconds
if (checkCorrectness) {
mhg.destroyMulti();
mhg.buildSingle();
}
} else {
inputData *h_dValsA = new inputData[gpuCount]();
inputData *h_dValsB = new inputData[gpuCount]();
generateInput(h_dValsA, countSizeA, maxkey, gpuCount, 0);
generateInput(h_dValsB, countSizeB, maxkey, gpuCount, countSizeA);
std::cout << "hashgraph constructors" << std::endl;
// MultiHashGraph mhgA(h_dValsA, countSizeA, maxkey, contextA, tableSize, binCount, lrbBins, gpuCount);
// MultiHashGraph mhgB(h_dValsB, countSizeB, maxkey, contextB, tableSize, binCount, lrbBins, gpuCount);
MultiHashGraph mhgA(h_dValsA, countSizeA, maxkey, tableSize, binCount, lrbBins, gpuCount);
MultiHashGraph mhgB(h_dValsB, countSizeB, maxkey, tableSize, binCount, lrbBins, gpuCount);
std::cout << "done hashgraph constructors" << std::endl;
#ifdef MANAGED_MEM
std::cout << "managed mem constructors" << std::endl;
index_t size = 2 * (tableSize + gpuCount) * sizeof(index_t);
hipMallocManaged(&mhgA.uvmPtrIntersect, size);
mhgA.prefixArrayIntersect = new index_t[gpuCount + 1]();
mhgA.totalSizeIntersect = size;
std::cout << "done managed mem constructors" << std::endl;
#endif
keypair **h_dOutput = new keypair*[gpuCount]();
index_t *h_Common = new index_t[gpuCount]();
omp_set_num_threads(gpuCount);
#ifdef CUDA_PROFILE
hipProfilerStart();
#endif
hipSetDevice(0);
hipEventRecord(start);
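// One OpenMP thread per GPU: build hash graph A, reuse its bin splits to build B, then intersect.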
#pragma omp parallel
{
index_t tid = omp_get_thread_num();
mhgA.build(true, tid);
#pragma omp master
{
mhgB.h_binSplits = mhgA.h_binSplits; // small memory leak.
mhgB.h_dBinSplits = mhgA.h_dBinSplits;
#ifdef MANAGED_MEM
mhgA.prefixArrayIntersect[0] = 0;
for (index_t i = 1; i < gpuCount; i++) {
index_t tidHashRange = mhgA.h_binSplits[i] - mhgA.h_binSplits[i - 1];
index_t size = 2 * (tidHashRange + 1) * sizeof(index_t);
mhgA.prefixArrayIntersect[i] = mhgA.prefixArrayIntersect[i - 1] + size;
}
mhgA.prefixArrayIntersect[gpuCount] = mhgA.totalSizeIntersect;
mhgA.h_dCountCommon[0] = mhgA.uvmPtrIntersect;
for (index_t i = 1; i < gpuCount; i++) {
mhgA.h_dCountCommon[i] = mhgA.uvmPtrIntersect +
mhgA.prefixArrayIntersect[i];
}
#endif
} // master
#pragma omp barrier
mhgB.build(false, tid); // Build second HG but use same splits as first HG.
#pragma omp barrier
MultiHashGraph::intersect(mhgA, mhgB, h_Common, h_dOutput, tid);
} // pragma
hipSetDevice(0);
hipEventRecord(stop);
#ifdef CUDA_PROFILE
hipProfilerStop();
CHECK_ERROR("end of intersect");
#endif
hipEventSynchronize(stop);
hipEventElapsedTime(&buildTime, start, stop);
std::cout << "multi intersect() time: " << (buildTime / 1000.0) << "\n"; // seconds
if (checkCorrectness) {
mhgA.buildSingle();
mhgB.buildSingle();
index_t outputSize = 0;
for (index_t i = 0; i < gpuCount; i++) {
outputSize += h_Common[i];
}
keypair *h_output = new keypair[outputSize]();
index_t h_idx = 0;
for (index_t i = 0; i < gpuCount; i++) {
hipSetDevice(i);
hipMemcpy(h_output + h_idx, h_dOutput[i], h_Common[i] * sizeof(keypair),
hipMemcpyDeviceToHost);
h_idx += h_Common[i];
}
std::vector<hkey_t> result;
result.reserve(outputSize);
for (index_t i = 0; i < outputSize; i++) {
result.push_back(h_output[i].right);
}
if (result.size() != result.capacity()) {
std::cerr << "ERROR: RESULT ERROR" << std::endl;
exit(0);
}
std::sort(mhgA.h_vals, mhgA.h_vals + countSizeA);
std::sort(mhgB.h_vals, mhgB.h_vals + countSizeB);
std::vector<hkey_t> ans;
ans.reserve(outputSize);
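// CPU reference: for every key of A, count all equal keys in the sorted B array by scanning left and right from the binary-search hit.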
for (index_t i = 0; i < countSizeA; i++) {
index_t ogIdx = binarySearch(mhgB.h_vals, 0, countSizeB - 1, mhgA.h_vals[i]);
index_t idx = ogIdx;
while (idx >= 0 && mhgB.h_vals[idx] == mhgA.h_vals[i]) {
ans.push_back(mhgA.h_vals[i]);
idx--;
}
idx = ogIdx + 1;
while (idx < countSizeB && mhgB.h_vals[idx] == mhgA.h_vals[i]) {
ans.push_back(mhgA.h_vals[i]);
idx++;
}
// for (index_t j = 0; j < countSizeB; j++) {
// if (mhgA.h_vals[i] == mhgB.h_vals[j]) {
// ans.push_back(mhgA.h_vals[i]);
// }
// if (mhgA.h_vals[i] < mhgB.h_vals[j]) {
// break;
// }
// }
}
if (ans.size() != outputSize) {
std::cerr << "ERROR: INTERSECT OUTPUT HAS INCORRECT SIZE" << std::endl;
std::cerr << "ansSize: " << ans.size() << " outputSize: " << outputSize << std::endl;
// exit(0);
}
std::sort(result.begin(), result.end());
std::sort(ans.begin(), ans.end());
if (result != ans) {
std::cerr << "ERROR: INTERSECT OUTPUT HAS INCORRECT CONTENT" << std::endl;
std::cout << "output: " << std::endl;
for (auto i = result.begin(); i != result.end(); ++i) {
std::cout << *i << " ";
}
std::cout << std::endl;
std::cout << "ans: " << std::endl;
for (auto i = ans.begin(); i != ans.end(); ++i) {
std::cout << *i << " ";
}
std::cout << std::endl;
exit(0);
}
}
}
}
| 02a1c3ce8d794a4b948a220173d446a8717a3791.cu | /*
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "MultiHashGraph.cuh"
#include <algorithm>
#include <unistd.h>
// #define RAND_KEYS
// #define PRINT_KEYS
// #define BUILD_TEST
struct prg {
hkey_t lo, hi;
__host__ __device__ prg(hkey_t _lo=0, hkey_t _hi=0) : lo(_lo), hi(_hi) {};
__host__ __device__ hkey_t operator()(index_t index) const {
thrust::default_random_engine rng(index);
thrust::uniform_int_distribution<hkey_t> dist(lo, hi);
rng.discard(index);
return dist(rng);
}
};
// A recursive binary search function. It returns the location of x in the given array bins[l..r] if x is present,
// otherwise it returns the bin id with the smallest value larger than x
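// e.g. bins = [1, 3, 5, 7]: x = 5 returns 2; x = 4 also returns 2 (the bin of the smallest value larger than 4).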
int64_t binarySearch(hkey_t *bins, int32_t l, int64_t r, int32_t x) {
if (r >= l) {
int64_t mid = l + (r - l) / 2;
// If the element is present at the middle itself
if (bins[mid] == x)
return mid;
// If the element is smaller than bins[mid], then it can only be present in the left subarray
if (bins[mid] > x)
return binarySearch(bins, l, mid - 1, x);
// Else the element can only be present in right subarray
return binarySearch(bins, mid + 1, r, x);
}
// We reach here when element is not present in array and return the bin id
// of the smallest value greater than x
return l;
}
void enablePeerAccess(uint32_t gpuCount) {
// Enable P2P access between each pair of GPUs.
for (index_t j = 0; j < gpuCount; j++) {
cudaSetDevice(j);
for (index_t i = 0; i < gpuCount; i++) {
if (j != i) {
int isCapable;
cudaDeviceCanAccessPeer(&isCapable, j, i);
if (isCapable == 1) {
cudaError_t err = cudaDeviceEnablePeerAccess(i, 0);
if (err == cudaErrorPeerAccessAlreadyEnabled) {
cudaGetLastError();
}
}
}
}
}
}
void generateInput(inputData *h_dVals, index_t countSize, index_t maxkey, uint32_t gpuCount,
index_t seed) {
std::cout << "generating input" << std::endl;
index_t avgKeyCount = std::ceil(countSize / ((double) gpuCount));
for (index_t i = 0; i < gpuCount; i++) {
cudaSetDevice(i);
index_t lo = avgKeyCount * i;
index_t hi = avgKeyCount * (i + 1);
hi = std::min(hi, countSize);
index_t keyCount = hi - lo;
cudaMalloc(&h_dVals[i].d_keys, keyCount * sizeof(hkey_t));
cudaMalloc(&h_dVals[i].d_hash, keyCount * sizeof(HashKey));
// RMM_ALLOC(&h_dVals[i].d_keys, keyCount * sizeof(hkey_t), 0);
// RMM_ALLOC(&h_dVals[i].d_hash, keyCount * sizeof(HashKey), 0);
#ifdef RAND_KEYS
// Randomly generate input keys on each device.
thrust::counting_iterator<index_t> index_sequence_begin(seed);
thrust::transform(thrust::device, index_sequence_begin, index_sequence_begin + keyCount,
h_dVals[i].d_keys, prg(0, maxkey - 1));
#else
hkey_t *h_tmpKeys = new hkey_t[keyCount]();
for (index_t j = lo; j < hi; j++) {
h_tmpKeys[j - lo] = j;
}
cudaMemcpy(h_dVals[i].d_keys, h_tmpKeys, keyCount * sizeof(hkey_t), cudaMemcpyHostToDevice);
#endif
h_dVals[i].len = keyCount;
#ifdef PRINT_KEYS
std::cout << "keys gpu " << i << std::endl;
thrust::device_ptr<hkey_t> td_keys = thrust::device_pointer_cast(h_dVals[i].d_keys);
for (uint32_t j = 0; j < keyCount; j++) {
std::cout << *(td_keys + j) << " ";
}
std::cout << std::endl;
#endif
seed += keyCount;
}
std::cout << "done generating input" << std::endl;
}
int main(int argc, char **argv) {
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
std::cout << "deviceCount: " << deviceCount << std::endl;
char hostname[HOST_NAME_MAX];
gethostname(hostname, HOST_NAME_MAX);
std::cout << "hostname: " << hostname << std::endl;
index_t countSizeA = 1L << 24;
index_t maxkey = 1L << 26;
uint32_t binCount = 16000;
uint32_t gpuCount = 4;
index_t lrbBins = -1;
bool checkCorrectness = false;
bool buildTest = false;
index_t countSizeB = 1L << 22;
if (argc >= 2 && argc < 9) {
std::cerr << "Please specify all arguments.\n";
return 1;
}
if (argc >= 3) {
index_t size = strtoull(argv[1], NULL, 0);
countSizeA = size;
index_t key = strtoull(argv[2], NULL, 0);
maxkey = key;
binCount = atoi(argv[3]);
gpuCount = atoi(argv[4]);
lrbBins = strtoull(argv[5], NULL, 0);
// char *correctnessFlag = atoi(argv[5]);
// if (correctnessFlag > 0) {
if (!strcmp(argv[6], "check")) {
checkCorrectness = true;
}
countSizeB = strtoull(argv[7], NULL, 0);
if (!strcmp(argv[8], "build")) {
buildTest = true;
}
}
index_t tableSize = maxkey;
std::cout << "countSizeA: " << countSizeA << std::endl;
std::cout << "maxkey: " << maxkey << std::endl;
// rmm_mgpu_context_t contextA;
// rmm_mgpu_context_t contextB;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float buildTime = 0.0f; // milliseconds
// enablePeerAccess(gpuCount);
// rmmOptions_t rmmO;
// rmmO.initial_pool_size = 1L << 60;
// rmmO.allocation_mode = PoolAllocation;
// rmmO.enable_logging = false;
// rmmO.num_devices = 16;
// int *devices = (int *)malloc(gpuCount * sizeof(int));
// for (index_t i = 0; i < gpuCount; i++) {
// devices[i] = i;
// }
//
// rmmO.devices = devices;
// rmmInitialize(&rmmO);
if (buildTest) {
inputData *h_dVals = new inputData[gpuCount]();
generateInput(h_dVals, countSizeA, maxkey, gpuCount, 0);
// MultiHashGraph mhg(h_dVals, countSizeA, maxkey, contextA, tableSize, binCount, lrbBins, gpuCount);
MultiHashGraph mhg(h_dVals, countSizeA, maxkey, tableSize, binCount, lrbBins, gpuCount);
omp_set_num_threads(gpuCount);
#ifdef CUDA_PROFILE
cudaProfilerStart();
#endif
cudaSetDevice(0);
cudaEventRecord(start);
#pragma omp parallel
{
index_t tid = omp_get_thread_num();
mhg.build(true, tid);
} // pragma
cudaSetDevice(0);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&buildTime, start, stop);
#ifdef CUDA_PROFILE
cudaProfilerStop();
CHECK_ERROR("end of build");
#endif
std::cout << "multi buildTable() time: " << (buildTime / 1000.0) << "\n"; // seconds
if (checkCorrectness) {
mhg.destroyMulti();
mhg.buildSingle();
}
} else {
inputData *h_dValsA = new inputData[gpuCount]();
inputData *h_dValsB = new inputData[gpuCount]();
generateInput(h_dValsA, countSizeA, maxkey, gpuCount, 0);
generateInput(h_dValsB, countSizeB, maxkey, gpuCount, countSizeA);
std::cout << "hashgraph constructors" << std::endl;
// MultiHashGraph mhgA(h_dValsA, countSizeA, maxkey, contextA, tableSize, binCount, lrbBins, gpuCount);
// MultiHashGraph mhgB(h_dValsB, countSizeB, maxkey, contextB, tableSize, binCount, lrbBins, gpuCount);
MultiHashGraph mhgA(h_dValsA, countSizeA, maxkey, tableSize, binCount, lrbBins, gpuCount);
MultiHashGraph mhgB(h_dValsB, countSizeB, maxkey, tableSize, binCount, lrbBins, gpuCount);
std::cout << "done hashgraph constructors" << std::endl;
#ifdef MANAGED_MEM
std::cout << "managed mem constructors" << std::endl;
index_t size = 2 * (tableSize + gpuCount) * sizeof(index_t);
cudaMallocManaged(&mhgA.uvmPtrIntersect, size);
mhgA.prefixArrayIntersect = new index_t[gpuCount + 1]();
mhgA.totalSizeIntersect = size;
std::cout << "done managed mem constructors" << std::endl;
#endif
keypair **h_dOutput = new keypair*[gpuCount]();
index_t *h_Common = new index_t[gpuCount]();
omp_set_num_threads(gpuCount);
#ifdef CUDA_PROFILE
cudaProfilerStart();
#endif
cudaSetDevice(0);
cudaEventRecord(start);
#pragma omp parallel
{
index_t tid = omp_get_thread_num();
mhgA.build(true, tid);
#pragma omp master
{
mhgB.h_binSplits = mhgA.h_binSplits; // small memory leak.
mhgB.h_dBinSplits = mhgA.h_dBinSplits;
#ifdef MANAGED_MEM
mhgA.prefixArrayIntersect[0] = 0;
for (index_t i = 1; i < gpuCount; i++) {
index_t tidHashRange = mhgA.h_binSplits[i] - mhgA.h_binSplits[i - 1];
index_t size = 2 * (tidHashRange + 1) * sizeof(index_t);
mhgA.prefixArrayIntersect[i] = mhgA.prefixArrayIntersect[i - 1] + size;
}
mhgA.prefixArrayIntersect[gpuCount] = mhgA.totalSizeIntersect;
mhgA.h_dCountCommon[0] = mhgA.uvmPtrIntersect;
for (index_t i = 1; i < gpuCount; i++) {
mhgA.h_dCountCommon[i] = mhgA.uvmPtrIntersect +
mhgA.prefixArrayIntersect[i];
}
#endif
} // master
#pragma omp barrier
mhgB.build(false, tid); // Build second HG but use same splits as first HG.
#pragma omp barrier
MultiHashGraph::intersect(mhgA, mhgB, h_Common, h_dOutput, tid);
} // pragma
cudaSetDevice(0);
cudaEventRecord(stop);
#ifdef CUDA_PROFILE
cudaProfilerStop();
CHECK_ERROR("end of intersect");
#endif
cudaEventSynchronize(stop);
cudaEventElapsedTime(&buildTime, start, stop);
std::cout << "multi intersect() time: " << (buildTime / 1000.0) << "\n"; // seconds
if (checkCorrectness) {
mhgA.buildSingle();
mhgB.buildSingle();
index_t outputSize = 0;
for (index_t i = 0; i < gpuCount; i++) {
outputSize += h_Common[i];
}
keypair *h_output = new keypair[outputSize]();
index_t h_idx = 0;
for (index_t i = 0; i < gpuCount; i++) {
cudaSetDevice(i);
cudaMemcpy(h_output + h_idx, h_dOutput[i], h_Common[i] * sizeof(keypair),
cudaMemcpyDeviceToHost);
h_idx += h_Common[i];
}
std::vector<hkey_t> result;
result.reserve(outputSize);
for (index_t i = 0; i < outputSize; i++) {
result.push_back(h_output[i].right);
}
if (result.size() != result.capacity()) {
std::cerr << "ERROR: RESULT ERROR" << std::endl;
exit(0);
}
std::sort(mhgA.h_vals, mhgA.h_vals + countSizeA);
std::sort(mhgB.h_vals, mhgB.h_vals + countSizeB);
std::vector<hkey_t> ans;
ans.reserve(outputSize);
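// For each key of A, binary search the sorted B array, then expand left and right
// over duplicates to collect every matching occurrence.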
for (index_t i = 0; i < countSizeA; i++) {
index_t ogIdx = binarySearch(mhgB.h_vals, 0, countSizeB - 1, mhgA.h_vals[i]);
index_t idx = ogIdx;
while (idx >= 0 && mhgB.h_vals[idx] == mhgA.h_vals[i]) {
ans.push_back(mhgA.h_vals[i]);
idx--;
}
idx = ogIdx + 1;
while (idx < countSizeB && mhgB.h_vals[idx] == mhgA.h_vals[i]) {
ans.push_back(mhgA.h_vals[i]);
idx++;
}
// for (index_t j = 0; j < countSizeB; j++) {
// if (mhgA.h_vals[i] == mhgB.h_vals[j]) {
// ans.push_back(mhgA.h_vals[i]);
// }
// if (mhgA.h_vals[i] < mhgB.h_vals[j]) {
// break;
// }
// }
}
if (ans.size() != outputSize) {
std::cerr << "ERROR: INTERSECT OUTPUT HAS INCORRECT SIZE" << std::endl;
std::cerr << "ansSize: " << ans.size() << " outputSize: " << outputSize << std::endl;
// exit(0);
}
std::sort(result.begin(), result.end());
std::sort(ans.begin(), ans.end());
if (result != ans) {
std::cerr << "ERROR: INTERSECT OUTPUT HAS INCORRECT CONTENT" << std::endl;
std::cout << "output: " << std::endl;
for (auto i = result.begin(); i != result.end(); ++i) {
std::cout << *i << " ";
}
std::cout << std::endl;
std::cout << "ans: " << std::endl;
for (auto i = ans.begin(); i != ans.end(); ++i) {
std::cout << *i << " ";
}
std::cout << std::endl;
exit(0);
}
}
}
}
|
a118a125d5dff1ebabf262950a444b8d971791a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
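// Truncated product: out = prod_{k=1}^{79} (1 - a*q^k), approximating the q-Pochhammer symbol (a*q; q)_infinity.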
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
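// Euler's formula: e^(x+iy) = e^x * (cos(y) + i*sin(y))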
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
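// cos(z) = (e^(iz) + e^(-iz)) / 2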
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the1(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ hipComplex the2(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ hipComplex the3(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
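/* q-integer: [a]_q = (1 - q^a) / (1 - q) */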
__device__ hipComplex qin(hipComplex a, hipComplex q)
{
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
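/* out = sum_{n=0}^{19} [n^2]_q * z^n, where [m]_q is the q-integer of m */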
__device__ hipComplex geffa(hipComplex z, hipComplex q)
{
hipComplex out(0.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex wu(0.0,0.0);
hipComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ hipComplex thratd(hipComplex z, hipComplex q)
{
int n;
hipComplex fau(4.0,0.0);
hipComplex too(2.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex ennn(1.0,0.0);
hipComplex ni(-1.0,0.0);
hipComplex noo(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex loo = q;
hipComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265358979,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
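/* Truncated Jacobi theta sum: A = sum_{n=-10}^{9} exp(pi*i*tau*n^2) * exp(2*i*n*z) */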
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265358979,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this gets the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ hipComplex mitlef(hipComplex z,hipComplex c)
{
hipComplex out(0.0,0.0);
hipComplex Z(1.0,0.0);
hipComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ hipComplex helva(hipComplex z)
{
hipComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex hilva(hipComplex z)
{
hipComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex halva(hipComplex z)
{
hipComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex aciwa(hipComplex z)
{
hipComplex out(j0f(j1f(z.r)),j1f(j0f(z.i)));
return out;
}
__device__ hipComplex hinva(hipComplex z)
{
hipComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex henga(hipComplex z)
{
hipComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ hipComplex holva(hipComplex z)
{
hipComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ hipComplex aliva(hipComplex z)
{
hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ hipComplex ariva(hipComplex z)
{
hipComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ hipComplex arago(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex irigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thy(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q)));
}
return out;
}
__device__ hipComplex urigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
hipComplex ip(pi,0.0);
const float scale = 20;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
hipComplex effx(fx,0.0);
hipComplex effy(fy,0.0);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex aon = expc(ai*moux);
hipComplex uon= expc(mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(1.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (4.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex rhuva(3.0,0.0);
hipComplex rarva(8.0,0.0);
hipComplex bor(-10.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// almost Klein's j-invariant
//cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva);
for(v=0;v<10;v++)
{
cue = cue - (aon*halva(cue)-uon*hilva(cue))/(uon*helva(cue)-aon*hilva(cue));
/*cue = (aon*hilva(cue)-uon*helva(cue))/(aon*halva(cue)-uon*hilva(cue));*/
}
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(distanceKernel, dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ | a118a125d5dff1ebabf262950a444b8d971791a9.cu | #include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
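// Truncated product: out = prod_{k=1}^{79} (1 - a*q^k), approximating the q-Pochhammer symbol (a*q; q)_infinity.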
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
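// Euler's formula: e^(x+iy) = e^x * (cos(y) + i*sin(y))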
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
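// cos(z) = (e^(iz) + e^(-iz)) / 2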
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the1(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ cuComplex the2(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ cuComplex the3(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
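/* q-integer: [a]_q = (1 - q^a) / (1 - q) */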
__device__ cuComplex qin(cuComplex a, cuComplex q)
{
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
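/* out = sum_{n=0}^{19} [n^2]_q * z^n, where [m]_q is the q-integer of m */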
__device__ cuComplex geffa(cuComplex z, cuComplex q)
{
cuComplex out(0.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex wu(0.0,0.0);
cuComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ cuComplex thratd(cuComplex z, cuComplex q)
{
int n;
cuComplex fau(4.0,0.0);
cuComplex too(2.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex ennn(1.0,0.0);
cuComplex ni(-1.0,0.0);
cuComplex noo(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex loo = q;
cuComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265358979,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
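/* Truncated Jacobi theta sum: A = sum_{n=-10}^{9} exp(pi*i*tau*n^2) * exp(2*i*n*z) */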
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265358979,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this gets the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ cuComplex mitlef(cuComplex z,cuComplex c)
{
cuComplex out(0.0,0.0);
cuComplex Z(1.0,0.0);
cuComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ cuComplex helva(cuComplex z)
{
cuComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex hilva(cuComplex z)
{
cuComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex halva(cuComplex z)
{
cuComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex aciwa(cuComplex z)
{
cuComplex out(j0f(j1f(z.r)),j1f(j0f(z.i)));
return out;
}
__device__ cuComplex hinva(cuComplex z)
{
cuComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex henga(cuComplex z)
{
cuComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ cuComplex holva(cuComplex z)
{
cuComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ cuComplex aliva(cuComplex z)
{
cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ cuComplex ariva(cuComplex z)
{
cuComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ cuComplex arago(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex irigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thy(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q)));
}
return out;
}
__device__ cuComplex urigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
cuComplex ip(pi,0.0);
const float scale = 20;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
cuComplex effx(fx,0.0);
cuComplex effy(fy,0.0);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0.0);
cuComplex mouy(0.0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex aon = expc(ai*moux);
cuComplex uon= expc(mouy);
cuComplex flurn(0.0,0.0);
cuComplex accume(1.0,0.0);
cuComplex eccume(0.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.5,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(1.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex vlarv(1/3.0,0.0);
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex gloon (4.0,0.0);
cuComplex plenod(-.01,0.0);
cuComplex nue = cue;
cuComplex rhuva(3.0,0.0);
cuComplex rarva(8.0,0.0);
cuComplex bor(-10.0,0.0);
cuComplex nat(0.0,-10.0);
cuComplex rhus(1.0,0.0);
cuComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// almost Klein's j-invariant
//cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva);
for(v=0;v<10;v++)
{
cue = cue - (aon*halva(cue)-uon*hilva(cue))/(uon*helva(cue)-aon*hilva(cue));
/*cue = (aon*hilva(cue)-uon*helva(cue))/(aon*halva(cue)-uon*hilva(cue));*/
}
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ |
2e98d837b6ba3d907ff52093550088300df845c3.hip | // !!! This is a file automatically generated by hipify!!!
//============================================================
// STUDENT NAME: Zhou Yichen
// MATRIC NO. : A0113598X
// NUS EMAIL : [email protected]
// COMMENTS TO GRADER:
// Thanks for grading!
//
//============================================================
//
// FILE: unique.cu
// Include files from C standard library.
#include <stdlib.h>
#include <stdio.h>
#include <string.h> // For memcpy().
#include <math.h>
// Includes CUDA.
#include <hip/hip_runtime.h>
// Includes helper functions from CUDA Samples SDK.
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples.
// Include files to use Thrust (a C++ template library for CUDA).
// Thrust v1.7.0 is automatically installed with CUDA Toolkit 6.5.
// Read more about Thrust at the GitHub Thrust project page
// (http://thrust.github.com/).
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
/////////////////////////////////////////////////////////////////////////////
// CONSTANTS & GLOBAL VARIABLES
/////////////////////////////////////////////////////////////////////////////
#define NUM_ELEMS (5*1000000) // Number of elements in input array.
#define BLOCK_SIZE 256
#define NUM_BLOCKS ( ( (NUM_ELEMS) + (BLOCK_SIZE) - 1 ) / (BLOCK_SIZE) )
#define ELEM_MIN 1 // Minimum value in input array (must not be negative).
#define ELEM_MAX 100000 // Maximum value in input array (must not be negative).
//===========================================================================
// CUDA kernel used by GPU_Unique().
//
// Given an input sorted integer array, the kernel marks in the output array
// which elements of the input array should be kept/removed, so that if these
// elements were to be kept/removed, there would be no duplicate elements in
// the sorted array. We want to remove as few elements as possible from the
// input array.
//
// The output of the kernel is an array of 1's and 0's to indicate whether
// the corresponding elements in the input array should be kept or removed --
// a 1 means keep, and 0 means remove. The output array has the same number
// of elements as the input array.
//
// For example, given the following input array
//
// inSortedArray[] = [ 1 1 3 3 3 5 5 7 8 8 ]
//
// the output would be
//
// outSelectionArray[] = [ 1 0 1 0 0 1 0 1 1 0 ]
//
// so that if we keep only those elements in the input array that have a 1
// in the corresponding location in the output array, we will have the
// result [ 1 3 5 7 8 ].
//
// NOTE: You should use shared memory to minimize the number of uncoalesced
// global memory accesses. Shared memory conflicts must be minimized too.
//===========================================================================
__global__ void Kernel_MarkUnique( int *inSortedArray, int *outSelectionArray,
int numElems )
{
//***********************************************
//*********** WRITE YOUR CODE HERE **************
//***********************************************
__shared__ int sharedBlock[BLOCK_SIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tx = threadIdx.x;
// Write the value to the shared block array (only threads holding valid data load).
if (tid < numElems)
{
sharedBlock[tx] = inSortedArray[tid];
}
// sync to make sure all the values are written for the block.
// The barrier is kept outside the divergent branch so that every thread of the
// block reaches it, including the inactive threads of the last, partial block.
__syncthreads();
if (tid < numElems)
{
if (tid == 0)
{ // for the first element, always return 1
outSelectionArray[tid] = 1;
}
else
{
if (tx == 0) // for the first element in a block
{ // need to read the previous value from global memory as it is not in the current block.
if(sharedBlock[tx] > inSortedArray[tid - 1])
{
outSelectionArray[tid] = 1;
}
else
{
outSelectionArray[tid] = 0;
}
}
else
{ // otherwise can just read the previous value from the shared memory
if (sharedBlock[tx] > sharedBlock[tx - 1])
{
outSelectionArray[tid] = 1;
}
else
{
outSelectionArray[tid] = 0;
}
}
}
}
}
//===========================================================================
// CUDA kernel used by GPU_Unique().
//
// The kernel copies a selected set of elements from the input array to
// specified locations in the output array.
//
// For an input element inArray[i], if selectionArray[i] is 1, then
// the input element is copied to the output array outArray[].
// The location in the output array it is copied to is
// scatterAddressArray[i] + addressOffset.
//
// You can assume that no two elements in the input array inArray[]
// will be selected and copied to the same location in the output
// array outArray[].
//
// NOTE: You do not need to use shared memory, but try to keep the
// number of uncoalesced global memory accesses to a minimum.
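//
// Example: inArray[]             = [ 1 1 3 3 5 ]
//          selectionArray[]      = [ 1 0 1 0 1 ]
//          scatterAddressArray[] = [ 1 1 2 2 3 ]  with addressOffset = -1
//          gives outArray[] = [ 1 3 5 ].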
//===========================================================================
__global__ void Kernel_Scatter( int *inArray, int *selectionArray,
int *scatterAddressArray, int addressOffset,
int *outArray, int numElems )
{
//***********************************************
//*********** WRITE YOUR CODE HERE **************
//***********************************************
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numElems)
{
if (selectionArray[tid] == 1)
{
outArray[scatterAddressArray[tid] + addressOffset] = inArray[tid];
}
}
}
//===========================================================================
// Used by GPU_Unique().
//
// Use Thrust's sort algorithm to sort the input integer array on the GPU,
// in non-decreasing order. The sort is performed in-place,
//
// NOTE:
// * The input/output array is already allocated in the device memory.
//===========================================================================
static void GPU_SortIntegerArray( int *d_inoutArray, int numElems )
{
thrust::device_ptr<int> dev_ptr( d_inoutArray );
thrust::sort( dev_ptr, dev_ptr + numElems );
}
//===========================================================================
// Used by GPU_Unique().
//
// Use Thrust's scan algorithm to compute the "inclusive" all-prefix sums on the GPU.
// Also produces the sum of all elements in the input array in the output
// parameter *h_outInArraySum.
//
// NOTE: The input and output arrays are already allocated in the device memory.
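//
// Example: d_inArray[] = [ 1 0 1 0 1 ] gives d_outArray[] = [ 1 1 2 2 3 ]
// and (*h_outInArraySum) = 3.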
//===========================================================================
static void GPU_AllPrefixSums( int *d_inArray, int *d_outArray, int numElems,
int *h_outInArraySum )
{
thrust::device_ptr<int> in_dev_ptr( d_inArray );
thrust::device_ptr<int> out_dev_ptr( d_outArray );
thrust::inclusive_scan( in_dev_ptr, in_dev_ptr + numElems, out_dev_ptr );
// Get the sum of all the elements in the input array. This can be obtained
// from the last element in the all-prefix-sums array.
checkCudaErrors( hipMemcpy( h_outInArraySum, d_outArray + numElems - 1,
sizeof(int), hipMemcpyDeviceToHost ) );
// Using Thrust, the above memory copy can be written as:
// *h_outInArraySum = out_dev_ptr[ numElems - 1 ];
}
//===========================================================================
// GPU version.
//
// Given an input integer array, the function produces an output array
// which is a sorted version of the input array, but with duplicate
// elements removed. The output array is sorted in non-decreasing order.
// The function also produces the number of unique elements in the
// output array in the parameter (*numUniqueElems).
//
// For example, if the input array is [ 5 3 7 5 8 3 1 3 1 8 ], the
// output array would be [ 1 3 5 7 8 ].
//
// When this function is called, sufficient memory storage must have
// already been allocated for the output array. The safest is to allocate
// as much memory as for the input array.
//
// Here, a scan-and-scatter approach is used to do the stream compaction
// on the GPU. The following example demonstrates the steps.
//
// (0) Input array:
// inputArray[] = [ 5 3 7 5 8 3 1 3 1 8 ]
//
// (1) Sort inputArray[]:
// sortedArray[] = [ 1 1 3 3 3 5 5 7 8 8 ]
//
// (2) Mark the unique elements in sortedArray[]:
// selectionArray[] = [ 1 0 1 0 0 1 0 1 1 0 ]
//
// (3) Scan selectionArray[] ("inclusive" all-prefix sums):
// scatterAddressArray[] = [ 1 1 2 2 2 3 3 4 5 5 ]
//
// (4) Scatter sortedArray[] into outputArray[] using scatterAddressArray[] - 1:
// outputArray[] = [ 1 3 5 7 8 ]
//
// Note that the number of unique elements in the output array is the
// value of the last element in scatterAddressArray[].
//
// IMPORTANT: Step (1) to (4) must be computed on the GPU.
//
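// For reference only (a sketch, not a replacement for Steps (1)-(4) above):
// once the array has been sorted on the device, Thrust can also drop the
// duplicates in a single call (requires <thrust/unique.h>), e.g.
//
//     thrust::device_ptr<int> p( d_sortedArray );
//     thrust::device_ptr<int> new_end = thrust::unique( p, p + numInputElems );
//     int numUnique = (int)( new_end - p );  // unique values now in p[0 .. numUnique-1]
//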
//===========================================================================
static void GPU_Unique( const int inputArray[], int numInputElems,
int outputArray[], int *numUniqueElems )
{
if ( numInputElems < 1 )
{
(*numUniqueElems) = 0;
return;
}
//---------------------------------------------------------------------------
// Allocate device memory and copy input array from host memory to
// device memory.
//---------------------------------------------------------------------------
// Allocate device memory.
int *d_sortedArray, *d_selectionArray, *d_scatterAddressArray, *d_outputArray;
checkCudaErrors( hipMalloc( (void**) &d_sortedArray, numInputElems * sizeof(int) ) );
checkCudaErrors( hipMalloc( (void**) &d_selectionArray, numInputElems * sizeof(int) ) );
checkCudaErrors( hipMalloc( (void**) &d_scatterAddressArray, numInputElems * sizeof(int) ) );
checkCudaErrors( hipMalloc( (void**) &d_outputArray, numInputElems * sizeof(int) ) );
// Will contain the number of unique elements in the output array.
int numSelectedElems = 0;
// Copy host input array to device memory.
checkCudaErrors( hipMemcpy( d_sortedArray, inputArray, numInputElems * sizeof(int),
hipMemcpyHostToDevice ) );
//---------------------------------------------------------------------------
// Do Step (1) to (4).
//---------------------------------------------------------------------------
//***********************************************
//*********** WRITE YOUR CODE HERE **************
//***********************************************
// Step 1 Sort inputArray[]
GPU_SortIntegerArray(d_sortedArray, numInputElems);
// Step 2 Mark the unique elements in sortedArray[]
hipLaunchKernelGGL(( Kernel_MarkUnique) , dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_sortedArray, d_selectionArray, numInputElems);
// Step 3 Scan selectionArray[] ("inclusive" all-prefix sums)
GPU_AllPrefixSums(d_selectionArray, d_scatterAddressArray, numInputElems, &numSelectedElems);
// Step 4 Scatter sortedArray[] into outputArray[] using scatterAddressArray[] - 1
hipLaunchKernelGGL(( Kernel_Scatter) , dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_sortedArray, d_selectionArray, d_scatterAddressArray, -1, d_outputArray, numInputElems);
//---------------------------------------------------------------------------
// Copy the final result from the device memory to the host memory.
//---------------------------------------------------------------------------
checkCudaErrors( hipMemcpy( outputArray, d_outputArray, numSelectedElems * sizeof(int),
hipMemcpyDeviceToHost ) );
(*numUniqueElems) = numSelectedElems;
//---------------------------------------------------------------------------
// Clean up.
//---------------------------------------------------------------------------
// Free device memory.
checkCudaErrors( hipFree( d_sortedArray ) );
checkCudaErrors( hipFree( d_selectionArray ) );
checkCudaErrors( hipFree( d_scatterAddressArray ) );
checkCudaErrors( hipFree( d_outputArray ) );
}
//===========================================================================
// Quicksort to sort the input integer array in-place in ascending order.
// To sort the entire input array, call Quicksort(array, 0, numElems-1).
//===========================================================================
#define SWAP(x, y, t) ((t)=(x),(x)=(y),(y)=(t))
static void Quicksort( int a[], int first, int last )
{
int tmp; // Temporary variable for SWAP.
if( first < last )
{
int pivot = a[first];
int i = first - 1;
int j = last + 1;
while( true )
{
do { j--; } while ( a[j] > pivot );
do { i++; } while ( a[i] < pivot );
if( i < j )
SWAP( a[i], a[j], tmp );
else
break;
}
Quicksort( a, first, j );
Quicksort( a, j + 1, last );
}
}
#undef SWAP
//===========================================================================
// CPU version.
//
// Given an input integer array, the function produces an output array
// which is a sorted version of the input array, but with duplicate
// elements removed. The output array is sorted in non-decreasing order.
// The function also produces the number of unique elements in the
// output array in the parameter (*numUniqueElems).
//
// When this function is called, sufficient memory storage must have
// already been allocated for the output array. The safest is to allocate
// as much memory as for the input array.
//===========================================================================
static void CPU_Unique( const int inputArray[], int numInputElems,
int outputArray[], int *numUniqueElems )
{
if ( numInputElems < 1 )
{
(*numUniqueElems) = 0;
return;
}
int *sortedArray = (int *) malloc( numInputElems * sizeof(int) );
memcpy( sortedArray, inputArray, numInputElems * sizeof(int) );
Quicksort( sortedArray, 0, numInputElems - 1 );
outputArray[0] = sortedArray[0];
int uniqueCount = 1;
for ( int i = 1; i < numInputElems; i++ )
if ( sortedArray[i] != sortedArray[i-1] )
outputArray[ uniqueCount++ ] = sortedArray[i];
(*numUniqueElems) = uniqueCount;
}
//===========================================================================
// Generates a set of random integers, each with a value from elemMin to
// elemMax, and puts them in the array intArray[].
//===========================================================================
static void GenerateRandomIntegers( int intArray[], int numElems, int elemMin, int elemMax )
{
for ( int i = 0; i < numElems; i++ )
{
        // NOTE: assumes a small RAND_MAX (e.g. 32767); this product overflows if RAND_MAX is INT_MAX.
        int rand32 = rand() * (RAND_MAX + 1) + rand();
intArray[i] = rand32 % (elemMax - elemMin + 1) + elemMin;
}
}
//===========================================================================
// Return true iff all corresponding elements in the int
// arrays A and B are equal.
//===========================================================================
static bool IntArrayEqual( const int A[], const int B[], int numElems )
{
for ( int i = 0; i < numElems; i++ )
if ( A[i] != B[i] ) return false;
return true;
}
void WaitForEnterKeyBeforeExit( void )
{
fflush( stdin );
getchar();
}
//===========================================================================
// The main function
//===========================================================================
int main(int argc, char** argv)
{
atexit( WaitForEnterKeyBeforeExit );
// Set seed for rand().
srand( 927 );
// Use command-line specified CUDA device, otherwise use device with highest Gflops/s.
int devID = findCudaDevice( argc, (const char **)argv );
// Create a timer.
StopWatchInterface *timer = 0;
sdkCreateTimer( &timer );
//---------------------------------------------------------------------------
// Allocate host memory and generate test data.
//---------------------------------------------------------------------------
// Allocate host memory for input integer array.
int *inputArray = (int *) malloc( NUM_ELEMS * sizeof(int) );
// Allocate host memory for result arrays.
int *cpu_uniqueArray = (int *) malloc( NUM_ELEMS * sizeof(int) );
int *gpu_uniqueArray = (int *) malloc( NUM_ELEMS * sizeof(int) );
// Number of unique elements in input array computed by different methods.
int cpu_numUniqueElems = 0;
int gpu_numUniqueElems = 0;
// Fill the input array with random integers.
GenerateRandomIntegers( inputArray, NUM_ELEMS, ELEM_MIN, ELEM_MAX );
//---------------------------------------------------------------------------
// Print some program parameter values.
//---------------------------------------------------------------------------
printf( "NUM_ELEMS = %d\n", NUM_ELEMS );
printf( "BLOCK_SIZE = %d\n", BLOCK_SIZE );
printf( "ELEM_MIN = %d\n", ELEM_MIN );
printf( "ELEM_MAX = %d\n", ELEM_MAX );
printf( "\n\n" );
//---------------------------------------------------------------------------
// Perform computation on CPU.
//---------------------------------------------------------------------------
printf( "CPU COMPUTATION:\n" );
// Reset and start timer.
sdkResetTimer( &timer );
sdkStartTimer( &timer );
// Compute on CPU.
CPU_Unique( inputArray, NUM_ELEMS, cpu_uniqueArray, &cpu_numUniqueElems );
// Stop timer.
sdkStopTimer( &timer );
printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) );
// Print some results.
printf( "Number of unique elements = %d\n", cpu_numUniqueElems );
printf( "\n\n" );
//---------------------------------------------------------------------------
// Perform computation on GPU.
//---------------------------------------------------------------------------
printf( "GPU COMPUTATION:\n" );
// Reset and start timer.
sdkResetTimer( &timer );
sdkStartTimer( &timer );
// Compute on GPU.
GPU_Unique( inputArray, NUM_ELEMS, gpu_uniqueArray, &gpu_numUniqueElems );
// Stop timer.
sdkStopTimer( &timer );
printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) );
// Print some results.
printf( "Number of unique elements = %d\n", gpu_numUniqueElems );
printf( "\n" );
// Check result with reference result computed by CPU.
bool equal = ( gpu_numUniqueElems == cpu_numUniqueElems ) &&
IntArrayEqual( cpu_uniqueArray, gpu_uniqueArray, cpu_numUniqueElems );
printf( "Verify GPU result... %s\n", (equal)? "PASS" : "FAIL" );
printf( "\n\n" );
//---------------------------------------------------------------------------
// Clean up.
//---------------------------------------------------------------------------
// Destroy the timer.
sdkDeleteTimer( &timer );
// Free up memory.
free( inputArray );
free( cpu_uniqueArray );
free( gpu_uniqueArray );
hipDeviceReset();
}
| 2e98d837b6ba3d907ff52093550088300df845c3.cu | //============================================================
// STUDENT NAME: Zhou Yichen
// MATRIC NO. : A0113598X
// NUS EMAIL : [email protected]
// COMMENTS TO GRADER:
// Thanks for grading!
//
//============================================================
//
// FILE: unique.cu
// Include files from C standard library.
#include <stdlib.h>
#include <stdio.h>
#include <string.h> // For memcpy().
#include <math.h>
// Includes CUDA.
#include <cuda_runtime.h>
// Includes helper functions from CUDA Samples SDK.
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples.
// Include files to use Thrust (a C++ template library for CUDA).
// Thrust v1.7.0 is automatically installed with CUDA Toolkit 6.5.
// Read more about Thrust at the GitHub Thrust project page
// (http://thrust.github.com/).
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
/////////////////////////////////////////////////////////////////////////////
// CONSTANTS & GLOBAL VARIABLES
/////////////////////////////////////////////////////////////////////////////
#define NUM_ELEMS (5*1000000) // Number of elements in input array.
#define BLOCK_SIZE 256
#define NUM_BLOCKS ( ( (NUM_ELEMS) + (BLOCK_SIZE) - 1 ) / (BLOCK_SIZE) )
#define ELEM_MIN 1 // Minimum value in input array (must not be negative).
#define ELEM_MAX 100000 // Maximum value in input array (must not be negative).
//===========================================================================
// CUDA kernel used by GPU_Unique().
//
// Given an input sorted integer array, the kernel marks in the output array
// which elements of the input array should be kept/removed, so that if these
// elements were to be kept/removed, there would be no duplicate elements in
// the sorted array. We want to remove as few elements as possible from the
// input array.
//
// The output of the kernel is an array of 1's and 0's to indicate whether
// the corresponding elements in the input array should be kept or removed --
// a 1 means keep, and 0 means remove. The output array has the same number
// of elements as the input array.
//
// For example, given the following input array
//
// inSortedArray[] = [ 1 1 3 3 3 5 5 7 8 8 ]
//
// the output would be
//
// outSelectionArray[] = [ 1 0 1 0 0 1 0 1 1 0 ]
//
// so that if we keep only those elements in the input array that have a 1
// in the corresponding location in the output array, we will have the
// result [ 1 3 5 7 8 ].
//
// NOTE: You should use shared memory to minimize the number of uncoalesced
// global memory accesses. Shared memory conflicts must be minimized too.
//===========================================================================
__global__ void Kernel_MarkUnique( int *inSortedArray, int *outSelectionArray,
int numElems )
{
//***********************************************
//*********** WRITE YOUR CODE HERE **************
//***********************************************
    __shared__ int sharedBlock[BLOCK_SIZE]; // int (not float) so large key values are not rounded
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tx = threadIdx.x;
if (tid < numElems)
{
// Write the value to the shared block array
sharedBlock[tx] = inSortedArray[tid];
// sync to make sure all the values are written for the block
__syncthreads();
if (tid == 0)
{ // for the first element, always return 1
outSelectionArray[tid] = 1;
}
else
{
if (tx == 0) // for the first element in a block
            { // need to read the previous value from global memory as it is not in the current block.
if(sharedBlock[tx] > inSortedArray[tid - 1])
{
outSelectionArray[tid] = 1;
}
else
{
outSelectionArray[tid] = 0;
}
}
else
{ // otherwise can just read the previous value from the shared memory
if (sharedBlock[tx] > sharedBlock[tx - 1])
{
outSelectionArray[tid] = 1;
}
else
{
outSelectionArray[tid] = 0;
}
}
}
}
}
//===========================================================================
// CUDA kernel used by GPU_Unique().
//
// The kernel copies a selected set of elements from the input array to
// specified locations in the output array.
//
// For an input element inArray[i], if selectionArray[i] is 1, then
// the input element is copied to the output array outArray[].
// The location in the output array it is copied to is
// scatterAddressArray[i] + addressOffset.
//
// You can assume that no two elements in the input array inArray[]
// will be selected and copied to the same location in the output
// array outArray[].
//
// NOTE: You do not need to use shared memory, but try to keep the
// number of uncoalesced global memory accesses to a minimum.
//===========================================================================
__global__ void Kernel_Scatter( int *inArray, int *selectionArray,
int *scatterAddressArray, int addressOffset,
int *outArray, int numElems )
{
//***********************************************
//*********** WRITE YOUR CODE HERE **************
//***********************************************
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numElems)
{
if (selectionArray[tid] == 1)
{
outArray[scatterAddressArray[tid] + addressOffset] = inArray[tid];
}
}
}
//===========================================================================
// Used by GPU_Unique().
//
// Use Thrust's sort algorithm to sort the input integer array on the GPU,
// in non-decreasing order. The sort is performed in-place.
//
// NOTE:
// * The input/output array is already allocated in the device memory.
//===========================================================================
static void GPU_SortIntegerArray( int *d_inoutArray, int numElems )
{
thrust::device_ptr<int> dev_ptr( d_inoutArray );
thrust::sort( dev_ptr, dev_ptr + numElems );
}
//===========================================================================
// Used by GPU_Unique().
//
// Use Thrust's scan algorithm to compute the "inclusive" all-prefix sums on the GPU.
// Also produces the sum of all elements in the input array in the output
// parameter *h_outInArraySum.
//
// NOTE: The input and output arrays are already allocated in the device memory.
//===========================================================================
static void GPU_AllPrefixSums( int *d_inArray, int *d_outArray, int numElems,
int *h_outInArraySum )
{
thrust::device_ptr<int> in_dev_ptr( d_inArray );
thrust::device_ptr<int> out_dev_ptr( d_outArray );
thrust::inclusive_scan( in_dev_ptr, in_dev_ptr + numElems, out_dev_ptr );
// Get the sum of all the elements in the input array. This can be obtained
// from the last element in the all-prefix-sums array.
checkCudaErrors( cudaMemcpy( h_outInArraySum, d_outArray + numElems - 1,
sizeof(int), cudaMemcpyDeviceToHost ) );
// Using Thrust, the above memory copy can be written as:
// *h_outInArraySum = out_dev_ptr[ numElems - 1 ];
}
//===========================================================================
// GPU version.
//
// Given an input integer array, the function produces an output array
// which is a sorted version of the input array, but with duplicate
// elements removed. The output array is sorted in non-decreasing order.
// The function also produces the number of unique elements in the
// output array in the parameter (*numUniqueElems).
//
// For example, if the input array is [ 5 3 7 5 8 3 1 3 1 8 ], the
// output array would be [ 1 3 5 7 8 ].
//
// When this function is called, sufficient memory storage must have
// already been allocated for the output array. The safest is to allocate
// as much memory as for the input array.
//
// Here, a scan-and-scatter approach is used to do the stream compaction
// on the GPU. The following example demonstrates the steps.
//
// (0) Input array:
// inputArray[] = [ 5 3 7 5 8 3 1 3 1 8 ]
//
// (1) Sort inputArray[]:
// sortedArray[] = [ 1 1 3 3 3 5 5 7 8 8 ]
//
// (2) Mark the unique elements in sortedArray[]:
// selectionArray[] = [ 1 0 1 0 0 1 0 1 1 0 ]
//
// (3) Scan selectionArray[] ("inclusive" all-prefix sums):
// scatterAddressArray[] = [ 1 1 2 2 2 3 3 4 5 5 ]
//
// (4) Scatter sortedArray[] into outputArray[] using scatterAddressArray[] - 1:
// outputArray[] = [ 1 3 5 7 8 ]
//
// Note that the number of unique elements in the output array is the
// value of the last element in scatterAddressArray[].
//
// IMPORTANT: Step (1) to (4) must be computed on the GPU.
//
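// For reference only (a sketch, not a replacement for Steps (1)-(4) above):
// once the array has been sorted on the device, Thrust can also drop the
// duplicates in a single call (requires <thrust/unique.h>), e.g.
//
//     thrust::device_ptr<int> p( d_sortedArray );
//     thrust::device_ptr<int> new_end = thrust::unique( p, p + numInputElems );
//     int numUnique = (int)( new_end - p );  // unique values now in p[0 .. numUnique-1]
//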
//===========================================================================
static void GPU_Unique( const int inputArray[], int numInputElems,
int outputArray[], int *numUniqueElems )
{
if ( numInputElems < 1 )
{
(*numUniqueElems) = 0;
return;
}
//---------------------------------------------------------------------------
// Allocate device memory and copy input array from host memory to
// device memory.
//---------------------------------------------------------------------------
// Allocate device memory.
int *d_sortedArray, *d_selectionArray, *d_scatterAddressArray, *d_outputArray;
checkCudaErrors( cudaMalloc( (void**) &d_sortedArray, numInputElems * sizeof(int) ) );
checkCudaErrors( cudaMalloc( (void**) &d_selectionArray, numInputElems * sizeof(int) ) );
checkCudaErrors( cudaMalloc( (void**) &d_scatterAddressArray, numInputElems * sizeof(int) ) );
checkCudaErrors( cudaMalloc( (void**) &d_outputArray, numInputElems * sizeof(int) ) );
// Will contain the number of unique elements in the output array.
int numSelectedElems = 0;
// Copy host input array to device memory.
checkCudaErrors( cudaMemcpy( d_sortedArray, inputArray, numInputElems * sizeof(int),
cudaMemcpyHostToDevice ) );
//---------------------------------------------------------------------------
// Do Step (1) to (4).
//---------------------------------------------------------------------------
//***********************************************
//*********** WRITE YOUR CODE HERE **************
//***********************************************
// Step 1 Sort inputArray[]
GPU_SortIntegerArray(d_sortedArray, numInputElems);
// Step 2 Mark the unique elements in sortedArray[]
Kernel_MarkUnique <<<NUM_BLOCKS, BLOCK_SIZE>>> (d_sortedArray, d_selectionArray, numInputElems);
// Step 3 Scan selectionArray[] ("inclusive" all-prefix sums)
GPU_AllPrefixSums(d_selectionArray, d_scatterAddressArray, numInputElems, &numSelectedElems);
// Step 4 Scatter sortedArray[] into outputArray[] using scatterAddressArray[] - 1
Kernel_Scatter <<<NUM_BLOCKS, BLOCK_SIZE>>> (d_sortedArray, d_selectionArray, d_scatterAddressArray, -1, d_outputArray, numInputElems);
//---------------------------------------------------------------------------
// Copy the final result from the device memory to the host memory.
//---------------------------------------------------------------------------
checkCudaErrors( cudaMemcpy( outputArray, d_outputArray, numSelectedElems * sizeof(int),
cudaMemcpyDeviceToHost ) );
(*numUniqueElems) = numSelectedElems;
//---------------------------------------------------------------------------
// Clean up.
//---------------------------------------------------------------------------
// Free device memory.
checkCudaErrors( cudaFree( d_sortedArray ) );
checkCudaErrors( cudaFree( d_selectionArray ) );
checkCudaErrors( cudaFree( d_scatterAddressArray ) );
checkCudaErrors( cudaFree( d_outputArray ) );
}
//===========================================================================
// Quicksort to sort the input integer array in-place in ascending order.
// To sort the entire input array, call Quicksort(array, 0, numElems-1).
//===========================================================================
#define SWAP(x, y, t) ((t)=(x),(x)=(y),(y)=(t))
static void Quicksort( int a[], int first, int last )
{
int tmp; // Temporary variable for SWAP.
if( first < last )
{
int pivot = a[first];
int i = first - 1;
int j = last + 1;
while( true )
{
do { j--; } while ( a[j] > pivot );
do { i++; } while ( a[i] < pivot );
if( i < j )
SWAP( a[i], a[j], tmp );
else
break;
}
Quicksort( a, first, j );
Quicksort( a, j + 1, last );
}
}
#undef SWAP
//===========================================================================
// CPU version.
//
// Given an input integer array, the function produces an output array
// which is a sorted version of the input array, but with duplicate
// elements removed. The output array is sorted in non-decreasing order.
// The function also produces the number of unique elements in the
// output array in the parameter (*numUniqueElems).
//
// When this function is called, sufficient memory storage must have
// already been allocated for the output array. The safest is to allocate
// as much memory as for the input array.
//===========================================================================
static void CPU_Unique( const int inputArray[], int numInputElems,
int outputArray[], int *numUniqueElems )
{
if ( numInputElems < 1 )
{
(*numUniqueElems) = 0;
return;
}
int *sortedArray = (int *) malloc( numInputElems * sizeof(int) );
memcpy( sortedArray, inputArray, numInputElems * sizeof(int) );
Quicksort( sortedArray, 0, numInputElems - 1 );
outputArray[0] = sortedArray[0];
int uniqueCount = 1;
for ( int i = 1; i < numInputElems; i++ )
if ( sortedArray[i] != sortedArray[i-1] )
outputArray[ uniqueCount++ ] = sortedArray[i];
(*numUniqueElems) = uniqueCount;
}
//===========================================================================
// Generates a set of random integers, each with a value from elemMin to
// elemMax, and puts them in the array intArray[].
//===========================================================================
static void GenerateRandomIntegers( int intArray[], int numElems, int elemMin, int elemMax )
{
for ( int i = 0; i < numElems; i++ )
{
        // NOTE: assumes a small RAND_MAX (e.g. 32767); this product overflows if RAND_MAX is INT_MAX.
        int rand32 = rand() * (RAND_MAX + 1) + rand();
intArray[i] = rand32 % (elemMax - elemMin + 1) + elemMin;
}
}
//===========================================================================
// Return true iff all corresponding elements in the int
// arrays A and B are equal.
//===========================================================================
static bool IntArrayEqual( const int A[], const int B[], int numElems )
{
for ( int i = 0; i < numElems; i++ )
if ( A[i] != B[i] ) return false;
return true;
}
void WaitForEnterKeyBeforeExit( void )
{
fflush( stdin );
getchar();
}
//===========================================================================
// The main function
//===========================================================================
int main(int argc, char** argv)
{
atexit( WaitForEnterKeyBeforeExit );
// Set seed for rand().
srand( 927 );
// Use command-line specified CUDA device, otherwise use device with highest Gflops/s.
int devID = findCudaDevice( argc, (const char **)argv );
// Create a timer.
StopWatchInterface *timer = 0;
sdkCreateTimer( &timer );
//---------------------------------------------------------------------------
// Allocate host memory and generate test data.
//---------------------------------------------------------------------------
// Allocate host memory for input integer array.
int *inputArray = (int *) malloc( NUM_ELEMS * sizeof(int) );
// Allocate host memory for result arrays.
int *cpu_uniqueArray = (int *) malloc( NUM_ELEMS * sizeof(int) );
int *gpu_uniqueArray = (int *) malloc( NUM_ELEMS * sizeof(int) );
// Number of unique elements in input array computed by different methods.
int cpu_numUniqueElems = 0;
int gpu_numUniqueElems = 0;
// Fill the input array with random integers.
GenerateRandomIntegers( inputArray, NUM_ELEMS, ELEM_MIN, ELEM_MAX );
//---------------------------------------------------------------------------
// Print some program parameter values.
//---------------------------------------------------------------------------
printf( "NUM_ELEMS = %d\n", NUM_ELEMS );
printf( "BLOCK_SIZE = %d\n", BLOCK_SIZE );
printf( "ELEM_MIN = %d\n", ELEM_MIN );
printf( "ELEM_MAX = %d\n", ELEM_MAX );
printf( "\n\n" );
//---------------------------------------------------------------------------
// Perform computation on CPU.
//---------------------------------------------------------------------------
printf( "CPU COMPUTATION:\n" );
// Reset and start timer.
sdkResetTimer( &timer );
sdkStartTimer( &timer );
// Compute on CPU.
CPU_Unique( inputArray, NUM_ELEMS, cpu_uniqueArray, &cpu_numUniqueElems );
// Stop timer.
sdkStopTimer( &timer );
printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) );
// Print some results.
printf( "Number of unique elements = %d\n", cpu_numUniqueElems );
printf( "\n\n" );
//---------------------------------------------------------------------------
// Perform computation on GPU.
//---------------------------------------------------------------------------
printf( "GPU COMPUTATION:\n" );
// Reset and start timer.
sdkResetTimer( &timer );
sdkStartTimer( &timer );
// Compute on GPU.
GPU_Unique( inputArray, NUM_ELEMS, gpu_uniqueArray, &gpu_numUniqueElems );
// Stop timer.
sdkStopTimer( &timer );
printf( "Processing time = %.3f ms\n", sdkGetTimerValue( &timer ) );
// Print some results.
printf( "Number of unique elements = %d\n", gpu_numUniqueElems );
printf( "\n" );
// Check result with reference result computed by CPU.
bool equal = ( gpu_numUniqueElems == cpu_numUniqueElems ) &&
IntArrayEqual( cpu_uniqueArray, gpu_uniqueArray, cpu_numUniqueElems );
printf( "Verify GPU result... %s\n", (equal)? "PASS" : "FAIL" );
printf( "\n\n" );
//---------------------------------------------------------------------------
// Clean up.
//---------------------------------------------------------------------------
// Destroy the timer.
sdkDeleteTimer( &timer );
// Free up memory.
free( inputArray );
free( cpu_uniqueArray );
free( gpu_uniqueArray );
cudaDeviceReset();
}
|
2eb5a56ff307b5e097f07087175e06d8b9543dcd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include "evaluate.h"
#include "util.h"
#include "reals.h"
#include "partial.h"
#include "../timing/timing.h"
#include "node_gpu.h"
#include <cutil_inline.h>
/* ------------------------------------------------------------------------
*/
int
get_byte_padding()
{
return getenv__int("BYTEPAD", 128);
}
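/*
   Worked example (a sketch, assuming the default BYTEPAD of 128 bytes and an
   8-byte dtype): the box builders below compute
       padding = get_byte_padding () / sizeof (dtype);   // 128 / 8 = 16 elements
   so a box holding n = 13 points is padded to
       n_padded = ((13 + 16 - 1) / 16) * 16 = 16 slots,
   which keeps every box's data aligned to a 128-byte boundary in the
   x_/y_/z_/w_ arrays.
*/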
/* ------------------------------------------------------------------------
*/
void
xlist_create_xlist (UList_t* U, int num_boxes, AllNodes* All_N)
{
int i, j, nu;
int list_size = 0;
assert (U && All_N);
vector<NodeTree>& nodeVec = *All_N->N;
  /* allocate memory for xlist ptr */
U->n_boxes_ = num_boxes;
U->Ptr_ = (int *) malloc (sizeof (int) * (num_boxes + 1));
assert (U->Ptr_);
  /* See how big xlist should be */
U->Ptr_[0] = 0;
for(i = 0; i < U->n_boxes_; i++) {
list_size += nodeVec[i].Xnodes.size ();
U->Ptr_[i + 1] = list_size;
}
  /* allocate memory for xlist */
U->L_ = (int*) malloc (sizeof (int) * list_size);
assert (U->L_);
  /* initialize xlist */
for(i = 0; i < U->n_boxes_; i++) {
nu = nodeVec[i].Xnodes.size ();
for(j = 0; j < nu; j++) {
U->L_[U->Ptr_[i] + j] = nodeVec[i].Xnodes[j];
}
}
}
void
wlist_create_wlist (UList_t* U, int num_boxes, AllNodes* All_N)
{
int i, j, nu;
int list_size = 0;
assert (U && All_N);
vector<NodeTree>& nodeVec = *All_N->N;
  /* allocate memory for wlist ptr */
U->n_boxes_ = num_boxes;
U->Ptr_ = (int *) malloc (sizeof (int) * (num_boxes + 1));
assert (U->Ptr_);
  /* See how big wlist should be */
U->Ptr_[0] = 0;
for(i = 0; i < U->n_boxes_; i++) {
list_size += nodeVec[i].Wnodes.size ();
U->Ptr_[i + 1] = list_size;
}
  /* allocate memory for wlist */
U->L_ = (int*) malloc (sizeof (int) * list_size);
assert (U->L_);
  /* initialize wlist */
for(i = 0; i < U->n_boxes_; i++) {
nu = nodeVec[i].Wnodes.size ();
for(j = 0; j < nu; j++) {
U->L_[U->Ptr_[i] + j] = nodeVec[i].Wnodes[j];
}
}
}
void
ulist_create_ulist (UList_t* U, int num_boxes, AllNodes* All_N)
{
int i, j, nu;
int list_size = 0;
assert (U && All_N);
vector<NodeTree>& nodeVec = *All_N->N;
/* allocate memory for ulist ptr */
U->n_boxes_ = num_boxes;
U->Ptr_ = (int *) malloc (sizeof (int) * (num_boxes + 1));
assert (U->Ptr_);
/* See how big ulist should be */
U->Ptr_[0] = 0;
for(i = 0; i < U->n_boxes_; i++) {
list_size += nodeVec[i].Unodes.size ();
U->Ptr_[i + 1] = list_size;
}
/* allocate memory for ulist */
U->L_ = (int*) malloc (sizeof (int) * list_size);
assert (U->L_);
/* initialize ulist */
for(i = 0; i < U->n_boxes_; i++) {
nu = nodeVec[i].Unodes.size ();
for(j = 0; j < nu; j++) {
U->L_[U->Ptr_[i] + j] = nodeVec[i].Unodes[j];
}
}
}
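/*
   Layout built by the three list builders above (illustrative example): for
   boxes whose lists are {2,5}, {} and {0,1,4}, they produce
       Ptr_ = [ 0 2 2 5 ]  and  L_ = [ 2 5 0 1 4 ],
   so the list of box i occupies L_[ Ptr_[i] .. Ptr_[i+1]-1 ] and
   Ptr_[n_boxes_] is the total list length.
*/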
/* ------------------------------------------------------------------------
*/
void
ulist_create_boxes__double_source (AllNodes *All_N, FMMWrapper_t *F)
{
int i, j, n;
int padding, n_padded, n_points_, n_points_padded_;
vector<NodeTree>& nodeVec = *All_N->N;
Boxes_t *B;
Node *N;
B = &F->S_h_;
N = All_N->Ns;
assert (B && N);
padding = get_byte_padding () / sizeof (dtype);
B->n_boxes_ = nodeVec.size ();
B->Bptr_ = (int *) malloc (sizeof (int) * (B->n_boxes_ + 1));
B->Bn_ = (int *) malloc (sizeof (int) * B->n_boxes_);
assert (B->Bptr_ && B->Bn_);
n_points_ = 0;
n_points_padded_ = 0;
B->Bptr_[0] = 0;
for(i = 0; i < B->n_boxes_; i++) {
if(nodeVec[i].tag & LET_SRCNODE && nodeVec[i].child == -1) {
n = N[i].num_pts;
n_padded = ((n + padding - 1) / padding) * padding;
assert (n_padded >= n);
B->Bn_[i] = n;
B->Bptr_[i + 1] = B->Bptr_[i] + n_padded;
n_points_ += n;
n_points_padded_ += n_padded;
} else {
B->Bn_[i] = 0;
B->Bptr_[i + 1] = B->Bptr_[i];
}
}
assert (n_points_padded_ == B->Bptr_[B->n_boxes_]);
B->n_points_ = n_points_;
B->x_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
B->y_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
B->z_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
B->w_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
assert (B->x_ && B->y_ && B->z_ && B->w_);
/* copy points */
for(i = 0; i < B->n_boxes_; i++) {
if(nodeVec[i].tag & LET_SRCNODE) {
n = N[i].num_pts;
for(j = 0; j < n; j++) {
B->x_[B->Bptr_[i] + j] = N[i].x[j];
B->y_[B->Bptr_[i] + j] = N[i].y[j];
B->z_[B->Bptr_[i] + j] = N[i].z[j];
B->w_[B->Bptr_[i] + j] = N[i].den_pot[j];
}
}
}
}
void
ulist_create_boxes__double_target (AllNodes *All_N, FMMWrapper_t *F)
{
int i, j, n;
int padding, n_padded, n_points_, n_points_padded_;
vector<NodeTree>& nodeVec = *All_N->N;
Boxes_t *B;
Node *N;
B = &F->T_h_;
N = All_N->Nt;
assert (B && N);
padding = get_byte_padding () / sizeof (dtype);
B->n_boxes_ = nodeVec.size ();
B->Bptr_ = (int *) malloc (sizeof (int) * (B->n_boxes_ + 1));
B->Bn_ = (int *) malloc (sizeof (int) * B->n_boxes_);
assert (B->Bptr_ && B->Bn_);
n_points_ = 0;
n_points_padded_ = 0;
B->Bptr_[0] = 0;
for(i = 0; i < B->n_boxes_; i++) {
if(nodeVec[i].tag & LET_TRGNODE && nodeVec[i].child == -1) {
n = N[i].num_pts;
n_padded = ((n + padding - 1) / padding) * padding;
assert (n_padded >= n);
B->Bn_[i] = n;
B->Bptr_[i + 1] = B->Bptr_[i] + n_padded;
n_points_ += n;
n_points_padded_ += n_padded;
} else {
B->Bn_[i] = 0;
B->Bptr_[i + 1] = B->Bptr_[i];
}
}
assert (n_points_padded_ == B->Bptr_[B->n_boxes_]);
B->n_points_ = n_points_;
B->x_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
B->y_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
B->z_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
B->w_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
assert (B->x_ && B->y_ && B->z_ && B->w_);
/* copy points */
for(i = 0; i < B->n_boxes_; i++) {
if(nodeVec[i].tag & LET_TRGNODE) {
n = N[i].num_pts;
for(j = 0; j < n; j++) {
B->x_[B->Bptr_[i] + j] = N[i].x[j];
B->y_[B->Bptr_[i] + j] = N[i].y[j];
B->z_[B->Bptr_[i] + j] = N[i].z[j];
B->w_[B->Bptr_[i] + j] = N[i].den_pot[j];
}
}
}
}
void
ulist_create_boxes__double (Boxes_t* B,
int num_boxes,
const Node* N,
int padding)
{
int i, j, n, n_padded, min;
/* total number of points */
int n_points_ = 0;
int n_points_padded_ = 0;
/* check if structures that were passed in are valid */
assert (B && N);
/* allocate memory to data structures */
B->n_boxes_ = num_boxes;
B->Bptr_ = (int *) malloc (sizeof (int) * (B->n_boxes_ + 1));
B->Bn_ = (int *) malloc (sizeof (int) * B->n_boxes_);
assert (B->Bptr_ && B->Bn_);
/* initialize data structures */
B->Bptr_[0] = 0;
for(i = 0; i < num_boxes; i++) {
/* number of points in this box */
n = N[i].num_pts;
/* number of points in this box if padded */
n_padded = ((n + padding - 1) / padding) * padding;
assert (n_padded >= n);
/* make Bn_ and Bptr_ have/point to the right values */
B->Bn_[i] = n;
B->Bptr_[i+1] = B->Bptr_[i] + n_padded;
n_points_ += n;
n_points_padded_ += n_padded;
}
assert (n_points_padded_ == B->Bptr_[B->n_boxes_]);
/* allocate memory to data structures that are going to hold the values */
B->n_points_ = n_points_;
B->x_ = (real_t*) malloc (n_points_padded_ * sizeof (real_t));
B->y_ = (real_t*) malloc (n_points_padded_ * sizeof (real_t));
B->z_ = (real_t*) malloc (n_points_padded_ * sizeof (real_t));
B->w_ = (real_t*) malloc (n_points_padded_ * sizeof (real_t));
assert (B->x_ && B->y_ && B->z_ && B->w_);
/* copy points */
for(i = 0; i < num_boxes; i++) {
n = N[i].num_pts;
min = B->Bptr_[i];
for(j = 0; j < n; j++) {
B->x_[min + j] = N[i].x[j];
B->y_[min + j] = N[i].y[j];
B->z_[min + j] = N[i].z[j];
B->w_[min + j] = N[i].den_pot[j];
}
}
}
/* ------------------------------------------------------------------------
*/
void
alloc__SOURCE_BOX__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* Source boxes */
f->S_d_.n_points_ = f->S_h_.n_points_;
f->S_d_.n_boxes_ = f->S_h_.n_boxes_;
/* Allocate memory for data */
cutilSafeCall (hipMalloc ((void**)&f->S_d_.x_,
f->S_h_.Bptr_[f->S_h_.n_boxes_]
* sizeof (dtype)));
cutilSafeCall (hipMalloc ((void**)&f->S_d_.y_,
f->S_h_.Bptr_[f->S_h_.n_boxes_]
* sizeof (dtype)));
cutilSafeCall (hipMalloc ((void**)&f->S_d_.z_,
f->S_h_.Bptr_[f->S_h_.n_boxes_]
* sizeof (dtype)));
cutilSafeCall (hipMalloc ((void**)&f->S_d_.w_,
f->S_h_.Bptr_[f->S_h_.n_boxes_]
* sizeof (dtype)));
/* Allocate memory for pointers */
cutilSafeCall (hipMalloc ((void**)&f->S_d_.Bptr_,
(f->S_d_.n_boxes_ + 1) * sizeof (int)));
cutilSafeCall (hipMalloc ((void**)&f->S_d_.Bn_,
f->S_d_.n_boxes_ * sizeof (int)));
assert (&f->S_d_ && &f->S_h_);
/* ------------------------------------------------------------ */
}
void
alloc__TARGET_BOX__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* Target boxes */
f->T_d_.n_points_ = f->T_h_.n_points_;
f->T_d_.n_boxes_ = f->T_h_.n_boxes_;
/* Allocate memory for data */
cutilSafeCall (hipMalloc ((void**)&f->T_d_.x_,
f->T_h_.Bptr_[f->T_h_.n_boxes_]
* sizeof (dtype)));
cutilSafeCall (hipMalloc ((void**)&f->T_d_.y_,
f->T_h_.Bptr_[f->T_h_.n_boxes_]
* sizeof (dtype)));
cutilSafeCall (hipMalloc ((void**)&f->T_d_.z_,
f->T_h_.Bptr_[f->T_h_.n_boxes_]
* sizeof (dtype)));
cutilSafeCall (hipMalloc ((void**)&f->T_d_.w_,
f->T_h_.Bptr_[f->T_h_.n_boxes_]
* sizeof (dtype)));
/* Allocate memory for pointers */
cutilSafeCall (hipMalloc ((void**)&f->T_d_.Bptr_,
(f->T_h_.n_boxes_ + 1) * sizeof (int)));
cutilSafeCall (hipMalloc ((void**)&f->T_d_.Bn_,
f->T_h_.n_boxes_ * sizeof (int)));
assert (&f->T_d_ && &f->T_h_);
/* ------------------------------------------------------------ */
}
void
alloc__U_LIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* ulist */
f->U_d_.n_boxes_ = f->U_h_.n_boxes_;
cutilSafeCall (hipMalloc ((void**)&f->U_d_.L_,
f->U_h_.Ptr_[f->U_h_.n_boxes_] * sizeof (int)));
cutilSafeCall (hipMalloc ((void**)&f->U_d_.Ptr_,
(f->U_h_.n_boxes_ + 1) * sizeof (int)));
assert (&f->U_d_ && &f->U_h_);
/* ------------------------------------------------------------ */
}
void
alloc__TAG__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* tag */
cutilSafeCall (hipMalloc ((void**)&f->tag_d_,
nodeVec.size () * sizeof (int)));
assert (f->tag_d_);
/* ------------------------------------------------------------ */
}
void
alloc__DEPTH__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* depth */
cutilSafeCall (hipMalloc ((void**)&f->depth_d_,
nodeVec.size () * sizeof (int)));
assert (f->depth_d_);
/* ------------------------------------------------------------ */
}
void
alloc__CHILDREN__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* children */
/*
cutilSafeCall (hipMalloc ((void**)&f->child_d_,
num_non_leaf_nodes * sizeof (int)));
*/
cutilSafeCall (hipMalloc ((void**)&f->child_d_,
nodeVec.size () * sizeof (int)));
assert (f->child_d_);
/* ------------------------------------------------------------ */
}
void
alloc__RADIUS__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* radius */
/*
cutilSafeCall (hipMalloc ((void**)&f->radius_d_,
num_leaf_nodes * sizeof (dtype)));
*/
cutilSafeCall (hipMalloc ((void**)&f->radius_d_,
nodeVec.size () * sizeof (dtype)));
assert (f->radius_d_);
/* ------------------------------------------------------------ */
}
void
alloc__CENTER__ (FMMWrapper_t* f) {
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* radius */
/*
cutilSafeCall (hipMalloc ((void**)&f->radius_d_,
num_leaf_nodes * sizeof (dtype)));
*/
cutilSafeCall (hipMalloc ((void**)&f->radius_d_,
nodeVec.size () * sizeof (dtype)));
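  /* NOTE: radius_d_ is also allocated in alloc__RADIUS__ above; if both
     routines are called, this second hipMalloc overwrites the pointer and
     leaks the earlier buffer. */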
/* ------------------------------------------------------------ */
/* center */
cutilSafeCall (hipMalloc ((void**)&f->center0_d_,
nodeVec.size () * sizeof (dtype)));
cutilSafeCall (hipMalloc ((void**)&f->center1_d_,
nodeVec.size () * sizeof (dtype)));
cutilSafeCall (hipMalloc ((void**)&f->center2_d_,
nodeVec.size () * sizeof (dtype)));
assert (f->center0_d_ && f->center1_d_ && f->center2_d_);
/* ------------------------------------------------------------ */
}
void
alloc__SP_UC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* SP[UC] */
cutilSafeCall (hipMalloc ((void**)&f->SP_UC_d_,
3 * f->SP_UC_size_padded * sizeof (dtype)));
assert (f->SP_UC_d_);
/* ------------------------------------------------------------ */
}
void
alloc__UC2UE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* UC2UE matrix */
cutilSafeCall (hipMalloc ((void**)&f->UC2UE_d_,
f->UC2UE_r_padded * f->UC2UE_c * sizeof (dtype)));
assert (f->UC2UE_d_);
/* ------------------------------------------------------------ */
}
void
alloc__UE2UC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* UE2UC matrix */
cutilSafeCall (hipMalloc ((void**)&f->UE2UC_d_,
(2 * 2 * 2) * (f->UE2UC_r_padded * f->UE2UC_c) *
sizeof (dtype)));
assert (f->UE2UC_d_);
/* ------------------------------------------------------------ */
}
void
alloc__SRC_UPW_EQU_DEN__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* Temporary up_calc GPU variables */
/* src_upw_equ_den */
cutilSafeCall (hipMalloc ((void**)&f->SRC_UPW_EQU_DEN_d_,
nodeVec.size () * f->UC2UE_r_padded *
sizeof (dtype)));
assert (f->SRC_UPW_EQU_DEN_d_);
/* ------------------------------------------------------------ */
}
void
alloc__VLIST_SRC__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* src */
cutilSafeCall (hipMalloc ((void**)&f->vlist_src_d_,
nodeVec.size () * f->vlist_array_size *
sizeof (dtype)));
assert (f->vlist_src_d_);
/* ------------------------------------------------------------ */
}
void
alloc__REG_DEN__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* reg_den */
cutilSafeCall (hipMalloc ((void**)&f->reg_den_d_,
nodeVec.size () * f->reg_den_size *
sizeof (dtype)));
assert (f->reg_den_d_);
/* ------------------------------------------------------------ */
}
void
alloc__TT__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* tt */
cutilSafeCall (hipMalloc ((void**)&f->tt, f->trans_arrays_num * f->RP_n_ *
sizeof (dtype)));
assert (f->tt);
/* ------------------------------------------------------------ */
}
void
alloc__VLIST_TRANS__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* trans */
cutilSafeCall (hipMalloc ((void**)&f->vlist_trans_d_,
f->trans_arrays_num * f->vlist_array_size *
sizeof (dtype)));
assert (f->vlist_trans_d_);
/* ------------------------------------------------------------ */
}
void
alloc__VLIST_TRG__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* trg */
cutilSafeCall (hipMalloc ((void**)&f->vlist_trg_d_,
nodeVec.size () * f->vlist_array_size *
sizeof (dtype)));
assert (f->vlist_trg_d_);
/* ------------------------------------------------------------ */
}
void
alloc__VLIST_TLIST__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* vlist and tlist and pointer */
/* pointer */
cutilSafeCall (hipMalloc ((void**)&f->vlist_ptr_d_,
(nodeVec.size () + 1) * sizeof (int)));
assert (f->vlist_ptr_d_);
/* vlist */
cutilSafeCall (hipMalloc ((void**)&f->vlist_d_, f->list_size * sizeof (int)));
assert (f->vlist_d_);
/* tlist */
cutilSafeCall (hipMalloc ((void**)&f->tlist_d_, f->list_size * sizeof (int)));
assert (f->tlist_d_);
/* ------------------------------------------------------------ */
}
void
alloc__TRG_DWN_CHK_VAL__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* trg_dwn_chk_val */
cutilSafeCall (hipMalloc ((void**)&f->TRG_DWN_CHK_VAL_d_,
nodeVec.size () * f->SP_DC_n_padded_ *
sizeof (dtype)));
assert (f->TRG_DWN_CHK_VAL_d_);
/* ------------------------------------------------------------ */
}
void
alloc__PATH2NODE__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* path2Node */
cutilSafeCall (hipMalloc ((void**)&f->path2Node_d_,
nodeVec.size () * sizeof (int3)));
assert (f->path2Node_d_);
/* ------------------------------------------------------------ */
}
void
alloc__PARENT__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* parent */
/* Not needed - use children structure instead */
/* Now I need it */
cutilSafeCall (hipMalloc ((void**)&f->parent_d_,
nodeVec.size () * sizeof (int)));
assert (f->parent_d_);
/* ------------------------------------------------------------ */
}
void
alloc__TRG_DWN_EQU_DEN__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* trg_dwn_equ_den */
cutilSafeCall (hipMalloc ((void**)&f->TRG_DWN_EQU_DEN_d_,
nodeVec.size () * f->SP_DE_n_padded *
sizeof (dtype)));
assert (f->TRG_DWN_EQU_DEN_d_);
/* ------------------------------------------------------------ */
}
void
alloc__DC2DE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* DC2DE_mat */
cutilSafeCall (hipMalloc ((void**)&f->DC2DE_d_,
f->DC2DE_r_padded * f->DC2DE_c * sizeof (dtype)));
assert (f->DC2DE_d_);
/* ------------------------------------------------------------ */
}
void
alloc__DE2DC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* DE2DC_mat[8] */
cutilSafeCall (hipMalloc ((void**)&f->DE2DC_d_,
(2 * 2 * 2) * f->DE2DC_r_padded * f->DE2DC_c *
sizeof (dtype)));
assert (f->DE2DC_d_);
/* ------------------------------------------------------------ */
}
void
alloc__SP_DE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* SP[DE] */
cutilSafeCall (hipMalloc ((void**)&f->SP_DE_d_,
3 * f->SP_DE_n_padded * sizeof (dtype)));
assert (f->SP_DE_d_);
/* ------------------------------------------------------------ */
}
void
alloc__SP_UE__ (FMMWrapper_t* f)
{
cutilSafeCall (hipMalloc ((void**)&f->SP_UE_d_,
3 * f->SP_UE_n_padded * sizeof (dtype)));
assert (f->SP_UE_d_);
}
void
alloc__W_LIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
  /* wlist */
f->W_d_.n_boxes_ = f->W_h_.n_boxes_;
cutilSafeCall (hipMalloc ((void**)&f->W_d_.L_,
f->W_h_.Ptr_[f->W_h_.n_boxes_] * sizeof (int)));
cutilSafeCall (hipMalloc ((void**)&f->W_d_.Ptr_,
(f->W_h_.n_boxes_ + 1) * sizeof (int)));
assert (&f->W_d_ && &f->W_h_);
/* ------------------------------------------------------------ */
}
void
alloc__SRCNUM__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMalloc ((void**)&f->srcNum_d_,
nodeVec.size () * sizeof (int)));
assert (f->srcNum_d_);
}
void
alloc__SP_DC__ (FMMWrapper_t* f)
{
cutilSafeCall (hipMalloc ((void**)&f->SP_DC_d_,
3 * f->SP_DC_n_padded_ * sizeof (dtype)));
assert (f->SP_DC_d_);
}
void
alloc__X_LIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
  /* xlist */
f->X_d_.n_boxes_ = f->X_h_.n_boxes_;
cutilSafeCall (hipMalloc ((void**)&f->X_d_.L_,
f->X_h_.Ptr_[f->X_h_.n_boxes_] * sizeof (int)));
cutilSafeCall (hipMalloc ((void**)&f->X_d_.Ptr_,
(f->X_h_.n_boxes_ + 1) * sizeof (int)));
assert (&f->X_d_ && &f->X_h_);
/* ------------------------------------------------------------ */
}
void
alloc__TRGNUM__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMalloc ((void**)&f->trgNum_d_,
nodeVec.size () * sizeof (int)));
assert (f->trgNum_d_);
}
void
xfer__SOURCE_BOX__ (FMMWrapper_t* f)
{
/* Source boxes */
cutilSafeCall (hipMemcpy (f->S_d_.x_, f->S_h_.x_,
f->S_h_.Bptr_[f->S_h_.n_boxes_] *
sizeof (dtype), hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->S_d_.y_, f->S_h_.y_,
f->S_h_.Bptr_[f->S_h_.n_boxes_] *
sizeof (dtype), hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->S_d_.z_, f->S_h_.z_,
f->S_h_.Bptr_[f->S_h_.n_boxes_] *
sizeof (dtype), hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->S_d_.w_, f->S_h_.w_,
f->S_h_.Bptr_[f->S_h_.n_boxes_] *
sizeof (dtype), hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->S_d_.Bptr_, f->S_h_.Bptr_,
(f->S_h_.n_boxes_ + 1) * sizeof (int),
hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->S_d_.Bn_, f->S_h_.Bn_,
f->S_h_.n_boxes_ * sizeof (int),
hipMemcpyHostToDevice));
}
void
xfer__TARGET_BOX__ (FMMWrapper_t* f)
{
/* Target boxes */
cutilSafeCall (hipMemcpy (f->T_d_.x_, f->T_h_.x_,
f->T_h_.Bptr_[f->T_h_.n_boxes_] *
sizeof (dtype), hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->T_d_.y_, f->T_h_.y_,
f->T_h_.Bptr_[f->T_h_.n_boxes_] *
sizeof (dtype), hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->T_d_.z_, f->T_h_.z_,
f->T_h_.Bptr_[f->T_h_.n_boxes_] *
sizeof (dtype), hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->T_d_.w_, f->T_h_.w_,
f->T_h_.Bptr_[f->T_h_.n_boxes_] *
sizeof (dtype), hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->T_d_.Bptr_, f->T_h_.Bptr_,
(f->T_h_.n_boxes_ + 1) * sizeof (int),
hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->T_d_.Bn_, f->T_h_.Bn_,
f->T_h_.n_boxes_ * sizeof (int),
hipMemcpyHostToDevice));
}
void
xfer__U_LIST__ (FMMWrapper_t* f)
{
/* Ulist */
cutilSafeCall (hipMemcpy (f->U_d_.L_, f->U_h_.L_,
f->U_h_.Ptr_[f->U_h_.n_boxes_] *
sizeof (int), hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->U_d_.Ptr_, f->U_h_.Ptr_,
(f->U_h_.n_boxes_ + 1) * sizeof (int),
hipMemcpyHostToDevice));
}
void
xfer__TAG__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMemcpy (f->tag_d_, f->tag_h_,
nodeVec.size () * sizeof (int),
hipMemcpyHostToDevice));
}
void
xfer__DEPTH__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* depth */
cutilSafeCall (hipMemcpy (f->depth_d_, f->depth_h_,
nodeVec.size () * sizeof (int),
hipMemcpyHostToDevice));
}
void
xfer__CHILDREN__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMemcpy (f->child_d_, f->child_h_,
nodeVec.size () * sizeof (int),
hipMemcpyHostToDevice));
}
void
xfer__RADIUS__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMemcpy (f->radius_d_, f->radius_h_,
nodeVec.size () * sizeof (dtype),
hipMemcpyHostToDevice));
}
void
xfer__CENTER__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMemcpy (f->center0_d_, f->center0_h_,
nodeVec.size () * sizeof (dtype),
hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->center1_d_, f->center1_h_,
nodeVec.size () * sizeof (dtype),
hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->center2_d_, f->center2_h_,
nodeVec.size () * sizeof (dtype),
hipMemcpyHostToDevice));
}
void
xfer__SP_UC__ (FMMWrapper_t* f)
{
cutilSafeCall (hipMemcpy (f->SP_UC_d_, f->SP_UC_h_,
3 * f->SP_UC_size_padded * sizeof (dtype),
hipMemcpyHostToDevice));
}
void
xfer__UC2UE__ (FMMWrapper_t* f)
{
/* UC2UE matrix */
cutilSafeCall (hipMemcpy (f->UC2UE_d_, f->UC2UE_h_,
f->UC2UE_r_padded * f->UC2UE_c * sizeof (dtype),
hipMemcpyHostToDevice));
}
void
xfer__UE2UC__ (FMMWrapper_t* f)
{
/* UE2UC matrix */
cutilSafeCall (hipMemcpy (f->UE2UC_d_, f->UE2UC_h_,
(2 * 2 * 2) * (f->UE2UC_r_padded * f->UE2UC_c) *
sizeof (dtype),
hipMemcpyHostToDevice));
}
void
xfer__SRC_UPW_EQU_DEN__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMemcpy (f->SRC_UPW_EQU_DEN_d_, f->SRC_UPW_EQU_DEN_h_,
nodeVec.size () * f->UC2UE_r_padded *
sizeof (dtype),
hipMemcpyHostToDevice));
}
void
xfer__VLIST_TLIST__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMemcpy (f->vlist_d_, f->vlist_h_,
f->list_size * sizeof (int),
hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->tlist_d_, f->tlist_h_,
f->list_size * sizeof (int),
hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->vlist_ptr_d_, f->vlist_ptr_h_,
(nodeVec.size () + 1) * sizeof (int),
hipMemcpyHostToDevice));
}
void
xfer__TRG_DWN_CHK_VAL__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMemcpy (f->TRG_DWN_CHK_VAL_d_, f->TRG_DWN_CHK_VAL_h_,
nodeVec.size () * f->SP_DC_n_padded_ *
sizeof (dtype),
hipMemcpyHostToDevice));
}
void
xfer__PATH2NODE__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMemcpy (f->path2Node_d_, f->path2Node_h_,
nodeVec.size () * sizeof (int3),
hipMemcpyHostToDevice));
}
void
xfer__PARENT__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMemcpy (f->parent_d_, f->parent_h_,
nodeVec.size () * sizeof (int),
hipMemcpyHostToDevice));
}
void
xfer__DC2DE__(FMMWrapper_t* f)
{
/* DC2DE_mat */
cutilSafeCall (hipMemcpy (f->DC2DE_d_, f->DC2DE_h_,
f->DC2DE_r_padded * f->DC2DE_c * sizeof (dtype),
hipMemcpyHostToDevice));
}
void
xfer__DE2DC__(FMMWrapper_t* f)
{
cutilSafeCall (hipMemcpy (f->DE2DC_d_, f->DE2DC_h_,
(2 * 2 * 2) * f->DE2DC_r_padded * f->DE2DC_c *
sizeof (dtype),
hipMemcpyHostToDevice));
}
void
xfer__SP_DE__ (FMMWrapper_t* f)
{
cutilSafeCall (hipMemcpy (f->SP_DE_d_, f->SP_DE_h_,
3 * f->SP_DE_n_padded * sizeof (dtype),
hipMemcpyHostToDevice));
}
void
xfer__SP_UE__ (FMMWrapper_t* f)
{
cutilSafeCall (hipMemcpy (f->SP_UE_d_, f->SP_UE_h_,
3 * f->SP_UE_n_padded * sizeof (dtype),
hipMemcpyHostToDevice));
}
void
xfer__W_LIST__ (FMMWrapper_t* f)
{
cutilSafeCall (hipMemcpy (f->W_d_.L_, f->W_h_.L_,
f->W_h_.Ptr_[f->W_h_.n_boxes_] *
sizeof (int), hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->W_d_.Ptr_, f->W_h_.Ptr_,
(f->W_h_.n_boxes_ + 1) * sizeof (int),
hipMemcpyHostToDevice));
}
void
xfer__SRCNUM__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMemcpy (f->srcNum_d_, f->srcNum_h_,
nodeVec.size () * sizeof (int),
hipMemcpyHostToDevice));
}
void
xfer__SP_DC__ (FMMWrapper_t* f)
{
cutilSafeCall (hipMemcpy (f->SP_DC_d_, f->SP_DC_h_,
3 * f->SP_DC_n_padded_ * sizeof (dtype),
hipMemcpyHostToDevice));
}
void
xfer__X_LIST__ (FMMWrapper_t* f)
{
cutilSafeCall (hipMemcpy (f->X_d_.L_, f->X_h_.L_,
f->X_h_.Ptr_[f->X_h_.n_boxes_] *
sizeof (int), hipMemcpyHostToDevice));
cutilSafeCall (hipMemcpy (f->X_d_.Ptr_, f->X_h_.Ptr_,
(f->X_h_.n_boxes_ + 1) * sizeof (int),
hipMemcpyHostToDevice));
}
void
xfer__TRGNUM__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMemcpy (f->trgNum_d_, f->trgNum_h_,
nodeVec.size () * sizeof (int),
hipMemcpyHostToDevice));
}
void
xfer__SRC_UPW_EQU_DEN__back (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMemcpy (f->SRC_UPW_EQU_DEN_h_, f->SRC_UPW_EQU_DEN_d_,
nodeVec.size () * f->UC2UE_r_padded *
sizeof (dtype),
hipMemcpyDeviceToHost));
}
void
xfer__TRG_DWN_CHK_VAL__back (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (hipMemcpy (f->TRG_DWN_CHK_VAL_h_, f->TRG_DWN_CHK_VAL_d_,
nodeVec.size () * f->SP_DC_n_padded_ *
sizeof (dtype),
hipMemcpyDeviceToHost));
}
void
xfer__TARGET_BOX__back (FMMWrapper_t* f)
{
cutilSafeCall (hipMemcpy (f->T_h_.w_, f->T_d_.w_,
f->T_h_.Bptr_[f->T_h_.n_boxes_] *
sizeof (dtype), hipMemcpyDeviceToHost));
}
void
free__SOURCE_BOX__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* Deallocate memory for data */
cutilSafeCall (hipFree (f->S_d_.x_));
cutilSafeCall (hipFree (f->S_d_.y_));
cutilSafeCall (hipFree (f->S_d_.z_));
cutilSafeCall (hipFree (f->S_d_.w_));
/* Deallocate memory for pointers */
cutilSafeCall (hipFree (f->S_d_.Bptr_));
cutilSafeCall (hipFree (f->S_d_.Bn_));
/* ------------------------------------------------------------ */
}
void
free__TARGET_BOX__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* Target boxes */
cutilSafeCall (hipFree (f->T_d_.x_));
cutilSafeCall (hipFree (f->T_d_.y_));
cutilSafeCall (hipFree (f->T_d_.z_));
cutilSafeCall (hipFree (f->T_d_.w_));
  /* Deallocate memory for pointers */
cutilSafeCall (hipFree (f->T_d_.Bptr_));
cutilSafeCall (hipFree (f->T_d_.Bn_));
/* ------------------------------------------------------------ */
}
void
free__U_LIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* ulist */
cutilSafeCall (hipFree (f->U_d_.L_));
cutilSafeCall (hipFree (f->U_d_.Ptr_));
/* ------------------------------------------------------------ */
}
void
free__TAG__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* tag */
cutilSafeCall (hipFree (f->tag_d_));
/* ------------------------------------------------------------ */
}
void
free__DEPTH__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* depth */
cutilSafeCall (hipFree (f->depth_d_));
/* ------------------------------------------------------------ */
}
void
free__CHILDREN__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* children */
cutilSafeCall (hipFree (f->child_d_));
/* ------------------------------------------------------------ */
}
void
free__RADIUS__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* radius */
cutilSafeCall (hipFree (f->radius_d_));
/* ------------------------------------------------------------ */
}
void
free__CENTER__ (FMMWrapper_t* f) {
/* ------------------------------------------------------------ */
cutilSafeCall (hipFree (f->center0_d_));
cutilSafeCall (hipFree (f->center1_d_));
cutilSafeCall (hipFree (f->center2_d_));
/* ------------------------------------------------------------ */
}
void
free__SP_UC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* SP[UC] */
cutilSafeCall (hipFree (f->SP_UC_d_));
/* ------------------------------------------------------------ */
}
void
free__UC2UE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* UC2UE matrix */
cutilSafeCall (hipFree (f->UC2UE_d_));
/* ------------------------------------------------------------ */
}
void
free__UE2UC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* UE2UC matrix */
cutilSafeCall (hipFree (f->UE2UC_d_));
/* ------------------------------------------------------------ */
}
void
free__SRC_UPW_EQU_DEN__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* Temporary up_calc GPU variables */
/* src_upw_equ_den */
cutilSafeCall (hipFree (f->SRC_UPW_EQU_DEN_d_));
/* ------------------------------------------------------------ */
}
void
free__VLIST_SRC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* src */
cutilSafeCall (hipFree (f->vlist_src_d_));
/* ------------------------------------------------------------ */
}
void
free__REG_DEN__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* reg_den */
cutilSafeCall (hipFree (f->reg_den_d_));
/* ------------------------------------------------------------ */
}
void
free__TT__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* tt */
cutilSafeCall (hipFree (f->tt));
/* ------------------------------------------------------------ */
}
void
free__VLIST_TRANS__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* trans */
cutilSafeCall (hipFree (f->vlist_trans_d_));
/* ------------------------------------------------------------ */
}
void
free__VLIST_TRG__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* trg */
cutilSafeCall (hipFree (f->vlist_trg_d_));
/* ------------------------------------------------------------ */
}
void
free__VLIST_TLIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* vlist and tlist and pointer */
/* pointer */
cutilSafeCall (hipFree (f->vlist_ptr_d_));
/* vlist */
cutilSafeCall (hipFree (f->vlist_d_));
/* tlist */
cutilSafeCall (hipFree (f->tlist_d_));
/* ------------------------------------------------------------ */
}
void
free__TRG_DWN_CHK_VAL__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* trg_dwn_chk_val */
cutilSafeCall (hipFree (f->TRG_DWN_CHK_VAL_d_));
/* ------------------------------------------------------------ */
}
void
free__PATH2NODE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* path2Node */
cutilSafeCall (hipFree (f->path2Node_d_));
/* ------------------------------------------------------------ */
}
void
free__PARENT__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* parent */
/* Originally considered redundant with the children structure,
   but the parent array is now required */
cutilSafeCall (hipFree (f->parent_d_));
/* ------------------------------------------------------------ */
}
void
free__TRG_DWN_EQU_DEN__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* trg_dwn_equ_den */
cutilSafeCall (hipFree (f->TRG_DWN_EQU_DEN_d_));
/* ------------------------------------------------------------ */
}
void
free__DC2DE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* DC2DE_mat */
cutilSafeCall (hipFree (f->DC2DE_d_));
/* ------------------------------------------------------------ */
}
void
free__DE2DC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* DE2DC_mat[8] */
cutilSafeCall (hipFree (f->DE2DC_d_));
/* ------------------------------------------------------------ */
}
void
free__SP_DE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* SP[DE] */
cutilSafeCall (hipFree (f->SP_DE_d_));
/* ------------------------------------------------------------ */
}
void
free__SP_UE__ (FMMWrapper_t* f)
{
cutilSafeCall (hipFree (f->SP_UE_d_));
}
void
free__W_LIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* wlist */
cutilSafeCall (hipFree (f->W_d_.L_));
cutilSafeCall (hipFree (f->W_d_.Ptr_));
/* ------------------------------------------------------------ */
}
void
free__SRCNUM__ (FMMWrapper_t* f)
{
cutilSafeCall (hipFree (f->srcNum_d_));
}
void
free__SP_DC__ (FMMWrapper_t* f)
{
cutilSafeCall (hipFree (f->SP_DC_d_));
}
void
free__X_LIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* xlist */
cutilSafeCall (hipFree (f->X_d_.L_));
cutilSafeCall (hipFree (f->X_d_.Ptr_));
/* ------------------------------------------------------------ */
}
void
free__TRGNUM__ (FMMWrapper_t* f)
{
cutilSafeCall (hipFree (f->trgNum_d_));
}
FMMWrapper_t *
preproc (AllNodes* All_N)
{
FMMWrapper_t* f = (FMMWrapper_t *) malloc (sizeof (FMMWrapper_t));
assert (f);
f->AN = All_N;
int i, j, idx;
Point3 c;
real_t r;
// int num_leaf_nodes;
// int num_non_leaf_nodes;
int num_leaf_nodes_src;
int num_leaf_nodes_trg;
int num_non_leaf_nodes_src;
int num_non_leaf_nodes_trg;
int list_size;
Pos *SP = All_N->SP;
Trans_matrix *TM = All_N->TM;
Pos *RP = All_N->RP;
struct stopwatch_t* timer = NULL;
struct stopwatch_t* timer_ = NULL;
long double t_data_cpu, t_data_gpu, t_pcie, t_subtract;
stopwatch_init ();
timer = stopwatch_create ();
timer_ = stopwatch_create ();
/* ============================================================= */
/* CPU SIDE
*/
real_t* tmp_c;
tmp_c = (real_t*) malloc (1024 * 1024);
assert (tmp_c);
fprintf (stderr, "Creating Host Data Structures ... ");
stopwatch_start (timer);
/* byte alignment required for coalesced loading */
int byte_padding = get_byte_padding ();
/* real_padding is padding in terms of # of data elements */
int real_padding = byte_padding / sizeof (dtype);
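/* Illustrative sizing note, assuming the default BYTEPAD of 128 bytes and
 * an 8-byte dtype (double); the actual values depend on the build and the
 * environment:
 *   real_padding = 128 / 8 = 16 elements,
 * so a box holding n = 5 points is stored in a 16-element slot and a box
 * holding n = 17 points in a 32-element slot, i.e.
 *   n_padded = ((n + 15) / 16) * 16.
 */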
/* Create GPU friendly Source boxes */
vector<NodeTree>& nodeVec = *All_N->N;
/*
ulist_create_boxes__double (&f->S_h_, nodeVec.size (),
All_N->Ns, real_padding);
*/
ulist_create_boxes__double_source (All_N, f);
/* Create GPU friendly Target boxes */
/*
ulist_create_boxes__double (&f->T_h_, nodeVec.size (),
All_N->Nt, real_padding);
*/
ulist_create_boxes__double_target (All_N, f);
/* Create GPU friendly ulist data structure */
ulist_create_ulist (&f->U_h_, nodeVec.size (), All_N);
/* depth and children */
f->depth_h_ = (int*) malloc (nodeVec.size () * sizeof (int));
assert (f->depth_h_);
for(i = 0; i < nodeVec.size (); i++) {
f->depth_h_[i] = nodeVec[i].depth;
}
// num_leaf_nodes = 0;
// num_non_leaf_nodes = 0;
/*
num_leaf_nodes = (int) pow (8.0, nodeVec[nodeVec.size () - 1].depth);
num_non_leaf_nodes = nodeVec.size () - num_leaf_nodes;
assert ((num_leaf_nodes + num_non_leaf_nodes) == nodeVec.size ());
*/
/* num leaf and non-leaf nodes for src and trg */
num_leaf_nodes_src = 0;
num_non_leaf_nodes_src = 0;
for(i = 0; i < nodeVec.size (); i++) {
if(nodeVec[i].tag & LET_SRCNODE && nodeVec[i].child == -1) {
num_leaf_nodes_src++;
} else if(nodeVec[i].tag & LET_SRCNODE && nodeVec[i].child != -1) {
num_non_leaf_nodes_src++;
}
}
num_leaf_nodes_trg = 0;
num_non_leaf_nodes_trg = 0;
for(i = 0; i < nodeVec.size (); i++) {
if(nodeVec[i].tag & LET_TRGNODE && nodeVec[i].child == -1) {
num_leaf_nodes_trg++;
} else if(nodeVec[i].tag & LET_TRGNODE && nodeVec[i].child != -1) {
num_non_leaf_nodes_trg++;
}
}
f->child_h_ = (int*) malloc (nodeVec.size () * sizeof (int));
assert (f->child_h_);
for(i = 0; i < nodeVec.size (); i++) {
f->child_h_[i] = nodeVec[i].child;
}
/* Pre-compute center and radius */
f->radius_h_ = (real_t*) malloc (nodeVec.size () * sizeof (real_t));
f->center0_h_ = (real_t*) malloc (nodeVec.size () * sizeof (real_t));
f->center1_h_ = (real_t*) malloc (nodeVec.size () * sizeof (real_t));
f->center2_h_ = (real_t*) malloc (nodeVec.size () * sizeof (real_t));
assert (f->radius_h_ && f->center0_h_ && f->center1_h_ && f->center2_h_);
// idx = 0;
for(i = 0; i < nodeVec.size (); i++) {
c = center (i, nodeVec);
r = radius (i, nodeVec);
f->radius_h_[i] = r;
f->center0_h_[i] = c(0);
f->center1_h_[i] = c(1);
f->center2_h_[i] = c(2);
}
/* tag */
/* SRC or TG */
f->tag_h_ = (int*) malloc (nodeVec.size () * sizeof (int));
for(i = 0; i < nodeVec.size (); i++) {
f->tag_h_[i] = nodeVec[i].tag;
}
/* SP[UC] */
/* SP[UC] consists of 3 arrays x, y, and z, each of which is
* (np+2)^3 - (np)^3 long
*/
/* allocate memory */
f->SP_UC_size = pln_size (UC, SP);
f->SP_UC_size_padded = (((pln_size (UC, SP) + real_padding - 1) /
real_padding) * real_padding);
/* 3x for x, y, and z */
f->SP_UC_h_ = (real_t*) malloc (3 * f->SP_UC_size_padded * sizeof (real_t));
assert (f->SP_UC_h_);
/* initialize data */
memcpy (&f->SP_UC_h_[0], SP[UC].x, SP[UC].n * sizeof (real_t));
memcpy (&f->SP_UC_h_[1 * f->SP_UC_size_padded], SP[UC].y,
SP[UC].n * sizeof (real_t));
memcpy (&f->SP_UC_h_[2 * f->SP_UC_size_padded], SP[UC].z,
SP[UC].n * sizeof (real_t));
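/* Layout note: SP_UC_h_ is a structure-of-arrays buffer holding the x, y
 * and z coordinates of the SP[UC] sample points back to back, each plane
 * padded to SP_UC_size_padded elements:
 *   x -> [0, SP_UC_size_padded)
 *   y -> [SP_UC_size_padded, 2 * SP_UC_size_padded)
 *   z -> [2 * SP_UC_size_padded, 3 * SP_UC_size_padded)
 * Only the first SP[UC].n entries of each plane are initialized; the
 * padding tail is left untouched.
 */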
/* UC2UE */
stopwatch_start (timer_);
compute_UC2UE_mat (TM, SP);
t_subtract = stopwatch_stop (timer_);
f->UC2UE_r = pln_size (UE, SP);
f->UC2UE_r_padded = (((f->UC2UE_r + real_padding - 1) / real_padding) *
real_padding);
f->UC2UE_c = pln_size (UC, SP);
f->UC2UE_h_ = (real_t*) malloc (f->UC2UE_r_padded * f->UC2UE_c *
sizeof (real_t));
assert (f->UC2UE_h_);
for(i = 0; i < f->UC2UE_c; i++) {
memcpy (&f->UC2UE_h_[i * f->UC2UE_r_padded], &TM->UC2UE[i * f->UC2UE_r],
f->UC2UE_r * sizeof (real_t));
}
/* UE2UC */
stopwatch_start (timer_);
TM->UE2UC = (real_t**) malloc (sizeof (real_t*) * 2 * 2 * 2);
for(int a_ = 0; a_ < 2; a_++) {
for(int b_ = 0; b_ < 2; b_++) {
for(int c_ = 0; c_ < 2; c_++) {
Index3 idx3(a_, b_, c_);
compute_UE2UC_mat (idx3, TM, SP);
}
}
}
t_subtract += stopwatch_stop (timer_);
f->UE2UC_r = pln_size (UC, SP);
f->UE2UC_r_padded = (((f->UE2UC_r + real_padding - 1) / real_padding) *
real_padding);
f->UE2UC_c = pln_size (UE, SP);
f->UE2UC_h_ = (real_t*) malloc ((2 * 2 * 2) * (f->UE2UC_r_padded * f->UE2UC_c)
* sizeof (real_t));
assert (f->UE2UC_h_);
/* copy each matrix */
for(i = 0; i < (2 * 2 * 2); i++) {
/* 1 column at a time */
for(j = 0; j < f->UE2UC_c; j++) {
memcpy (&f->UE2UC_h_[i * f->UE2UC_r_padded * f->UE2UC_c +
j * f->UE2UC_r_padded],
&TM->UE2UC[i][j * f->UE2UC_r], f->UE2UC_r * sizeof (dtype));
}
}
/* SRC_UPW_EQU_DEN_h_ */
f->SRC_UPW_EQU_DEN_h_ = (dtype*) malloc (nodeVec.size () * f->UC2UE_r_padded *
sizeof (dtype));
assert (f->SRC_UPW_EQU_DEN_h_);
/* up_calc__gpu configuration variables */
// f->num_non_leaf_nodes = num_non_leaf_nodes;
f->tree_max_depth = nodeVec[nodeVec.size () - 1].depth;
f->reduction_depth = f->tree_max_depth - 2;
f->num_nodes_reduction = (int) pow (8.0, f->reduction_depth);
f->reduction_offset = 0;
for(i = 0; i < f->reduction_depth; i++)
f->reduction_offset += (int) pow (8.0, i);
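/* The loop above accumulates a geometric series:
 *   reduction_offset = 8^0 + 8^1 + ... + 8^(reduction_depth - 1)
 *                    = (8^reduction_depth - 1) / 7.
 * Assuming the octree is complete and stored level by level (which the
 * pow(8.0, depth) arithmetic here relies on), this is the index of the
 * first node at depth reduction_depth; e.g. (illustrative) reduction_depth
 * = 2 gives 1 + 8 = 9, with num_nodes_reduction = 64 nodes on that level.
 */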
/* up_calc temporary arrays */
/* src_upw_equ_den */
/* There is no CPU equivalent of this, since it is needed only on the GPU */
/* VLIST data structures */
f->vlist_array_size = eff_data_size (UE);
/*
f->vlist_array_size_padded = (((f->vlist_array_size + real_padding - 1) /
real_padding) * real_padding);
*/
/* trg */
/* src */
f->vlist_ptr_h_ = (int*) malloc ((nodeVec.size () + 1) * sizeof (int));
assert (f->vlist_ptr_h_);
list_size = 0;
f->vlist_ptr_h_[0] = 0;
for(i = 0; i < nodeVec.size (); i++) {
list_size += nodeVec[i].Vnodes.size ();
f->vlist_ptr_h_[i+1] = list_size;
}
f->vlist_h_ = (int*) malloc (list_size * sizeof (int));
assert (f->vlist_h_);
idx = 0;
for(i = 0; i < nodeVec.size (); i++) {
for(j = 0; j < nodeVec[i].Vnodes.size (); j++) {
f->vlist_h_[idx] = nodeVec[i].Vnodes[j];
idx++;
}
}
assert (idx == list_size);
f->list_size = list_size;
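/* vlist_ptr_h_ / vlist_h_ form a CSR-style layout: the V-list of box i
 * occupies vlist_h_[vlist_ptr_h_[i]] .. vlist_h_[vlist_ptr_h_[i+1] - 1].
 * For example (illustrative only), three boxes with 2, 0 and 3 V-list
 * entries give vlist_ptr_h_ = {0, 2, 2, 5} and a 5-element vlist_h_.
 * The tlist_h_ built below is indexed the same way.
 */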
f->reg_den_size = RP->n;
/*
f->reg_den_size_padded = (((RP->n + real_padding - 1) / real_padding) *
real_padding);
*/
/* reg den needs no host equivalent */
/* trans */
f->trans_arrays_num = 7 * 7 * 7;
f->tlist_h_ = (int*) malloc (list_size * sizeof (int));
assert (f->tlist_h_);
int id;
int dim = 3;
int t_index = 0;
for(i = 0;i < nodeVec.size (); i++) {
if(nodeVec[i].tag & LET_TRGNODE && nodeVec[i].Vnodes.size () > 0) {
Point3 gNodeIdxCtr (center (i, nodeVec));
real_t D = 2.0 * radius (i, nodeVec);
for(j = 0;j < nodeVec[i].Vnodes.size (); j++) {
idx = nodeVec[i].Vnodes[j];
Point3 viCtr (center (idx, nodeVec));
Index3 idx3;
for(int d = 0; d < dim; d++) {
idx3(d) = int (round ((viCtr[d] - gNodeIdxCtr[d]) / D));
}
id = (idx3(0) + 3) + (idx3(1) + 3) * 7 + (idx3(2) + 3) * 7 * 7;
f->tlist_h_[t_index] = id;
t_index++;
}
}
}
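/* Each tlist entry packs the relative offset between a target box and one
 * of its V-list source boxes. The components of idx3 lie in [-3, 3], so
 * shifting by 3 and encoding base 7 yields id in [0, 342]; e.g.
 * (illustrative) idx3 = (1, 0, -2) gives id = 4 + 3*7 + 1*49 = 74, and
 * idx3 = (0, 0, 0) gives the central id 171. This matches
 * trans_arrays_num = 7 * 7 * 7 above.
 */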
/* No need for these on the host */
/* f->vlist_src_d_ */
/* f->vlist_trg_d_ */
/* f->vlist_trans_d_ */
f->RP_n_ = RP->n;
/* No need for these on the host */
/* f->RP_X_d_ */
/* f->RP_Y_d_ */
/* f->RP_Z_d_ */
/* IFFT */
f->SP_DC_n_ = pln_size (DC, SP);
f->SP_DC_n_padded_ = (((f->SP_DC_n_ + real_padding - 1) / real_padding) *
real_padding);
/* TRG_DWN_CHK_VAL_h_ */
f->TRG_DWN_CHK_VAL_h_ = (dtype*) malloc (nodeVec.size () *
f->SP_DC_n_padded_ * sizeof (dtype));
assert (f->TRG_DWN_CHK_VAL_h_);
/* DOWN_CALC */
/* path2Node */
f->path2Node_h_ = (int3*) malloc (nodeVec.size () * sizeof (int3));
assert (f->path2Node_h_);
for(i = 0; i < nodeVec.size (); i++) {
f->path2Node_h_[i].x = (nodeVec[i].path2Node)(0);
f->path2Node_h_[i].y = (nodeVec[i].path2Node)(1);
f->path2Node_h_[i].z = (nodeVec[i].path2Node)(2);
}
/* parent */
/* Originally considered redundant with the children structure,
   but the parent array is now required by the down-calc phase */
f->parent_h_ = (int*) malloc (nodeVec.size () * sizeof (int));
assert (f->parent_h_);
for(i = 0; i < nodeVec.size (); i++) {
f->parent_h_[i] = nodeVec[i].parent;
}
/* trg_dwn_equ_den */
f->SP_DE_n_ = pln_size (DE, SP);
f->SP_DE_n_padded = (((f->SP_DE_n_ + real_padding - 1) / real_padding) *
real_padding);
/* DC2DE_mat */
stopwatch_start (timer_);
compute_DC2DE_mat (TM, SP);
t_subtract += stopwatch_stop (timer_);
f->DC2DE_r = pln_size (DE, SP);
f->DC2DE_r_padded = (((f->DC2DE_r + real_padding - 1) / real_padding) *
real_padding);
f->DC2DE_c = pln_size (DC, SP);
f->DC2DE_h_ = (real_t*) malloc (f->DC2DE_r_padded * f->DC2DE_c *
sizeof (real_t));
assert (f->DC2DE_h_);
for(i = 0; i < f->DC2DE_c ; i++) {
memcpy (&f->DC2DE_h_[i * f->DC2DE_r_padded],
&TM->DC2DE[i * f->DC2DE_r],
f->DC2DE_r * sizeof (real_t));
}
/* DE2DC_mat[8] */
stopwatch_start (timer_);
TM->DE2DC = (real_t**) malloc (sizeof (real_t*) * 2 * 2 * 2);
for(int a = 0; a < 2; a++) {
for(int b = 0; b < 2; b++) {
for(int c = 0; c < 2 ; c++) {
Index3 idx(a, b, c);
compute_DE2DC_mat (idx, TM, SP);
}
}
}
t_subtract += stopwatch_stop (timer_);
f->DE2DC_r = pln_size (DC, SP);
f->DE2DC_r_padded = (((f->DE2DC_r + real_padding - 1) / real_padding) *
real_padding);
f->DE2DC_c = pln_size (DE, SP);
f->DE2DC_h_ = (real_t*) malloc ((2 * 2 * 2) * f->DE2DC_r_padded * f->DE2DC_c *
sizeof (real_t));
assert (f->DE2DC_h_);
for(i = 0; i < 2 * 2 * 2; i++) {
for(j = 0; j < f->DE2DC_c; j++) {
real_t* temp_tm = TM->DE2DC[i];
memcpy (&f->DE2DC_h_[i * f->DE2DC_r_padded * f->DE2DC_c +
j * f->DE2DC_r_padded],
&temp_tm[j * f->DE2DC_r],
f->DE2DC_r * sizeof (real_t));
}
}
/* down_calc configuration */
f->expansion_depth = 2;
f->num_nodes_expansion = (int) pow (8.0, f->expansion_depth);
f->expansion_offset = 0;
for(i = 0; i < f->expansion_depth; i++) {
f->expansion_offset += (int) pow (8.0, i);
}
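/* With expansion_depth fixed at 2 above, num_nodes_expansion = 8^2 = 64
 * and expansion_offset = 8^0 + 8^1 = 9, i.e. the index of the first node
 * on level 2 when a complete octree is stored level by level.
 */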
/* down_calc SP[DE] */
f->SP_DE_h_ = (dtype*) malloc (3 * f->SP_DE_n_padded * sizeof (dtype));
assert (f->SP_DE_h_);
memcpy (&f->SP_DE_h_[0], SP[DE].x, SP[DE].n * sizeof (dtype));
memcpy (&f->SP_DE_h_[f->SP_DE_n_padded], SP[DE].y, SP[DE].n * sizeof (dtype));
memcpy (&f->SP_DE_h_[2 * f->SP_DE_n_padded], SP[DE].z,
SP[DE].n * sizeof (dtype));
//t_data_cpu = stopwatch_stop (timer) - t_subtract;
t_data_cpu = stopwatch_stop (timer);
fprintf (stderr, "==> Time: %Lg secs\n", t_data_cpu);
/* WLIST_CALC */
f->SP_UE_n_ = pln_size (UE, SP);
f->SP_UE_n_padded = (((f->SP_UE_n_ + real_padding - 1) / real_padding) *
real_padding);
f->SP_UE_h_ = (dtype*) malloc (3 * f->SP_UE_n_padded * sizeof (dtype));
assert (f->SP_UE_h_);
memcpy (&f->SP_UE_h_[0], SP[UE].x, SP[UE].n * sizeof (dtype));
memcpy (&f->SP_UE_h_[f->SP_UE_n_padded], SP[UE].y, SP[UE].n * sizeof (dtype));
memcpy (&f->SP_UE_h_[2 * f->SP_UE_n_padded], SP[UE].z,
SP[UE].n * sizeof (dtype));
wlist_create_wlist (&f->W_h_, nodeVec.size (), All_N);
f->srcNum_h_ = (int*) malloc (nodeVec.size () * sizeof (int));
assert (f->srcNum_h_);
for(i = 0; i < nodeVec.size (); i++) {
f->srcNum_h_[i] = nodeVec[i].srcNum;
}
/* XLIST_CALC */
f->SP_DC_h_ = (dtype*) malloc (3 * f->SP_DC_n_padded_ * sizeof (dtype));
assert (f->SP_DC_h_);
memcpy (&f->SP_DC_h_[0], SP[DC].x, SP[DC].n * sizeof (dtype));
memcpy (&f->SP_DC_h_[f->SP_DC_n_padded_], SP[DC].y, SP[DC].n * sizeof (dtype));
memcpy (&f->SP_DC_h_[2 * f->SP_DC_n_padded_], SP[DC].z,
SP[DC].n * sizeof (dtype));
xlist_create_xlist (&f->X_h_, nodeVec.size (), All_N);
f->trgNum_h_ = (int*) malloc (nodeVec.size () * sizeof (int));
assert (f->trgNum_h_);
for(i = 0; i < nodeVec.size (); i++) {
f->trgNum_h_[i] = nodeVec[i].trgNum;
}
#if 0
long int bytes_up = 0;
/* source boxes */
bytes_up += 4 * f->S_h_.Bptr_[f->S_h_.n_boxes_] * sizeof (dtype);
bytes_up += (f->S_h_.n_boxes_ + 1) * sizeof (int);
bytes_up += (f->S_h_.n_boxes_) * sizeof (int);
/* Radius */
bytes_up += nodeVec.size () * sizeof (dtype);
/* center */
bytes_up += 3 * nodeVec.size () * sizeof (dtype);
/* SP_UC */
bytes_up += 3 * f->SP_UC_size_padded * sizeof (dtype);
/* UC2UE */
bytes_up += f->UC2UE_r_padded * f->UC2UE_c * sizeof (dtype);
/* src_upw_equ_den */
bytes_up += nodeVec.size () * f->UC2UE_r_padded * sizeof (dtype);
/* child */
bytes_up += nodeVec.size () * sizeof (int);
/* UE2UC */
bytes_up += 8 * f->UE2UC_r_padded * f->UE2UC_c * sizeof (dtype);
/* tag */
bytes_up += nodeVec.size () * sizeof (int);
/* depth */
bytes_up += nodeVec.size () * sizeof (int);
double mega_bytes_up = (1.0 * bytes_up / 1000000);
printf("UP_CALC requires %g mega bytes of data\n", mega_bytes_up);
#endif
#if 0
long int bytes_vlist = 0;
/* DEPTH */
bytes_vlist += nodeVec.size () * sizeof (int);
/* SRC_UPW_EQU_DEN */
bytes_vlist += nodeVec.size () * f->UC2UE_r_padded * sizeof (dtype);
/* REG_DEN */
bytes_vlist += nodeVec.size () * f->reg_den_size * sizeof (dtype);
/* VLIST_SRC */
bytes_vlist += nodeVec.size () * f->vlist_array_size * sizeof (dtype);
/* TT */
bytes_vlist += f->trans_arrays_num * f->RP_n_ * sizeof (dtype);
/* VLIST_TRANS */
bytes_vlist += f->trans_arrays_num * f->vlist_array_size * sizeof (dtype);
/* VLIST_TRG */
bytes_vlist += nodeVec.size () * f->vlist_array_size * sizeof (dtype);
/* VLIST_TLIST */
bytes_vlist += (nodeVec.size () + 1) * sizeof (int);
bytes_vlist += list_size * sizeof (int);
bytes_vlist += list_size * sizeof (int);
/* REG_DEN_IFFT */
bytes_vlist += nodeVec.size () * f->reg_den_size * sizeof (dtype);
/* TRG_DWN_CHK_VAL */
bytes_vlist += nodeVec.size () * f->SP_DC_n_padded_ * sizeof (dtype);
double mega_bytes_vlist = (1.0 * bytes_vlist / 1000000);
printf("VLIST requires %g mega bytes of data\n", mega_bytes_vlist);
#endif
/* ============================================================= */
/* GPU SIDE
*/
/* this is done to set up the GPU */
real_t* tmp_g;
cutilSafeCall (hipMalloc ((void**)&tmp_g, 1024 * 1024));
fprintf (stderr, "Creating GPU Data Structures ... ");
stopwatch_start (timer);
#if __SOURCE_BOX__
alloc__SOURCE_BOX__ (f);
#endif
#if __TARGET_BOX__
alloc__TARGET_BOX__ (f);
#endif
#if __U_LIST__
alloc__U_LIST__ (f);
#endif
#if __TAG__
alloc__TAG__ (f);
#endif
#if __DEPTH__
alloc__DEPTH__ (f);
#endif
#if __CHILDREN__
alloc__CHILDREN__ (f);
#endif
#if __RADIUS__
alloc__RADIUS__ (f);
#endif
#if __CENTER__
alloc__CENTER__ (f);
#endif
#if __SP_UC__
alloc__SP_UC__ (f);
#endif
#if __UC2UE__
alloc__UC2UE__ (f);
#endif
#if __UE2UC__
alloc__UE2UC__ (f);
#endif
#if __SRC_UPW_EQU_DEN__
alloc__SRC_UPW_EQU_DEN__ (f);
#endif
/* Vlist */
#if __VLIST_SRC__
alloc__VLIST_SRC__ (f);
#endif
#if __REG_DEN__
alloc__REG_DEN__ (f);
#endif
#if __TT__
alloc__TT__ (f);
#endif
#if __VLIST_TRANS__
alloc__VLIST_TRANS__ (f);
#endif
#if __VLIST_TRG__
alloc__VLIST_TRG__ (f);
#endif
#if __VLIST_TLIST__
alloc__VLIST_TLIST__ (f);
#endif
#if 0
#if __REG_DEN_IFFT__
/* ------------------------------------------------------------ */
/* IFFT */
cutilSafeCall (hipMalloc ((void**)&f->reg_den_ifft_d_,
nodeVec.size () * f->reg_den_size *
sizeof (dtype)));
assert (f->reg_den_ifft_d_);
/* ------------------------------------------------------------ */
#endif
#endif
#if __TRG_DWN_CHK_VAL__
alloc__TRG_DWN_CHK_VAL__ (f);
#endif
/* DOWN_CALC */
#if __PATH2NODE__
alloc__PATH2NODE__ (f);
#endif
#if __PARENT__
alloc__PARENT__ (f);
#endif
#if __TRG_DWN_EQU_DEN__
alloc__TRG_DWN_EQU_DEN__ (f);
#endif
#if __DC2DE__
alloc__DC2DE__ (f);
#endif
#if __DE2DC__
alloc__DE2DC__ (f);
#endif
#if __SP_DE__
alloc__SP_DE__ (f);
#endif
#if __SP_UE__
alloc__SP_UE__ (f);
#endif
#if __W_LIST__
alloc__W_LIST__ (f);
#endif
#if __SRCNUM__
alloc__SRCNUM__ (f);
#endif
#if __SP_DC__
alloc__SP_DC__ (f);
#endif
#if __X_LIST__
alloc__X_LIST__ (f);
#endif
#if __TRGNUM__
alloc__TRGNUM__ (f);
#endif
t_data_gpu = stopwatch_stop (timer);
fprintf (stderr, "==> Time: %Lg secs\n", t_data_gpu);
/* ============================================================= */
/* Copy data over to GPU
*/
fprintf (stderr, "Copying Data over PCIE ... ");
stopwatch_start (timer);
#if __SOURCE_BOX__
xfer__SOURCE_BOX__ (f);
#endif
#if __TARGET_BOX__
xfer__TARGET_BOX__ (f);
#endif
#if __U_LIST__
xfer__U_LIST__ (f);
#endif
#if __TAG__
xfer__TAG__ (f);
#endif
#if __DEPTH__
xfer__DEPTH__ (f);
#endif
#if __CHILDREN__
xfer__CHILDREN__ (f);
#endif
/* center and radius */
#if __RADIUS__
xfer__RADIUS__ (f);
#endif
#if __CENTER__
xfer__CENTER__ (f);
#endif
/* SP[UC] */
#if __SP_UC__
xfer__SP_UC__ (f);
#endif
#if __UC2UE__
xfer__UC2UE__ (f);
#endif
#if __UE2UC__
xfer__UE2UC__ (f);
#endif
/* No copying necessary for SRC_UPW_EQU_DEN_d_ */
/* No copying necessary for vlist_src_d_, vlist_trg_d_, vlist_trans_d_*/
/* No copying necessary for tt and reg_den */
/* vlist, tlist and pointer */
#if __VLIST_TLIST__
xfer__VLIST_TLIST__ (f);
#endif
/* No copying necessary for reg_den_ifft_d_ */
/* No copying necessary for TRG_DWN_CHK_VAL_d_ */
/* DOWN_CALC */
/* path2Node */
#if __PATH2NODE__
xfer__PATH2NODE__ (f);
#endif
/* No copying necessary for TRG_DWN_EQU_DEN_d_ */
/* parent */
/* Originally considered redundant with the children structure,
   but the parent array is now required */
#if __PARENT__
xfer__PARENT__ (f);
#endif
#if __DC2DE__
xfer__DC2DE__ (f);
#endif
/* DE2DC_mat[8] */
#if __DE2DC__
xfer__DE2DC__ (f);
#endif
/* SP[DE] */
#if __SP_DE__
xfer__SP_DE__ (f);
#endif
/* SP[UE] */
#if __SP_UE__
xfer__SP_UE__ (f);
#endif
#if __W_LIST__
xfer__W_LIST__ (f);
#endif
#if __SRCNUM__
xfer__SRCNUM__ (f);
#endif
#if __SP_DC__
xfer__SP_DC__ (f);
#endif
#if __X_LIST__
xfer__X_LIST__ (f);
#endif
#if __TRGNUM__
xfer__TRGNUM__ (f);
#endif
t_pcie = stopwatch_stop (timer);
fprintf (stderr, "==> Time: %Lg secs\n", t_pcie);
return f;
}
| 2eb5a56ff307b5e097f07087175e06d8b9543dcd.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include "evaluate.h"
#include "util.h"
#include "reals.h"
#include "partial.h"
#include "../timing/timing.h"
#include "node_gpu.h"
#include <cutil_inline.h>
/* ------------------------------------------------------------------------
*/
int
get_byte_padding()
{
return getenv__int("BYTEPAD", 128);
}
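/* The alignment is overridable at run time through the BYTEPAD environment
 * variable read above; e.g. (illustrative) exporting BYTEPAD=64 halves the
 * padding from the default 128 bytes. */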
/* ------------------------------------------------------------------------
*/
void
xlist_create_xlist (UList_t* U, int num_boxes, AllNodes* All_N)
{
int i, j, nu;
int list_size = 0;
assert (U && All_N);
vector<NodeTree>& nodeVec = *All_N->N;
/* allocate memory for ulist ptr */
U->n_boxes_ = num_boxes;
U->Ptr_ = (int *) malloc (sizeof (int) * (num_boxes + 1));
assert (U->Ptr_);
/* See how big ulist should be */
U->Ptr_[0] = 0;
for(i = 0; i < U->n_boxes_; i++) {
list_size += nodeVec[i].Xnodes.size ();
U->Ptr_[i + 1] = list_size;
}
/* allocate memory for ulist */
U->L_ = (int*) malloc (sizeof (int) * list_size);
assert (U->L_);
/* initialize ulist */
for(i = 0; i < U->n_boxes_; i++) {
nu = nodeVec[i].Xnodes.size ();
for(j = 0; j < nu; j++) {
U->L_[U->Ptr_[i] + j] = nodeVec[i].Xnodes[j];
}
}
}
void
wlist_create_wlist (UList_t* U, int num_boxes, AllNodes* All_N)
{
int i, j, nu;
int list_size = 0;
assert (U && All_N);
vector<NodeTree>& nodeVec = *All_N->N;
/* allocate memory for ulist ptr */
U->n_boxes_ = num_boxes;
U->Ptr_ = (int *) malloc (sizeof (int) * (num_boxes + 1));
assert (U->Ptr_);
/* See how big ulist should be */
U->Ptr_[0] = 0;
for(i = 0; i < U->n_boxes_; i++) {
list_size += nodeVec[i].Wnodes.size ();
U->Ptr_[i + 1] = list_size;
}
/* allocate memory for ulist */
U->L_ = (int*) malloc (sizeof (int) * list_size);
assert (U->L_);
/* initialize ulist */
for(i = 0; i < U->n_boxes_; i++) {
nu = nodeVec[i].Wnodes.size ();
for(j = 0; j < nu; j++) {
U->L_[U->Ptr_[i] + j] = nodeVec[i].Wnodes[j];
}
}
}
void
ulist_create_ulist (UList_t* U, int num_boxes, AllNodes* All_N)
{
int i, j, nu;
int list_size = 0;
assert (U && All_N);
vector<NodeTree>& nodeVec = *All_N->N;
/* allocate memory for ulist ptr */
U->n_boxes_ = num_boxes;
U->Ptr_ = (int *) malloc (sizeof (int) * (num_boxes + 1));
assert (U->Ptr_);
/* See how big ulist should be */
U->Ptr_[0] = 0;
for(i = 0; i < U->n_boxes_; i++) {
list_size += nodeVec[i].Unodes.size ();
U->Ptr_[i + 1] = list_size;
}
/* allocate memory for ulist */
U->L_ = (int*) malloc (sizeof (int) * list_size);
assert (U->L_);
/* initialize ulist */
for(i = 0; i < U->n_boxes_; i++) {
nu = nodeVec[i].Unodes.size ();
for(j = 0; j < nu; j++) {
U->L_[U->Ptr_[i] + j] = nodeVec[i].Unodes[j];
}
}
}
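/* All three list builders above produce the same CSR-style layout: for box
 * i, the neighbor indices live in U->L_[U->Ptr_[i]] .. U->L_[U->Ptr_[i+1] - 1]
 * and U->Ptr_[n_boxes_] is the total list length. For example (illustrative
 * only), boxes with 2, 0 and 3 neighbors give Ptr_ = {0, 2, 2, 5} and a
 * 5-element L_. */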
/* ------------------------------------------------------------------------
*/
void
ulist_create_boxes__double_source (AllNodes *All_N, FMMWrapper_t *F)
{
int i, j, n;
int padding, n_padded, n_points_, n_points_padded_;
vector<NodeTree>& nodeVec = *All_N->N;
Boxes_t *B;
Node *N;
B = &F->S_h_;
N = All_N->Ns;
assert (B && N);
padding = get_byte_padding () / sizeof (dtype);
B->n_boxes_ = nodeVec.size ();
B->Bptr_ = (int *) malloc (sizeof (int) * (B->n_boxes_ + 1));
B->Bn_ = (int *) malloc (sizeof (int) * B->n_boxes_);
assert (B->Bptr_ && B->Bn_);
n_points_ = 0;
n_points_padded_ = 0;
B->Bptr_[0] = 0;
for(i = 0; i < B->n_boxes_; i++) {
if(nodeVec[i].tag & LET_SRCNODE && nodeVec[i].child == -1) {
n = N[i].num_pts;
n_padded = ((n + padding - 1) / padding) * padding;
assert (n_padded >= n);
B->Bn_[i] = n;
B->Bptr_[i + 1] = B->Bptr_[i] + n_padded;
n_points_ += n;
n_points_padded_ += n_padded;
} else {
B->Bn_[i] = 0;
B->Bptr_[i + 1] = B->Bptr_[i];
}
}
assert (n_points_padded_ == B->Bptr_[B->n_boxes_]);
B->n_points_ = n_points_;
B->x_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
B->y_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
B->z_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
B->w_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
assert (B->x_ && B->y_ && B->z_ && B->w_);
/* copy points */
for(i = 0; i < B->n_boxes_; i++) {
if(nodeVec[i].tag & LET_SRCNODE) {
n = N[i].num_pts;
for(j = 0; j < n; j++) {
B->x_[B->Bptr_[i] + j] = N[i].x[j];
B->y_[B->Bptr_[i] + j] = N[i].y[j];
B->z_[B->Bptr_[i] + j] = N[i].z[j];
B->w_[B->Bptr_[i] + j] = N[i].den_pot[j];
}
}
}
}
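/* Boxes_t uses a padded structure-of-arrays layout: x_/y_/z_/w_ are parallel
 * arrays, Bptr_[i] is the padding-aligned start offset of box i, Bn_[i] the
 * number of valid points, and Bptr_[i+1] - Bptr_[i] the padded slot width.
 * Boxes that are not source leaves get an empty slot (Bn_ = 0, zero width).
 * For example (illustrative, 16-element padding), boxes with 5 and 17 points
 * start at offsets 0 and 16, and the following box starts at 48. */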
void
ulist_create_boxes__double_target (AllNodes *All_N, FMMWrapper_t *F)
{
int i, j, n;
int padding, n_padded, n_points_, n_points_padded_;
vector<NodeTree>& nodeVec = *All_N->N;
Boxes_t *B;
Node *N;
B = &F->T_h_;
N = All_N->Nt;
assert (B && N);
padding = get_byte_padding () / sizeof (dtype);
B->n_boxes_ = nodeVec.size ();
B->Bptr_ = (int *) malloc (sizeof (int) * (B->n_boxes_ + 1));
B->Bn_ = (int *) malloc (sizeof (int) * B->n_boxes_);
assert (B->Bptr_ && B->Bn_);
n_points_ = 0;
n_points_padded_ = 0;
B->Bptr_[0] = 0;
for(i = 0; i < B->n_boxes_; i++) {
if(nodeVec[i].tag & LET_TRGNODE && nodeVec[i].child == -1) {
n = N[i].num_pts;
n_padded = ((n + padding - 1) / padding) * padding;
assert (n_padded >= n);
B->Bn_[i] = n;
B->Bptr_[i + 1] = B->Bptr_[i] + n_padded;
n_points_ += n;
n_points_padded_ += n_padded;
} else {
B->Bn_[i] = 0;
B->Bptr_[i + 1] = B->Bptr_[i];
}
}
assert (n_points_padded_ == B->Bptr_[B->n_boxes_]);
B->n_points_ = n_points_;
B->x_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
B->y_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
B->z_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
B->w_ = (real_t *) malloc (n_points_padded_ * sizeof (real_t));
assert (B->x_ && B->y_ && B->z_ && B->w_);
/* copy points */
for(i = 0; i < B->n_boxes_; i++) {
if(nodeVec[i].tag & LET_TRGNODE) {
n = N[i].num_pts;
for(j = 0; j < n; j++) {
B->x_[B->Bptr_[i] + j] = N[i].x[j];
B->y_[B->Bptr_[i] + j] = N[i].y[j];
B->z_[B->Bptr_[i] + j] = N[i].z[j];
B->w_[B->Bptr_[i] + j] = N[i].den_pot[j];
}
}
}
}
void
ulist_create_boxes__double (Boxes_t* B,
int num_boxes,
const Node* N,
int padding)
{
int i, j, n, n_padded, min;
/* total number of points */
int n_points_ = 0;
int n_points_padded_ = 0;
/* check if structures that were passed in are valid */
assert (B && N);
/* allocate memory to data structures */
B->n_boxes_ = num_boxes;
B->Bptr_ = (int *) malloc (sizeof (int) * (B->n_boxes_ + 1));
B->Bn_ = (int *) malloc (sizeof (int) * B->n_boxes_);
assert (B->Bptr_ && B->Bn_);
/* initialize data structures */
B->Bptr_[0] = 0;
for(i = 0; i < num_boxes; i++) {
/* number of points in this box */
n = N[i].num_pts;
/* number of points in this box if padded */
n_padded = ((n + padding - 1) / padding) * padding;
assert (n_padded >= n);
/* make Bn_ and Bptr_ have/point to the right values */
B->Bn_[i] = n;
B->Bptr_[i+1] = B->Bptr_[i] + n_padded;
n_points_ += n;
n_points_padded_ += n_padded;
}
assert (n_points_padded_ == B->Bptr_[B->n_boxes_]);
/* allocate memory to data structures that are going to hold the values */
B->n_points_ = n_points_;
B->x_ = (real_t*) malloc (n_points_padded_ * sizeof (real_t));
B->y_ = (real_t*) malloc (n_points_padded_ * sizeof (real_t));
B->z_ = (real_t*) malloc (n_points_padded_ * sizeof (real_t));
B->w_ = (real_t*) malloc (n_points_padded_ * sizeof (real_t));
assert (B->x_ && B->y_ && B->z_ && B->w_);
/* copy points */
for(i = 0; i < num_boxes; i++) {
n = N[i].num_pts;
min = B->Bptr_[i];
for(j = 0; j < n; j++) {
B->x_[min + j] = N[i].x[j];
B->y_[min + j] = N[i].y[j];
B->z_[min + j] = N[i].z[j];
B->w_[min + j] = N[i].den_pot[j];
}
}
}
/* ------------------------------------------------------------------------
*/
void
alloc__SOURCE_BOX__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* Source boxes */
f->S_d_.n_points_ = f->S_h_.n_points_;
f->S_d_.n_boxes_ = f->S_h_.n_boxes_;
/* Allocate memory for data */
cutilSafeCall (cudaMalloc ((void**)&f->S_d_.x_,
f->S_h_.Bptr_[f->S_h_.n_boxes_]
* sizeof (dtype)));
cutilSafeCall (cudaMalloc ((void**)&f->S_d_.y_,
f->S_h_.Bptr_[f->S_h_.n_boxes_]
* sizeof (dtype)));
cutilSafeCall (cudaMalloc ((void**)&f->S_d_.z_,
f->S_h_.Bptr_[f->S_h_.n_boxes_]
* sizeof (dtype)));
cutilSafeCall (cudaMalloc ((void**)&f->S_d_.w_,
f->S_h_.Bptr_[f->S_h_.n_boxes_]
* sizeof (dtype)));
/* Allocate memory for pointers */
cutilSafeCall (cudaMalloc ((void**)&f->S_d_.Bptr_,
(f->S_d_.n_boxes_ + 1) * sizeof (int)));
cutilSafeCall (cudaMalloc ((void**)&f->S_d_.Bn_,
f->S_d_.n_boxes_ * sizeof (int)));
assert (&f->S_d_ && &f->S_h_);
/* ------------------------------------------------------------ */
}
void
alloc__TARGET_BOX__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* Target boxes */
f->T_d_.n_points_ = f->T_h_.n_points_;
f->T_d_.n_boxes_ = f->T_h_.n_boxes_;
/* Allocate memory for data */
cutilSafeCall (cudaMalloc ((void**)&f->T_d_.x_,
f->T_h_.Bptr_[f->T_h_.n_boxes_]
* sizeof (dtype)));
cutilSafeCall (cudaMalloc ((void**)&f->T_d_.y_,
f->T_h_.Bptr_[f->T_h_.n_boxes_]
* sizeof (dtype)));
cutilSafeCall (cudaMalloc ((void**)&f->T_d_.z_,
f->T_h_.Bptr_[f->T_h_.n_boxes_]
* sizeof (dtype)));
cutilSafeCall (cudaMalloc ((void**)&f->T_d_.w_,
f->T_h_.Bptr_[f->T_h_.n_boxes_]
* sizeof (dtype)));
/* Allocate memory for pointers */
cutilSafeCall (cudaMalloc ((void**)&f->T_d_.Bptr_,
(f->T_h_.n_boxes_ + 1) * sizeof (int)));
cutilSafeCall (cudaMalloc ((void**)&f->T_d_.Bn_,
f->T_h_.n_boxes_ * sizeof (int)));
assert (&f->T_d_ && &f->T_h_);
/* ------------------------------------------------------------ */
}
void
alloc__U_LIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* ulist */
f->U_d_.n_boxes_ = f->U_h_.n_boxes_;
cutilSafeCall (cudaMalloc ((void**)&f->U_d_.L_,
f->U_h_.Ptr_[f->U_h_.n_boxes_] * sizeof (int)));
cutilSafeCall (cudaMalloc ((void**)&f->U_d_.Ptr_,
(f->U_h_.n_boxes_ + 1) * sizeof (int)));
assert (&f->U_d_ && &f->U_h_);
/* ------------------------------------------------------------ */
}
void
alloc__TAG__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* tag */
cutilSafeCall (cudaMalloc ((void**)&f->tag_d_,
nodeVec.size () * sizeof (int)));
assert (f->tag_d_);
/* ------------------------------------------------------------ */
}
void
alloc__DEPTH__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* depth */
cutilSafeCall (cudaMalloc ((void**)&f->depth_d_,
nodeVec.size () * sizeof (int)));
assert (f->depth_d_);
/* ------------------------------------------------------------ */
}
void
alloc__CHILDREN__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* children */
/*
cutilSafeCall (cudaMalloc ((void**)&f->child_d_,
num_non_leaf_nodes * sizeof (int)));
*/
cutilSafeCall (cudaMalloc ((void**)&f->child_d_,
nodeVec.size () * sizeof (int)));
assert (f->child_d_);
/* ------------------------------------------------------------ */
}
void
alloc__RADIUS__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* radius */
/*
cutilSafeCall (cudaMalloc ((void**)&f->radius_d_,
num_leaf_nodes * sizeof (dtype)));
*/
cutilSafeCall (cudaMalloc ((void**)&f->radius_d_,
nodeVec.size () * sizeof (dtype)));
assert (f->radius_d_);
/* ------------------------------------------------------------ */
}
void
alloc__CENTER__ (FMMWrapper_t* f) {
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* radius */
/*
cutilSafeCall (cudaMalloc ((void**)&f->radius_d_,
num_leaf_nodes * sizeof (dtype)));
*/
cutilSafeCall (cudaMalloc ((void**)&f->radius_d_,
nodeVec.size () * sizeof (dtype)));
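  /* Note: radius_d_ is also allocated in alloc__RADIUS__; when both
   * __RADIUS__ and __CENTER__ are enabled, this second cudaMalloc replaces
   * the earlier pointer and the first allocation is leaked. It is
   * presumably kept here so the radius is still available when only
   * __CENTER__ is compiled in. */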
/* ------------------------------------------------------------ */
/* center */
cutilSafeCall (cudaMalloc ((void**)&f->center0_d_,
nodeVec.size () * sizeof (dtype)));
cutilSafeCall (cudaMalloc ((void**)&f->center1_d_,
nodeVec.size () * sizeof (dtype)));
cutilSafeCall (cudaMalloc ((void**)&f->center2_d_,
nodeVec.size () * sizeof (dtype)));
assert (f->center0_d_ && f->center1_d_ && f->center2_d_);
/* ------------------------------------------------------------ */
}
void
alloc__SP_UC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* SP[UC] */
cutilSafeCall (cudaMalloc ((void**)&f->SP_UC_d_,
3 * f->SP_UC_size_padded * sizeof (dtype)));
assert (f->SP_UC_d_);
/* ------------------------------------------------------------ */
}
void
alloc__UC2UE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* UC2UE matrix */
cutilSafeCall (cudaMalloc ((void**)&f->UC2UE_d_,
f->UC2UE_r_padded * f->UC2UE_c * sizeof (dtype)));
assert (f->UC2UE_d_);
/* ------------------------------------------------------------ */
}
void
alloc__UE2UC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* UE2UC matrix */
cutilSafeCall (cudaMalloc ((void**)&f->UE2UC_d_,
(2 * 2 * 2) * (f->UE2UC_r_padded * f->UE2UC_c) *
sizeof (dtype)));
assert (f->UE2UC_d_);
/* ------------------------------------------------------------ */
}
void
alloc__SRC_UPW_EQU_DEN__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* Temporary up_calc GPU variables */
/* src_upw_equ_den */
cutilSafeCall (cudaMalloc ((void**)&f->SRC_UPW_EQU_DEN_d_,
nodeVec.size () * f->UC2UE_r_padded *
sizeof (dtype)));
assert (f->SRC_UPW_EQU_DEN_d_);
/* ------------------------------------------------------------ */
}
void
alloc__VLIST_SRC__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* src */
cutilSafeCall (cudaMalloc ((void**)&f->vlist_src_d_,
nodeVec.size () * f->vlist_array_size *
sizeof (dtype)));
assert (f->vlist_src_d_);
/* ------------------------------------------------------------ */
}
void
alloc__REG_DEN__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* reg_den */
cutilSafeCall (cudaMalloc ((void**)&f->reg_den_d_,
nodeVec.size () * f->reg_den_size *
sizeof (dtype)));
assert (f->reg_den_d_);
/* ------------------------------------------------------------ */
}
void
alloc__TT__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* tt */
cutilSafeCall (cudaMalloc ((void**)&f->tt, f->trans_arrays_num * f->RP_n_ *
sizeof (dtype)));
assert (f->tt);
/* ------------------------------------------------------------ */
}
void
alloc__VLIST_TRANS__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* trans */
cutilSafeCall (cudaMalloc ((void**)&f->vlist_trans_d_,
f->trans_arrays_num * f->vlist_array_size *
sizeof (dtype)));
assert (f->vlist_trans_d_);
/* ------------------------------------------------------------ */
}
void
alloc__VLIST_TRG__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* trg */
cutilSafeCall (cudaMalloc ((void**)&f->vlist_trg_d_,
nodeVec.size () * f->vlist_array_size *
sizeof (dtype)));
assert (f->vlist_trg_d_);
/* ------------------------------------------------------------ */
}
void
alloc__VLIST_TLIST__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* vlist and tlist and pointer */
/* pointer */
cutilSafeCall (cudaMalloc ((void**)&f->vlist_ptr_d_,
(nodeVec.size () + 1) * sizeof (int)));
assert (f->vlist_ptr_d_);
/* vlist */
cutilSafeCall (cudaMalloc ((void**)&f->vlist_d_, f->list_size * sizeof (int)));
assert (f->vlist_d_);
/* tlist */
cutilSafeCall (cudaMalloc ((void**)&f->tlist_d_, f->list_size * sizeof (int)));
assert (f->tlist_d_);
/* ------------------------------------------------------------ */
}
void
alloc__TRG_DWN_CHK_VAL__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* trg_dwn_chk_val */
cutilSafeCall (cudaMalloc ((void**)&f->TRG_DWN_CHK_VAL_d_,
nodeVec.size () * f->SP_DC_n_padded_ *
sizeof (dtype)));
assert (f->TRG_DWN_CHK_VAL_d_);
/* ------------------------------------------------------------ */
}
void
alloc__PATH2NODE__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* path2Node */
cutilSafeCall (cudaMalloc ((void**)&f->path2Node_d_,
nodeVec.size () * sizeof (int3)));
assert (f->path2Node_d_);
/* ------------------------------------------------------------ */
}
void
alloc__PARENT__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* parent */
/* Originally considered redundant with the children structure,
   but the parent array is now required */
cutilSafeCall (cudaMalloc ((void**)&f->parent_d_,
nodeVec.size () * sizeof (int)));
assert (f->parent_d_);
/* ------------------------------------------------------------ */
}
void
alloc__TRG_DWN_EQU_DEN__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* ------------------------------------------------------------ */
/* trg_dwn_equ_den */
cutilSafeCall (cudaMalloc ((void**)&f->TRG_DWN_EQU_DEN_d_,
nodeVec.size () * f->SP_DE_n_padded *
sizeof (dtype)));
assert (f->TRG_DWN_EQU_DEN_d_);
/* ------------------------------------------------------------ */
}
void
alloc__DC2DE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* DC2DE_mat */
cutilSafeCall (cudaMalloc ((void**)&f->DC2DE_d_,
f->DC2DE_r_padded * f->DC2DE_c * sizeof (dtype)));
assert (f->DC2DE_d_);
/* ------------------------------------------------------------ */
}
void
alloc__DE2DC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* DE2DC_mat[8] */
cutilSafeCall (cudaMalloc ((void**)&f->DE2DC_d_,
(2 * 2 * 2) * f->DE2DC_r_padded * f->DE2DC_c *
sizeof (dtype)));
assert (f->DE2DC_d_);
/* ------------------------------------------------------------ */
}
void
alloc__SP_DE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* SP[DE] */
cutilSafeCall (cudaMalloc ((void**)&f->SP_DE_d_,
3 * f->SP_DE_n_padded * sizeof (dtype)));
assert (f->SP_DE_d_);
/* ------------------------------------------------------------ */
}
void
alloc__SP_UE__ (FMMWrapper_t* f)
{
cutilSafeCall (cudaMalloc ((void**)&f->SP_UE_d_,
3 * f->SP_UE_n_padded * sizeof (dtype)));
assert (f->SP_UE_d_);
}
void
alloc__W_LIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* wlist */
f->W_d_.n_boxes_ = f->W_h_.n_boxes_;
cutilSafeCall (cudaMalloc ((void**)&f->W_d_.L_,
f->W_h_.Ptr_[f->W_h_.n_boxes_] * sizeof (int)));
cutilSafeCall (cudaMalloc ((void**)&f->W_d_.Ptr_,
(f->W_h_.n_boxes_ + 1) * sizeof (int)));
assert (&f->W_d_ && &f->W_h_);
/* ------------------------------------------------------------ */
}
void
alloc__SRCNUM__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMalloc ((void**)&f->srcNum_d_,
nodeVec.size () * sizeof (int)));
assert (f->srcNum_d_);
}
void
alloc__SP_DC__ (FMMWrapper_t* f)
{
cutilSafeCall (cudaMalloc ((void**)&f->SP_DC_d_,
3 * f->SP_DC_n_padded_ * sizeof (dtype)));
assert (f->SP_DC_d_);
}
void
alloc__X_LIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* xlist */
f->X_d_.n_boxes_ = f->X_h_.n_boxes_;
cutilSafeCall (cudaMalloc ((void**)&f->X_d_.L_,
f->X_h_.Ptr_[f->X_h_.n_boxes_] * sizeof (int)));
cutilSafeCall (cudaMalloc ((void**)&f->X_d_.Ptr_,
(f->X_h_.n_boxes_ + 1) * sizeof (int)));
assert (&f->X_d_ && &f->X_h_);
/* ------------------------------------------------------------ */
}
void
alloc__TRGNUM__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMalloc ((void**)&f->trgNum_d_,
nodeVec.size () * sizeof (int)));
assert (f->trgNum_d_);
}
void
xfer__SOURCE_BOX__ (FMMWrapper_t* f)
{
/* Source boxes */
cutilSafeCall (cudaMemcpy (f->S_d_.x_, f->S_h_.x_,
f->S_h_.Bptr_[f->S_h_.n_boxes_] *
sizeof (dtype), cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->S_d_.y_, f->S_h_.y_,
f->S_h_.Bptr_[f->S_h_.n_boxes_] *
sizeof (dtype), cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->S_d_.z_, f->S_h_.z_,
f->S_h_.Bptr_[f->S_h_.n_boxes_] *
sizeof (dtype), cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->S_d_.w_, f->S_h_.w_,
f->S_h_.Bptr_[f->S_h_.n_boxes_] *
sizeof (dtype), cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->S_d_.Bptr_, f->S_h_.Bptr_,
(f->S_h_.n_boxes_ + 1) * sizeof (int),
cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->S_d_.Bn_, f->S_h_.Bn_,
f->S_h_.n_boxes_ * sizeof (int),
cudaMemcpyHostToDevice));
}
void
xfer__TARGET_BOX__ (FMMWrapper_t* f)
{
/* Target boxes */
cutilSafeCall (cudaMemcpy (f->T_d_.x_, f->T_h_.x_,
f->T_h_.Bptr_[f->T_h_.n_boxes_] *
sizeof (dtype), cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->T_d_.y_, f->T_h_.y_,
f->T_h_.Bptr_[f->T_h_.n_boxes_] *
sizeof (dtype), cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->T_d_.z_, f->T_h_.z_,
f->T_h_.Bptr_[f->T_h_.n_boxes_] *
sizeof (dtype), cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->T_d_.w_, f->T_h_.w_,
f->T_h_.Bptr_[f->T_h_.n_boxes_] *
sizeof (dtype), cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->T_d_.Bptr_, f->T_h_.Bptr_,
(f->T_h_.n_boxes_ + 1) * sizeof (int),
cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->T_d_.Bn_, f->T_h_.Bn_,
f->T_h_.n_boxes_ * sizeof (int),
cudaMemcpyHostToDevice));
}
void
xfer__U_LIST__ (FMMWrapper_t* f)
{
/* Ulist */
cutilSafeCall (cudaMemcpy (f->U_d_.L_, f->U_h_.L_,
f->U_h_.Ptr_[f->U_h_.n_boxes_] *
sizeof (int), cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->U_d_.Ptr_, f->U_h_.Ptr_,
(f->U_h_.n_boxes_ + 1) * sizeof (int),
cudaMemcpyHostToDevice));
}
void
xfer__TAG__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMemcpy (f->tag_d_, f->tag_h_,
nodeVec.size () * sizeof (int),
cudaMemcpyHostToDevice));
}
void
xfer__DEPTH__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
/* depth */
cutilSafeCall (cudaMemcpy (f->depth_d_, f->depth_h_,
nodeVec.size () * sizeof (int),
cudaMemcpyHostToDevice));
}
void
xfer__CHILDREN__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMemcpy (f->child_d_, f->child_h_,
nodeVec.size () * sizeof (int),
cudaMemcpyHostToDevice));
}
void
xfer__RADIUS__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMemcpy (f->radius_d_, f->radius_h_,
nodeVec.size () * sizeof (dtype),
cudaMemcpyHostToDevice));
}
void
xfer__CENTER__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMemcpy (f->center0_d_, f->center0_h_,
nodeVec.size () * sizeof (dtype),
cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->center1_d_, f->center1_h_,
nodeVec.size () * sizeof (dtype),
cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->center2_d_, f->center2_h_,
nodeVec.size () * sizeof (dtype),
cudaMemcpyHostToDevice));
}
void
xfer__SP_UC__ (FMMWrapper_t* f)
{
cutilSafeCall (cudaMemcpy (f->SP_UC_d_, f->SP_UC_h_,
3 * f->SP_UC_size_padded * sizeof (dtype),
cudaMemcpyHostToDevice));
}
void
xfer__UC2UE__ (FMMWrapper_t* f)
{
/* UC2UE matrix */
cutilSafeCall (cudaMemcpy (f->UC2UE_d_, f->UC2UE_h_,
f->UC2UE_r_padded * f->UC2UE_c * sizeof (dtype),
cudaMemcpyHostToDevice));
}
void
xfer__UE2UC__ (FMMWrapper_t* f)
{
/* UE2UC matrix */
cutilSafeCall (cudaMemcpy (f->UE2UC_d_, f->UE2UC_h_,
(2 * 2 * 2) * (f->UE2UC_r_padded * f->UE2UC_c) *
sizeof (dtype),
cudaMemcpyHostToDevice));
}
void
xfer__SRC_UPW_EQU_DEN__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMemcpy (f->SRC_UPW_EQU_DEN_d_, f->SRC_UPW_EQU_DEN_h_,
nodeVec.size () * f->UC2UE_r_padded *
sizeof (dtype),
cudaMemcpyHostToDevice));
}
void
xfer__VLIST_TLIST__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMemcpy (f->vlist_d_, f->vlist_h_,
f->list_size * sizeof (int),
cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->tlist_d_, f->tlist_h_,
f->list_size * sizeof (int),
cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->vlist_ptr_d_, f->vlist_ptr_h_,
(nodeVec.size () + 1) * sizeof (int),
cudaMemcpyHostToDevice));
}
void
xfer__TRG_DWN_CHK_VAL__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMemcpy (f->TRG_DWN_CHK_VAL_d_, f->TRG_DWN_CHK_VAL_h_,
nodeVec.size () * f->SP_DC_n_padded_ *
sizeof (dtype),
cudaMemcpyHostToDevice));
}
void
xfer__PATH2NODE__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMemcpy (f->path2Node_d_, f->path2Node_h_,
nodeVec.size () * sizeof (int3),
cudaMemcpyHostToDevice));
}
void
xfer__PARENT__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMemcpy (f->parent_d_, f->parent_h_,
nodeVec.size () * sizeof (int),
cudaMemcpyHostToDevice));
}
void
xfer__DC2DE__ (FMMWrapper_t* f)
{
/* DC2DE_mat */
cutilSafeCall (cudaMemcpy (f->DC2DE_d_, f->DC2DE_h_,
f->DC2DE_r_padded * f->DC2DE_c * sizeof (dtype),
cudaMemcpyHostToDevice));
}
void
xfer__DE2DC__ (FMMWrapper_t* f)
{
cutilSafeCall (cudaMemcpy (f->DE2DC_d_, f->DE2DC_h_,
(2 * 2 * 2) * f->DE2DC_r_padded * f->DE2DC_c *
sizeof (dtype),
cudaMemcpyHostToDevice));
}
void
xfer__SP_DE__ (FMMWrapper_t* f)
{
cutilSafeCall (cudaMemcpy (f->SP_DE_d_, f->SP_DE_h_,
3 * f->SP_DE_n_padded * sizeof (dtype),
cudaMemcpyHostToDevice));
}
void
xfer__SP_UE__ (FMMWrapper_t* f)
{
cutilSafeCall (cudaMemcpy (f->SP_UE_d_, f->SP_UE_h_,
3 * f->SP_UE_n_padded * sizeof (dtype),
cudaMemcpyHostToDevice));
}
void
xfer__W_LIST__ (FMMWrapper_t* f)
{
cutilSafeCall (cudaMemcpy (f->W_d_.L_, f->W_h_.L_,
f->W_h_.Ptr_[f->W_h_.n_boxes_] *
sizeof (int), cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->W_d_.Ptr_, f->W_h_.Ptr_,
(f->W_h_.n_boxes_ + 1) * sizeof (int),
cudaMemcpyHostToDevice));
}
void
xfer__SRCNUM__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMemcpy (f->srcNum_d_, f->srcNum_h_,
nodeVec.size () * sizeof (int),
cudaMemcpyHostToDevice));
}
void
xfer__SP_DC__ (FMMWrapper_t* f)
{
cutilSafeCall (cudaMemcpy (f->SP_DC_d_, f->SP_DC_h_,
3 * f->SP_DC_n_padded_ * sizeof (dtype),
cudaMemcpyHostToDevice));
}
void
xfer__X_LIST__ (FMMWrapper_t* f)
{
cutilSafeCall (cudaMemcpy (f->X_d_.L_, f->X_h_.L_,
f->X_h_.Ptr_[f->X_h_.n_boxes_] *
sizeof (int), cudaMemcpyHostToDevice));
cutilSafeCall (cudaMemcpy (f->X_d_.Ptr_, f->X_h_.Ptr_,
(f->X_h_.n_boxes_ + 1) * sizeof (int),
cudaMemcpyHostToDevice));
}
void
xfer__TRGNUM__ (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMemcpy (f->trgNum_d_, f->trgNum_h_,
nodeVec.size () * sizeof (int),
cudaMemcpyHostToDevice));
}
void
xfer__SRC_UPW_EQU_DEN__back (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMemcpy (f->SRC_UPW_EQU_DEN_h_, f->SRC_UPW_EQU_DEN_d_,
nodeVec.size () * f->UC2UE_r_padded *
sizeof (dtype),
cudaMemcpyDeviceToHost));
}
void
xfer__TRG_DWN_CHK_VAL__back (FMMWrapper_t* f)
{
AllNodes *All_N = f->AN;
vector<NodeTree>& nodeVec = *All_N->N;
cutilSafeCall (cudaMemcpy (f->TRG_DWN_CHK_VAL_h_, f->TRG_DWN_CHK_VAL_d_,
nodeVec.size () * f->SP_DC_n_padded_ *
sizeof (dtype),
cudaMemcpyDeviceToHost));
}
void
xfer__TARGET_BOX__back (FMMWrapper_t* f)
{
cutilSafeCall (cudaMemcpy (f->T_h_.w_, f->T_d_.w_,
f->T_h_.Bptr_[f->T_h_.n_boxes_] *
sizeof (dtype), cudaMemcpyDeviceToHost));
}
void
free__SOURCE_BOX__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* Deallocate memory for data */
cutilSafeCall (cudaFree (f->S_d_.x_));
cutilSafeCall (cudaFree (f->S_d_.y_));
cutilSafeCall (cudaFree (f->S_d_.z_));
cutilSafeCall (cudaFree (f->S_d_.w_));
/* Deallocate memory for pointers */
cutilSafeCall (cudaFree (f->S_d_.Bptr_));
cutilSafeCall (cudaFree (f->S_d_.Bn_));
/* ------------------------------------------------------------ */
}
void
free__TARGET_BOX__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* Target boxes */
cutilSafeCall (cudaFree (f->T_d_.x_));
cutilSafeCall (cudaFree (f->T_d_.y_));
cutilSafeCall (cudaFree (f->T_d_.z_));
cutilSafeCall (cudaFree (f->T_d_.w_));
/* Deallocate memory for pointers */
cutilSafeCall (cudaFree (f->T_d_.Bptr_));
cutilSafeCall (cudaFree (f->T_d_.Bn_));
/* ------------------------------------------------------------ */
}
void
free__U_LIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* ulist */
cutilSafeCall (cudaFree (f->U_d_.L_));
cutilSafeCall (cudaFree (f->U_d_.Ptr_));
/* ------------------------------------------------------------ */
}
void
free__TAG__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* tag */
cutilSafeCall (cudaFree (f->tag_d_));
/* ------------------------------------------------------------ */
}
void
free__DEPTH__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* depth */
cutilSafeCall (cudaFree (f->depth_d_));
/* ------------------------------------------------------------ */
}
void
free__CHILDREN__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* children */
cutilSafeCall (cudaFree (f->child_d_));
/* ------------------------------------------------------------ */
}
void
free__RADIUS__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* radius */
cutilSafeCall (cudaFree (f->radius_d_));
/* ------------------------------------------------------------ */
}
void
free__CENTER__ (FMMWrapper_t* f) {
/* ------------------------------------------------------------ */
cutilSafeCall (cudaFree (f->center0_d_));
cutilSafeCall (cudaFree (f->center1_d_));
cutilSafeCall (cudaFree (f->center2_d_));
/* ------------------------------------------------------------ */
}
void
free__SP_UC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* SP[UC] */
cutilSafeCall (cudaFree (f->SP_UC_d_));
/* ------------------------------------------------------------ */
}
void
free__UC2UE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* UC2UE matrix */
cutilSafeCall (cudaFree (f->UC2UE_d_));
/* ------------------------------------------------------------ */
}
void
free__UE2UC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* UE2UC matrix */
cutilSafeCall (cudaFree (f->UE2UC_d_));
/* ------------------------------------------------------------ */
}
void
free__SRC_UPW_EQU_DEN__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* Temporary up_calc GPU variables */
/* src_upw_equ_den */
cutilSafeCall (cudaFree (f->SRC_UPW_EQU_DEN_d_));
/* ------------------------------------------------------------ */
}
void
free__VLIST_SRC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* src */
cutilSafeCall (cudaFree (f->vlist_src_d_));
/* ------------------------------------------------------------ */
}
void
free__REG_DEN__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* reg_den */
cutilSafeCall (cudaFree (f->reg_den_d_));
/* ------------------------------------------------------------ */
}
void
free__TT__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* tt */
cutilSafeCall (cudaFree (f->tt));
/* ------------------------------------------------------------ */
}
void
free__VLIST_TRANS__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* trans */
cutilSafeCall (cudaFree (f->vlist_trans_d_));
/* ------------------------------------------------------------ */
}
void
free__VLIST_TRG__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* trg */
cutilSafeCall (cudaFree (f->vlist_trg_d_));
/* ------------------------------------------------------------ */
}
void
free__VLIST_TLIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* vlist and tlist and pointer */
/* pointer */
cutilSafeCall (cudaFree (f->vlist_ptr_d_));
/* vlist */
cutilSafeCall (cudaFree (f->vlist_d_));
/* tlist */
cutilSafeCall (cudaFree (f->tlist_d_));
/* ------------------------------------------------------------ */
}
void
free__TRG_DWN_CHK_VAL__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* trg_dwn_chk_val */
cutilSafeCall (cudaFree (f->TRG_DWN_CHK_VAL_d_));
/* ------------------------------------------------------------ */
}
void
free__PATH2NODE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* path2Node */
cutilSafeCall (cudaFree (f->path2Node_d_));
/* ------------------------------------------------------------ */
}
void
free__PARENT__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* parent */
/* Not needed - use children structure instead */
/* Now I need it */
cutilSafeCall (cudaFree (f->parent_d_));
/* ------------------------------------------------------------ */
}
void
free__TRG_DWN_EQU_DEN__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* trg_dwn_equ_den */
cutilSafeCall (cudaFree (f->TRG_DWN_EQU_DEN_d_));
/* ------------------------------------------------------------ */
}
void
free__DC2DE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* DC2DE_mat */
cutilSafeCall (cudaFree (f->DC2DE_d_));
/* ------------------------------------------------------------ */
}
void
free__DE2DC__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* DE2DC_mat[8] */
cutilSafeCall (cudaFree (f->DE2DC_d_));
/* ------------------------------------------------------------ */
}
void
free__SP_DE__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
/* SP[DE] */
cutilSafeCall (cudaFree (f->SP_DE_d_));
/* ------------------------------------------------------------ */
}
void
free__SP_UE__ (FMMWrapper_t* f)
{
cutilSafeCall (cudaFree (f->SP_UE_d_));
}
void
free__W_LIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
  /* wlist */
cutilSafeCall (cudaFree (f->W_d_.L_));
cutilSafeCall (cudaFree (f->W_d_.Ptr_));
/* ------------------------------------------------------------ */
}
void
free__SRCNUM__ (FMMWrapper_t* f)
{
cutilSafeCall (cudaFree (f->srcNum_d_));
}
void
free__SP_DC__ (FMMWrapper_t* f)
{
cutilSafeCall (cudaFree (f->SP_DC_d_));
}
void
free__X_LIST__ (FMMWrapper_t* f)
{
/* ------------------------------------------------------------ */
  /* xlist */
cutilSafeCall (cudaFree (f->X_d_.L_));
cutilSafeCall (cudaFree (f->X_d_.Ptr_));
/* ------------------------------------------------------------ */
}
void
free__TRGNUM__ (FMMWrapper_t* f)
{
cutilSafeCall (cudaFree (f->trgNum_d_));
}
FMMWrapper_t *
preproc (AllNodes* All_N)
{
FMMWrapper_t* f = (FMMWrapper_t *) malloc (sizeof (FMMWrapper_t));
assert (f);
f->AN = All_N;
int i, j, idx;
Point3 c;
real_t r;
// int num_leaf_nodes;
// int num_non_leaf_nodes;
int num_leaf_nodes_src;
int num_leaf_nodes_trg;
int num_non_leaf_nodes_src;
int num_non_leaf_nodes_trg;
int list_size;
Pos *SP = All_N->SP;
Trans_matrix *TM = All_N->TM;
Pos *RP = All_N->RP;
struct stopwatch_t* timer = NULL;
struct stopwatch_t* timer_ = NULL;
long double t_data_cpu, t_data_gpu, t_pcie, t_subtract;
stopwatch_init ();
timer = stopwatch_create ();
timer_ = stopwatch_create ();
/* ============================================================= */
/* CPU SIDE
*/
real_t* tmp_c;
tmp_c = (real_t*) malloc (1024 * 1024);
assert (tmp_c);
fprintf (stderr, "Creating Host Data Structures ... ");
stopwatch_start (timer);
/* byte alignment required for coalesced loading */
int byte_padding = get_byte_padding ();
/* real_padding is padding in terms of # of data elements */
int real_padding = byte_padding / sizeof (dtype);
/* Create GPU friendly Source boxes */
vector<NodeTree>& nodeVec = *All_N->N;
/*
ulist_create_boxes__double (&f->S_h_, nodeVec.size (),
All_N->Ns, real_padding);
*/
ulist_create_boxes__double_source (All_N, f);
/* Create GPU friendly Target boxes */
/*
ulist_create_boxes__double (&f->T_h_, nodeVec.size (),
All_N->Nt, real_padding);
*/
ulist_create_boxes__double_target (All_N, f);
/* Create GPU friendly ulist data structure */
ulist_create_ulist (&f->U_h_, nodeVec.size (), All_N);
/* depth and children */
f->depth_h_ = (int*) malloc (nodeVec.size () * sizeof (int));
assert (f->depth_h_);
for(i = 0; i < nodeVec.size (); i++) {
f->depth_h_[i] = nodeVec[i].depth;
}
// num_leaf_nodes = 0;
// num_non_leaf_nodes = 0;
/*
num_leaf_nodes = (int) pow (8.0, nodeVec[nodeVec.size () - 1].depth);
num_non_leaf_nodes = nodeVec.size () - num_leaf_nodes;
assert ((num_leaf_nodes + num_non_leaf_nodes) == nodeVec.size ());
*/
/* num leaf and non-leaf nodes for src and trg */
num_leaf_nodes_src = 0;
num_non_leaf_nodes_src = 0;
for(i = 0; i < nodeVec.size (); i++) {
if(nodeVec[i].tag & LET_SRCNODE && nodeVec[i].child == -1) {
num_leaf_nodes_src++;
} else if(nodeVec[i].tag & LET_SRCNODE && nodeVec[i].child != -1) {
num_non_leaf_nodes_src++;
}
}
num_leaf_nodes_trg = 0;
num_non_leaf_nodes_trg = 0;
for(i = 0; i < nodeVec.size (); i++) {
if(nodeVec[i].tag & LET_TRGNODE && nodeVec[i].child == -1) {
num_leaf_nodes_trg++;
    } else if(nodeVec[i].tag & LET_TRGNODE && nodeVec[i].child != -1) {
num_non_leaf_nodes_trg++;
}
}
f->child_h_ = (int*) malloc (nodeVec.size () * sizeof (int));
assert (f->child_h_);
for(i = 0; i < nodeVec.size (); i++) {
f->child_h_[i] = nodeVec[i].child;
}
/* Pre-compute center and radius */
f->radius_h_ = (real_t*) malloc (nodeVec.size () * sizeof (real_t));
f->center0_h_ = (real_t*) malloc (nodeVec.size () * sizeof (real_t));
f->center1_h_ = (real_t*) malloc (nodeVec.size () * sizeof (real_t));
f->center2_h_ = (real_t*) malloc (nodeVec.size () * sizeof (real_t));
assert (f->radius_h_ && f->center0_h_ && f->center1_h_ && f->center2_h_);
// idx = 0;
for(i = 0; i < nodeVec.size (); i++) {
c = center (i, nodeVec);
r = radius (i, nodeVec);
f->radius_h_[i] = r;
f->center0_h_[i] = c(0);
f->center1_h_[i] = c(1);
f->center2_h_[i] = c(2);
}
/* tag */
  /* SRC or TRG */
f->tag_h_ = (int*) malloc (nodeVec.size () * sizeof (int));
for(i = 0; i < nodeVec.size (); i++) {
f->tag_h_[i] = nodeVec[i].tag;
}
/* SP[UC] */
  /* SP[UC] consists of 3 arrays x, y, and z, each of which is
* (np+2)^3 - (np)^3 long
*/
/* allocate memory */
f->SP_UC_size = pln_size (UC, SP);
f->SP_UC_size_padded = (((pln_size (UC, SP) + real_padding - 1) /
real_padding) * real_padding);
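  /* The *_padded sizes below round element counts up to a multiple of
   * real_padding so that each padded array (and each plane within it)
   * starts on a byte_padding-aligned boundary for coalesced GPU loads.
   */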
/* 3x for x, y, and z */
f->SP_UC_h_ = (real_t*) malloc (3 * f->SP_UC_size_padded * sizeof (real_t));
assert (f->SP_UC_h_);
/* initialize data */
memcpy (&f->SP_UC_h_[0], SP[UC].x, SP[UC].n * sizeof (real_t));
memcpy (&f->SP_UC_h_[1 * f->SP_UC_size_padded], SP[UC].y,
SP[UC].n * sizeof (real_t));
memcpy (&f->SP_UC_h_[2 * f->SP_UC_size_padded], SP[UC].z,
SP[UC].n * sizeof (real_t));
/* UC2UE */
stopwatch_start (timer_);
compute_UC2UE_mat (TM, SP);
t_subtract = stopwatch_stop (timer_);
f->UC2UE_r = pln_size (UE, SP);
f->UC2UE_r_padded = (((f->UC2UE_r + real_padding - 1) / real_padding) *
real_padding);
f->UC2UE_c = pln_size (UC, SP);
f->UC2UE_h_ = (real_t*) malloc (f->UC2UE_r_padded * f->UC2UE_c *
sizeof (real_t));
assert (f->UC2UE_h_);
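  /* Copy one column at a time: each column occupies UC2UE_r_padded entries
   * in the padded layout, so the matrix cannot be copied with a single
   * contiguous memcpy.
   */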
for(i = 0; i < f->UC2UE_c; i++) {
memcpy (&f->UC2UE_h_[i * f->UC2UE_r_padded], &TM->UC2UE[i * f->UC2UE_r],
f->UC2UE_r * sizeof (real_t));
}
/* UE2UC */
stopwatch_start (timer_);
TM->UE2UC = (real_t**) malloc (sizeof (real_t*) * 2 * 2 * 2);
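  /* One UE2UC matrix per child octant; Index3(a_, b_, c_) enumerates the
   * 8 children.
   */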
for(int a_ = 0; a_ < 2; a_++) {
for(int b_ = 0; b_ < 2; b_++) {
for(int c_ = 0; c_ < 2; c_++) {
Index3 idx3(a_, b_, c_);
compute_UE2UC_mat (idx3, TM, SP);
}
}
}
t_subtract += stopwatch_stop (timer_);
f->UE2UC_r = pln_size (UC, SP);
f->UE2UC_r_padded = (((f->UE2UC_r + real_padding - 1) / real_padding) *
real_padding);
f->UE2UC_c = pln_size (UE, SP);
f->UE2UC_h_ = (real_t*) malloc ((2 * 2 * 2) * (f->UE2UC_r_padded * f->UE2UC_c)
* sizeof (real_t));
assert (f->UE2UC_h_);
/* copy each matrix */
for(i = 0; i < (2 * 2 * 2); i++) {
/* 1 column at a time */
for(j = 0; j < f->UE2UC_c; j++) {
memcpy (&f->UE2UC_h_[i * f->UE2UC_r_padded * f->UE2UC_c +
j * f->UE2UC_r_padded],
&TM->UE2UC[i][j * f->UE2UC_r], f->UE2UC_r * sizeof (dtype));
}
}
/* SRC_UPW_EQU_DEN_h_ */
f->SRC_UPW_EQU_DEN_h_ = (dtype*) malloc (nodeVec.size () * f->UC2UE_r_padded *
sizeof (dtype));
assert (f->SRC_UPW_EQU_DEN_h_);
/* up_calc__gpu configuration variables */
// f->num_non_leaf_nodes = num_non_leaf_nodes;
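  /* Reduction configuration: num_nodes_reduction = 8^reduction_depth is the
   * number of octree nodes at the reduction level, and reduction_offset is
   * the sum of 8^i for i < reduction_depth, i.e. the index of the first node
   * at that level assuming the complete octree is stored level by level.
   */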
f->tree_max_depth = nodeVec[nodeVec.size () - 1].depth;
f->reduction_depth = f->tree_max_depth - 2;
f->num_nodes_reduction = (int) pow (8.0, f->reduction_depth);
f->reduction_offset = 0;
for(i = 0; i < f->reduction_depth; i++)
f->reduction_offset += (int) pow (8.0, i);
/* up_calc temporary arrays */
/* src_upw_equ_den */
  /* There is no CPU equivalent of this, as everything that is needed lives on the GPU */
/* VLIST data structures */
f->vlist_array_size = eff_data_size (UE);
/*
f->vlist_array_size_padded = (((f->vlist_array_size + real_padding - 1) /
real_padding) * real_padding);
*/
/* trg */
/* src */
f->vlist_ptr_h_ = (int*) malloc ((nodeVec.size () + 1) * sizeof (int));
assert (f->vlist_ptr_h_);
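  /* vlist_ptr_h_ is a CSR-style offset array: entries vlist_ptr_h_[i] to
   * vlist_ptr_h_[i+1]-1 of vlist_h_ hold the V-list of node i.
   */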
list_size = 0;
f->vlist_ptr_h_[0] = 0;
for(i = 0; i < nodeVec.size (); i++) {
list_size += nodeVec[i].Vnodes.size ();
f->vlist_ptr_h_[i+1] = list_size;
}
f->vlist_h_ = (int*) malloc (list_size * sizeof (int));
assert (f->vlist_h_);
idx = 0;
for(i = 0; i < nodeVec.size (); i++) {
for(j = 0; j < nodeVec[i].Vnodes.size (); j++) {
f->vlist_h_[idx] = nodeVec[i].Vnodes[j];
idx++;
}
}
assert (idx == list_size);
f->list_size = list_size;
f->reg_den_size = RP->n;
/*
f->reg_den_size_padded = (((RP->n + real_padding - 1) / real_padding) *
real_padding);
*/
/* reg den needs no host equivalent */
/* trans */
f->trans_arrays_num = 7 * 7 * 7;
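  /* Each V-list interaction is labelled by the relative offset (in units of
   * the box diameter D) between the target and source box centers; each
   * component lies in [-3, 3], so 7*7*7 translation vectors cover all cases
   * (see the id computation below).
   */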
f->tlist_h_ = (int*) malloc (list_size * sizeof (int));
assert (f->tlist_h_);
int id;
int dim = 3;
int t_index = 0;
for(i = 0;i < nodeVec.size (); i++) {
if(nodeVec[i].tag & LET_TRGNODE && nodeVec[i].Vnodes.size () > 0) {
Point3 gNodeIdxCtr (center (i, nodeVec));
real_t D = 2.0 * radius (i, nodeVec);
for(j = 0;j < nodeVec[i].Vnodes.size (); j++) {
idx = nodeVec[i].Vnodes[j];
Point3 viCtr (center (idx, nodeVec));
Index3 idx3;
for(int d = 0; d < dim; d++) {
idx3(d) = int (round ((viCtr[d] - gNodeIdxCtr[d]) / D));
}
id = (idx3(0) + 3) + (idx3(1) + 3) * 7 + (idx3(2) + 3) * 7 * 7;
f->tlist_h_[t_index] = id;
t_index++;
}
}
}
/* No need for these on the host */
/* f->vlist_src_d_ */
/* f->vlist_trg_d_ */
/* f->vlist_trans_d_ */
f->RP_n_ = RP->n;
/* No need for these on the host */
/* f->RP_X_d_ */
/* f->RP_Y_d_ */
/* f->RP_Z_d_ */
/* IFFT */
f->SP_DC_n_ = pln_size (DC, SP);
f->SP_DC_n_padded_ = (((f->SP_DC_n_ + real_padding - 1) / real_padding) *
real_padding);
/* TRG_DWN_CHK_VAL_h_ */
f->TRG_DWN_CHK_VAL_h_ = (dtype*) malloc (nodeVec.size () *
f->SP_DC_n_padded_ * sizeof (dtype));
assert (f->TRG_DWN_CHK_VAL_h_);
/* DOWN_CALC */
/* path2Node */
f->path2Node_h_ = (int3*) malloc (nodeVec.size () * sizeof (int3));
assert (f->path2Node_h_);
for(i = 0; i < nodeVec.size (); i++) {
f->path2Node_h_[i].x = (nodeVec[i].path2Node)(0);
f->path2Node_h_[i].y = (nodeVec[i].path2Node)(1);
f->path2Node_h_[i].z = (nodeVec[i].path2Node)(2);
}
/* parent */
/* Not needed - use children structure instead */
/* Actually, now I need it */
f->parent_h_ = (int*) malloc (nodeVec.size () * sizeof (int));
assert (f->parent_h_);
for(i = 0; i < nodeVec.size (); i++) {
f->parent_h_[i] = nodeVec[i].parent;
}
/* trg_dwn_equ_den */
f->SP_DE_n_ = pln_size (DE, SP);
f->SP_DE_n_padded = (((f->SP_DE_n_ + real_padding - 1) / real_padding) *
real_padding);
/* DC2DE_mat */
stopwatch_start (timer_);
compute_DC2DE_mat (TM, SP);
t_subtract += stopwatch_stop (timer_);
f->DC2DE_r = pln_size (DE, SP);
f->DC2DE_r_padded = (((f->DC2DE_r + real_padding - 1) / real_padding) *
real_padding);
f->DC2DE_c = pln_size (DC, SP);
f->DC2DE_h_ = (real_t*) malloc (f->DC2DE_r_padded * f->DC2DE_c *
sizeof (real_t));
assert (f->DC2DE_h_);
for(i = 0; i < f->DC2DE_c ; i++) {
memcpy (&f->DC2DE_h_[i * f->DC2DE_r_padded],
&TM->DC2DE[i * f->DC2DE_r],
f->DC2DE_r * sizeof (real_t));
}
/* DE2DC_mat[8] */
stopwatch_start (timer_);
TM->DE2DC = (real_t**) malloc (sizeof (real_t*) * 2 * 2 * 2);
for(int a = 0; a < 2; a++) {
for(int b = 0; b < 2; b++) {
for(int c = 0; c < 2 ; c++) {
Index3 idx(a, b, c);
compute_DE2DC_mat (idx, TM, SP);
}
}
}
t_subtract += stopwatch_stop (timer_);
f->DE2DC_r = pln_size (DC, SP);
f->DE2DC_r_padded = (((f->DE2DC_r + real_padding - 1) / real_padding) *
real_padding);
f->DE2DC_c = pln_size (DE, SP);
f->DE2DC_h_ = (real_t*) malloc ((2 * 2 * 2) * f->DE2DC_r_padded * f->DE2DC_c *
sizeof (real_t));
assert (f->DE2DC_h_);
for(i = 0; i < 2 * 2 * 2; i++) {
for(j = 0; j < f->DE2DC_c; j++) {
real_t* temp_tm = TM->DE2DC[i];
memcpy (&f->DE2DC_h_[i * f->DE2DC_r_padded * f->DE2DC_c +
j * f->DE2DC_r_padded],
&temp_tm[j * f->DE2DC_r],
f->DE2DC_r * sizeof (real_t));
}
}
/* down_calc configuration */
f->expansion_depth = 2;
f->num_nodes_expansion = (int) pow (8.0, f->expansion_depth);
f->expansion_offset = 0;
for(i = 0; i < f->expansion_depth; i++) {
f->expansion_offset += (int) pow (8.0, i);
}
/* down_calc SP[DE] */
f->SP_DE_h_ = (dtype*) malloc (3 * f->SP_DE_n_padded * sizeof (dtype));
assert (f->SP_DE_h_);
memcpy (&f->SP_DE_h_[0], SP[DE].x, SP[DE].n * sizeof (dtype));
memcpy (&f->SP_DE_h_[f->SP_DE_n_padded], SP[DE].y, SP[DE].n * sizeof (dtype));
memcpy (&f->SP_DE_h_[2 * f->SP_DE_n_padded], SP[DE].z,
SP[DE].n * sizeof (dtype));
//t_data_cpu = stopwatch_stop (timer) - t_subtract;
t_data_cpu = stopwatch_stop (timer);
fprintf (stderr, "==> Time: %Lg secs\n", t_data_cpu);
/* WLIST_CALC */
f->SP_UE_n_ = pln_size (UE, SP);
f->SP_UE_n_padded = (((f->SP_UE_n_ + real_padding - 1) / real_padding) *
real_padding);
f->SP_UE_h_ = (dtype*) malloc (3 * f->SP_UE_n_padded * sizeof (dtype));
assert (f->SP_UE_h_);
memcpy (&f->SP_UE_h_[0], SP[UE].x, SP[UE].n * sizeof (dtype));
memcpy (&f->SP_UE_h_[f->SP_UE_n_padded], SP[UE].y, SP[UE].n * sizeof (dtype));
memcpy (&f->SP_UE_h_[2 * f->SP_UE_n_padded], SP[UE].z,
SP[UE].n * sizeof (dtype));
wlist_create_wlist (&f->W_h_, nodeVec.size (), All_N);
f->srcNum_h_ = (int*) malloc (nodeVec.size () * sizeof (int));
assert (f->srcNum_h_);
for(i = 0; i < nodeVec.size (); i++) {
f->srcNum_h_[i] = nodeVec[i].srcNum;
}
/* XLIST_CALC */
f->SP_DC_h_ = (dtype*) malloc (3 * f->SP_DC_n_padded_ * sizeof (dtype));
assert (f->SP_DC_h_);
memcpy (&f->SP_DC_h_[0], SP[DC].x, SP[DC].n * sizeof (dtype));
memcpy (&f->SP_DC_h_[f->SP_DC_n_padded_], SP[DC].y, SP[DC].n * sizeof (dtype));
memcpy (&f->SP_DC_h_[2 * f->SP_DC_n_padded_], SP[DC].z,
SP[DC].n * sizeof (dtype));
xlist_create_xlist (&f->X_h_, nodeVec.size (), All_N);
f->trgNum_h_ = (int*) malloc (nodeVec.size () * sizeof (int));
assert (f->trgNum_h_);
for(i = 0; i < nodeVec.size (); i++) {
f->trgNum_h_[i] = nodeVec[i].trgNum;
}
#if 0
long int bytes_up = 0;
/* source boxes */
bytes_up += 4 * f->S_h_.Bptr_[f->S_h_.n_boxes_] * sizeof (dtype);
bytes_up += (f->S_h_.n_boxes_ + 1) * sizeof (int);
bytes_up += (f->S_h_.n_boxes_) * sizeof (int);
/* Radius */
bytes_up += nodeVec.size () * sizeof (dtype);
/* center */
bytes_up += 3 * nodeVec.size () * sizeof (dtype);
/* SP_UC */
bytes_up += 3 * f->SP_UC_size_padded * sizeof (dtype);
/* UC2UE */
bytes_up += f->UC2UE_r_padded * f->UC2UE_c * sizeof (dtype);
/* src_upw_equ_den */
bytes_up += nodeVec.size () * f->UC2UE_r_padded * sizeof (dtype);
/* child */
bytes_up += nodeVec.size () * sizeof (int);
/* UE2UC */
bytes_up += 8 * f->UE2UC_r_padded * f->UE2UC_c * sizeof (dtype);
/* tag */
bytes_up += nodeVec.size () * sizeof (int);
/* depth */
bytes_up += nodeVec.size () * sizeof (int);
double mega_bytes_up = (1.0 * bytes_up/ 1000000);
  printf("UP_CALC requires %g mega bytes of data\n", mega_bytes_up);
#endif
#if 0
long int bytes_vlist = 0;
/* DEPTH */
bytes_vlist += nodeVec.size () * sizeof (int);
/* SRC_UPW_EQU_DEN */
bytes_vlist += nodeVec.size () * f->UC2UE_r_padded * sizeof (dtype);
/* REG_DEN */
bytes_vlist += nodeVec.size () * f->reg_den_size * sizeof (dtype);
/* VLIST_SRC */
bytes_vlist += nodeVec.size () * f->vlist_array_size * sizeof (dtype);
/* TT */
bytes_vlist += f->trans_arrays_num * f->RP_n_ * sizeof (dtype);
/* VLIST_TRANS */
bytes_vlist += f->trans_arrays_num * f->vlist_array_size * sizeof (dtype);
/* VLIST_TRG */
bytes_vlist += nodeVec.size () * f->vlist_array_size * sizeof (dtype);
/* VLIST_TLIST */
bytes_vlist += (nodeVec.size () + 1) * sizeof (int);
bytes_vlist += list_size * sizeof (int);
bytes_vlist += list_size * sizeof (int);
/* REG_DEN_IFFT */
bytes_vlist += nodeVec.size () * f->reg_den_size * sizeof (dtype);
/* TRG_DWN_CHK_VAL */
bytes_vlist += nodeVec.size () * f->SP_DC_n_padded_ * sizeof (dtype);
double mega_bytes_vlist = (1.0 * bytes_vlist / 1000000);
printf("VLIST requires %g mega bytes of data\n", mega_bytes_vlist);
#endif
/* ============================================================= */
/* GPU SIDE
*/
/* this is done to set up the GPU */
real_t* tmp_g;
cutilSafeCall (cudaMalloc ((void**)&tmp_g, 1024 * 1024));
fprintf (stderr, "Creating GPU Data Structures ... ");
stopwatch_start (timer);
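  /* Each __FOO__ macro below gates the allocation (and the matching PCIe
   * transfer further down) of one device-side structure, so individual
   * arrays can be compiled out, e.g. when measuring memory use.
   */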
#if __SOURCE_BOX__
alloc__SOURCE_BOX__ (f);
#endif
#if __TARGET_BOX__
alloc__TARGET_BOX__ (f);
#endif
#if __U_LIST__
alloc__U_LIST__ (f);
#endif
#if __TAG__
alloc__TAG__ (f);
#endif
#if __DEPTH__
alloc__DEPTH__ (f);
#endif
#if __CHILDREN__
alloc__CHILDREN__ (f);
#endif
#if __RADIUS__
alloc__RADIUS__ (f);
#endif
#if __CENTER__
alloc__CENTER__ (f);
#endif
#if __SP_UC__
alloc__SP_UC__ (f);
#endif
#if __UC2UE__
alloc__UC2UE__ (f);
#endif
#if __UE2UC__
alloc__UE2UC__ (f);
#endif
#if __SRC_UPW_EQU_DEN__
alloc__SRC_UPW_EQU_DEN__ (f);
#endif
/* Vlist */
#if __VLIST_SRC__
alloc__VLIST_SRC__ (f);
#endif
#if __REG_DEN__
alloc__REG_DEN__ (f);
#endif
#if __TT__
alloc__TT__ (f);
#endif
#if __VLIST_TRANS__
alloc__VLIST_TRANS__ (f);
#endif
#if __VLIST_TRG__
alloc__VLIST_TRG__ (f);
#endif
#if __VLIST_TLIST__
alloc__VLIST_TLIST__ (f);
#endif
#if 0
#if __REG_DEN_IFFT__
/* ------------------------------------------------------------ */
/* IFFT */
cutilSafeCall (cudaMalloc ((void**)&f->reg_den_ifft_d_,
nodeVec.size () * f->reg_den_size *
sizeof (dtype)));
assert (f->reg_den_ifft_d_);
/* ------------------------------------------------------------ */
#endif
#endif
#if __TRG_DWN_CHK_VAL__
alloc__TRG_DWN_CHK_VAL__ (f);
#endif
/* DOWN_CALC */
#if __PATH2NODE__
alloc__PATH2NODE__ (f);
#endif
#if __PARENT__
alloc__PARENT__ (f);
#endif
#if __TRG_DWN_EQU_DEN__
alloc__TRG_DWN_EQU_DEN__ (f);
#endif
#if __DC2DE__
alloc__DC2DE__ (f);
#endif
#if __DE2DC__
alloc__DE2DC__ (f);
#endif
#if __SP_DE__
alloc__SP_DE__ (f);
#endif
#if __SP_UE__
alloc__SP_UE__ (f);
#endif
#if __W_LIST__
alloc__W_LIST__ (f);
#endif
#if __SRCNUM__
alloc__SRCNUM__ (f);
#endif
#if __SP_DC__
alloc__SP_DC__ (f);
#endif
#if __X_LIST__
alloc__X_LIST__ (f);
#endif
#if __TRGNUM__
alloc__TRGNUM__ (f);
#endif
t_data_gpu = stopwatch_stop (timer);
fprintf (stderr, "==> Time: %Lg secs\n", t_data_gpu);
/* ============================================================= */
/* Copy data over to GPU
*/
fprintf (stderr, "Copying Data over PCIE ... ");
stopwatch_start (timer);
#if __SOURCE_BOX__
xfer__SOURCE_BOX__ (f);
#endif
#if __TARGET_BOX__
xfer__TARGET_BOX__ (f);
#endif
#if __U_LIST__
xfer__U_LIST__ (f);
#endif
#if __TAG__
xfer__TAG__ (f);
#endif
#if __DEPTH__
xfer__DEPTH__ (f);
#endif
#if __CHILDREN__
xfer__CHILDREN__ (f);
#endif
/* center and radius */
#if __RADIUS__
xfer__RADIUS__ (f);
#endif
#if __CENTER__
xfer__CENTER__ (f);
#endif
/* SP[UC] */
#if __SP_UC__
xfer__SP_UC__ (f);
#endif
#if __UC2UE__
xfer__UC2UE__ (f);
#endif
#if __UE2UC__
xfer__UE2UC__ (f);
#endif
/* No copying necessary for SRC_UPW_EQU_DEN_d_ */
/* No copying necessary for vlist_src_d_, vlist_trg_d_, vlist_trans_d_*/
/* No copying necessary for tt and reg_den */
/* vlist, tlist and pointer */
#if __VLIST_TLIST__
xfer__VLIST_TLIST__ (f);
#endif
/* No copying necessary for reg_den_ifft_d_ */
/* No copying necessary for TRG_DWN_CHK_VAL_d_ */
/* DOWN_CALC */
/* path2Node */
#if __PATH2NODE__
xfer__PATH2NODE__ (f);
#endif
/* No copying necessary for TRG_DWN_EQU_DEN_d_ */
/* parent */
/* Not needed - use children structure instead */
/* Now I need it */
#if __PARENT__
xfer__PARENT__ (f);
#endif
#if __DC2DE__
xfer__DC2DE__ (f);
#endif
/* DE2DC_mat[8] */
#if __DE2DC__
xfer__DE2DC__ (f);
#endif
/* SP[DE] */
#if __SP_DE__
xfer__SP_DE__ (f);
#endif
/* SP[UE] */
#if __SP_UE__
xfer__SP_UE__ (f);
#endif
#if __W_LIST__
xfer__W_LIST__ (f);
#endif
#if __SRCNUM__
xfer__SRCNUM__ (f);
#endif
#if __SP_DC__
xfer__SP_DC__ (f);
#endif
#if __X_LIST__
xfer__X_LIST__ (f);
#endif
#if __TRGNUM__
xfer__TRGNUM__ (f);
#endif
t_pcie = stopwatch_stop (timer);
fprintf (stderr, "==> Time: %Lg secs\n", t_pcie);
return f;
}
|
f5e2f32f0ac7e2538e294894cc876362e5b0478b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <cassert>
#include <hipcub/hipcub.hpp> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/inference/tensorrt/plugin/qkv_to_context_plugin.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_utils.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/platform/device_context.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
template <typename T>
__global__ void transpose(T *src, T *dst, const int batch_size,
const int seq_len, const int head_num,
const int size_per_head) {
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) +
seq_id * head_num * size_per_head + head_id * size_per_head +
threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
template <typename T>
__global__ void TransposeQkvKernel(const int H, const T *input, T *output) {
// Input: BxSx3xNxH
// Bias: 3xSxB
// Output: 3xBxNxSxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z;
const int N = blockDim.y;
const int S = gridDim.x;
const int B = gridDim.y;
const int NH = N * H;
const int NHS = NH * S;
const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3;
const int out_offset = s * H + n * S * H + b * NHS + m * NHS * B;
const int i = threadIdx.x;
output[out_offset + i] = input[in_offset + i];
}
inline void TransposeQKV(const int batch, const int seq_len,
const int head_size, const int head_num,
const float *input, float *output,
hipStream_t stream) {
int scratch_size = batch * head_num * seq_len * seq_len;
const dim3 grid(seq_len, batch, 3);
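  // One block per (sequence position, batch, Q/K/V) triple. The branches
  // below vectorize the per-thread copy (float4/float2) when head_size and
  // the scratch size allow, shrinking blockDim.x accordingly.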
if (head_size % 4 == 0 && scratch_size % 4 == 0) {
const int h = head_size / 4;
const float4 *input4 = reinterpret_cast<const float4 *>(input);
float4 *output4 = reinterpret_cast<float4 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num, 1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num, head_size, 1024 * 4));
hipLaunchKernelGGL(( TransposeQkvKernel<float4>), dim3(grid), dim3(block), 0, stream, h, input4, output4);
} else if (head_size % 2 == 0 && scratch_size % 2 == 0) {
const int h = head_size / 2;
const float2 *input2 = reinterpret_cast<const float2 *>(input);
float2 *output2 = reinterpret_cast<float2 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num, 1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num, head_size, 1024 * 2));
hipLaunchKernelGGL(( TransposeQkvKernel<float2>), dim3(grid), dim3(block), 0, stream, h, input2, output2);
} else {
const dim3 block(head_size, head_num, 1);
// limit head_size * head_num to max block size(1024).
PADDLE_ENFORCE_LE(head_size * head_num, 1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num, head_size, 1024));
hipLaunchKernelGGL(( TransposeQkvKernel<float>), dim3(grid), dim3(block), 0, stream, head_size, input,
output);
}
}
inline void TransposeQKV(const int batch, const int seq_len,
const int head_size, const int head_num,
const half *input, half *output, hipStream_t stream) {
int scratch_size = batch * head_num * seq_len * seq_len;
const dim3 grid(seq_len, batch, 3);
if (head_size % 8 == 0 && scratch_size % 8 == 0) {
int h = head_size / 8;
const int4 *input4 = reinterpret_cast<const int4 *>(input);
int4 *output4 = reinterpret_cast<int4 *>(output);
dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num, 1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num, head_size, 1024 * 8));
hipLaunchKernelGGL(( TransposeQkvKernel<int4>), dim3(grid), dim3(block), 0, stream, h, input4, output4);
} else if (head_size % 2 == 0 && scratch_size % 2 == 0) {
const int h = head_size / 2;
const half2 *input2 = reinterpret_cast<const half2 *>(input);
half2 *output2 = reinterpret_cast<half2 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num, 1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num, head_size, 1024 * 2));
hipLaunchKernelGGL(( TransposeQkvKernel<half2>), dim3(grid), dim3(block), 0, stream, h, input2, output2);
} else {
const dim3 block(head_size, head_num, 1);
// limit head_size * head_num to max block size(1024).
PADDLE_ENFORCE_LE(head_size * head_num, 1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num, head_size, 1024));
hipLaunchKernelGGL(( TransposeQkvKernel<half>), dim3(grid), dim3(block), 0, stream, head_size, input,
output);
}
}
int QkvToContextPluginDynamic::initialize() { return 0; }
nvinfer1::DimsExprs QkvToContextPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) {
// input[0], (B, S, 3 * N * H, 1, 1)
// input[1], (B, head_num, seq_len, seq_len)
// output, (B, seq_len, hidden)
PADDLE_ENFORCE_EQ(output_index, 0,
                    platform::errors::InvalidArgument(
                        "There is only one output of the QkvToContext plugin, "
"so the index should be zero,"
"but it's (%d)",
output_index));
PADDLE_ENFORCE_EQ(
nb_inputs, 2,
      platform::errors::InvalidArgument(
          "The input number of the QkvToContext plugin should be 2, but we found "
"it has (%d) inputs",
nb_inputs));
nvinfer1::DimsExprs ret;
ret.nbDims = 3;
ret.d[0] = inputs[0].d[0];
ret.d[1] = inputs[0].d[1];
ret.d[2] = expr_builder.constant(head_size_ * head_number_);
return ret;
}
bool QkvToContextPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) {
PADDLE_ENFORCE_NOT_NULL(
      in_out, platform::errors::InvalidArgument(
                  "The input of the qkv_to_context plugin should not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
#ifdef TRT_PLUGIN_FP16_AVALIABLE
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
#else
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
#endif
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
if (pos == 1) {
return in.type == prev.type && in.format == prev.format;
}
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType QkvToContextPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types, int nb_inputs) const {
PADDLE_ENFORCE_EQ(
      index, 0, platform::errors::InvalidArgument(
                    "The QkvToContext plugin only has one output, so the "
"index value should be 0, but get %d.",
index));
return input_types[0];
}
template <typename T>
__global__ void apply_scale(T *data, T scale, int n) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) {
    data[tid] = data[tid] * scale;
  }
#endif
}
int QkvToContextPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs,
void *const *outputs, void *workspace, hipStream_t stream) {
auto input_dims = input_desc[0].dims;
int input_num = ProductDim(input_dims);
// input[0], (B, S, 3 * N * H, 1, 1)
int batch = input_dims.d[0];
int seq_len = input_dims.d[1];
framework::Tensor multihead_temp_tensor;
int scratch_size = batch * head_number_ * seq_len * seq_len * 1;
int device_id;
hipGetDevice(&device_id);
multihead_temp_tensor.Resize({scratch_size + input_num});
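  // The temporary tensor packs two buffers: the first scratch_size elements
  // (qkptr below) hold the B x N x S x S attention scores, the remaining
  // input_num elements (tptr) hold the transposed Q/K/V.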
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. QkvToContext-->fp32";
auto *multihead_temp_data = multihead_temp_tensor.mutable_data<float>(
platform::CUDAPlace(device_id));
auto *qkptr = multihead_temp_data;
auto *tptr = multihead_temp_data + scratch_size;
const float *input0_data = static_cast<const float *>(inputs[0]);
const float *input1_data = static_cast<const float *>(inputs[1]);
// BxSx3xNxH => tptr: 3xBxNxSxH.
TransposeQKV(batch, seq_len, head_size_, head_number_, input0_data, tptr,
stream);
auto *device_ctx = static_cast<platform::CUDADeviceContext *>(
platform::DeviceContextPool::Instance().Get(
platform::CUDAPlace(device_id)));
const platform::CUDADeviceContext &dev_ctx = *device_ctx;
operators::math::MultiHeadGPUComputeFunctor<float> multihead_compute_func;
multihead_compute_func(dev_ctx, batch, seq_len, head_number_, head_size_,
qkptr, input1_data, tptr, scale_,
static_cast<float>(0.0));
int grid = batch * head_number_ * seq_len;
int block = head_size_;
float *output = static_cast<float *>(outputs[0]);
hipLaunchKernelGGL(( transpose<float>), dim3(grid), dim3(block), 0, stream, tptr, output, batch, seq_len,
head_number_, head_size_);
} else if (input_type == nvinfer1::DataType::kHALF) {
#ifdef TRT_PLUGIN_FP16_AVALIABLE
VLOG(1) << "TRT Plugin DataType selected. QkvToContext-->fp16";
auto *multihead_temp_data =
multihead_temp_tensor.mutable_data<int16_t>( // NOLINT
platform::CUDAPlace(device_id));
half *qkptr = reinterpret_cast<half *>(multihead_temp_data);
half *tptr = qkptr + scratch_size;
const half *input0_data = static_cast<const half *>(inputs[0]);
const half *input1_data = static_cast<const half *>(inputs[1]);
// BxSx3xNxH => tptr: 3xBxNxSxH.
TransposeQKV(batch, seq_len, head_size_, head_number_, input0_data, tptr,
stream);
auto *device_ctx = static_cast<platform::CUDADeviceContext *>(
platform::DeviceContextPool::Instance().Get(
platform::CUDAPlace(device_id)));
int n_q = seq_len * head_number_ * head_size_ * batch;
constexpr int threads = 128;
int blocks = (n_q + threads - 1) / threads;
hipLaunchKernelGGL(( apply_scale), dim3(blocks), dim3(threads), 0, stream, tptr, static_cast<half>(scale_),
n_q);
const platform::CUDADeviceContext &dev_ctx = *device_ctx;
operators::math::MultiHeadGPUComputeFunctor<half> multihead_compute_func;
multihead_compute_func(dev_ctx, batch, seq_len, head_number_, head_size_,
qkptr, input1_data, tptr, half(1.), half(0.0));
int grid = batch * head_number_ * seq_len;
int block = head_size_;
half *output = static_cast<half *>(outputs[0]);
hipLaunchKernelGGL(( transpose<half>), dim3(grid), dim3(block), 0, stream, tptr, output, batch, seq_len,
head_number_, head_size_);
#else
    PADDLE_THROW(platform::errors::Fatal(
        "The Ernie(Bert) TensorRT Plugin should be "
        "compiled with CUDA version >= 10.0 when running with fp16. "
        "Please recompile it or try to use fp32 by setting "
        "config.SetTRTDynamicShapeInfo(min_input_shape, "
        "max_input_shape, opt_input_shape, true)"));
#endif
} else {
PADDLE_THROW(platform::errors::Fatal(
"The QKV TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| f5e2f32f0ac7e2538e294894cc876362e5b0478b.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <cassert>
#include <cub/cub.cuh> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/inference/tensorrt/plugin/qkv_to_context_plugin.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_utils.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/platform/device_context.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
template <typename T>
__global__ void transpose(T *src, T *dst, const int batch_size,
const int seq_len, const int head_num,
const int size_per_head) {
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) +
seq_id * head_num * size_per_head + head_id * size_per_head +
threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
template <typename T>
__global__ void TransposeQkvKernel(const int H, const T *input, T *output) {
// Input: BxSx3xNxH
// Bias: 3xSxB
// Output: 3xBxNxSxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z;
const int N = blockDim.y;
const int S = gridDim.x;
const int B = gridDim.y;
const int NH = N * H;
const int NHS = NH * S;
const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3;
const int out_offset = s * H + n * S * H + b * NHS + m * NHS * B;
const int i = threadIdx.x;
output[out_offset + i] = input[in_offset + i];
}
inline void TransposeQKV(const int batch, const int seq_len,
const int head_size, const int head_num,
const float *input, float *output,
cudaStream_t stream) {
int scratch_size = batch * head_num * seq_len * seq_len;
const dim3 grid(seq_len, batch, 3);
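  // One block per (sequence position, batch, Q/K/V) triple. The branches
  // below vectorize the per-thread copy (float4/float2) when head_size and
  // the scratch size allow, shrinking blockDim.x accordingly.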
if (head_size % 4 == 0 && scratch_size % 4 == 0) {
const int h = head_size / 4;
const float4 *input4 = reinterpret_cast<const float4 *>(input);
float4 *output4 = reinterpret_cast<float4 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num, 1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num, head_size, 1024 * 4));
TransposeQkvKernel<float4><<<grid, block, 0, stream>>>(h, input4, output4);
} else if (head_size % 2 == 0 && scratch_size % 2 == 0) {
const int h = head_size / 2;
const float2 *input2 = reinterpret_cast<const float2 *>(input);
float2 *output2 = reinterpret_cast<float2 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num, 1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num, head_size, 1024 * 2));
TransposeQkvKernel<float2><<<grid, block, 0, stream>>>(h, input2, output2);
} else {
const dim3 block(head_size, head_num, 1);
// limit head_size * head_num to max block size(1024).
PADDLE_ENFORCE_LE(head_size * head_num, 1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num, head_size, 1024));
TransposeQkvKernel<float><<<grid, block, 0, stream>>>(head_size, input,
output);
}
}
inline void TransposeQKV(const int batch, const int seq_len,
const int head_size, const int head_num,
const half *input, half *output, cudaStream_t stream) {
int scratch_size = batch * head_num * seq_len * seq_len;
const dim3 grid(seq_len, batch, 3);
if (head_size % 8 == 0 && scratch_size % 8 == 0) {
int h = head_size / 8;
const int4 *input4 = reinterpret_cast<const int4 *>(input);
int4 *output4 = reinterpret_cast<int4 *>(output);
dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num, 1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num, head_size, 1024 * 8));
TransposeQkvKernel<int4><<<grid, block, 0, stream>>>(h, input4, output4);
} else if (head_size % 2 == 0 && scratch_size % 2 == 0) {
const int h = head_size / 2;
const half2 *input2 = reinterpret_cast<const half2 *>(input);
half2 *output2 = reinterpret_cast<half2 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num, 1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num, head_size, 1024 * 2));
TransposeQkvKernel<half2><<<grid, block, 0, stream>>>(h, input2, output2);
} else {
const dim3 block(head_size, head_num, 1);
// limit head_size * head_num to max block size(1024).
PADDLE_ENFORCE_LE(head_size * head_num, 1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num, head_size, 1024));
TransposeQkvKernel<half><<<grid, block, 0, stream>>>(head_size, input,
output);
}
}
int QkvToContextPluginDynamic::initialize() { return 0; }
nvinfer1::DimsExprs QkvToContextPluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) {
// input[0], (B, S, 3 * N * H, 1, 1)
// input[1], (B, head_num, seq_len, seq_len)
// output, (B, seq_len, hidden)
PADDLE_ENFORCE_EQ(output_index, 0,
                    platform::errors::InvalidArgument(
                        "There is only one output of the QkvToContext plugin, "
"so the index should be zero,"
"but it's (%d)",
output_index));
PADDLE_ENFORCE_EQ(
nb_inputs, 2,
      platform::errors::InvalidArgument(
          "The input number of the QkvToContext plugin should be 2, but we found "
"it has (%d) inputs",
nb_inputs));
nvinfer1::DimsExprs ret;
ret.nbDims = 3;
ret.d[0] = inputs[0].d[0];
ret.d[1] = inputs[0].d[1];
ret.d[2] = expr_builder.constant(head_size_ * head_number_);
return ret;
}
bool QkvToContextPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) {
PADDLE_ENFORCE_NOT_NULL(
      in_out, platform::errors::InvalidArgument(
                  "The input of the qkv_to_context plugin should not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
#ifdef TRT_PLUGIN_FP16_AVALIABLE
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
#else
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
#endif
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
if (pos == 1) {
return in.type == prev.type && in.format == prev.format;
}
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType QkvToContextPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types, int nb_inputs) const {
PADDLE_ENFORCE_EQ(
      index, 0, platform::errors::InvalidArgument(
                    "The QkvToContext plugin only has one output, so the "
"index value should be 0, but get %d.",
index));
return input_types[0];
}
template <typename T>
__global__ void apply_scale(T *data, T scale, int n) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) {
    data[tid] = data[tid] * scale;
  }
#endif
}
int QkvToContextPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs,
void *const *outputs, void *workspace, cudaStream_t stream) {
auto input_dims = input_desc[0].dims;
int input_num = ProductDim(input_dims);
// input[0], (B, S, 3 * N * H, 1, 1)
int batch = input_dims.d[0];
int seq_len = input_dims.d[1];
framework::Tensor multihead_temp_tensor;
int scratch_size = batch * head_number_ * seq_len * seq_len * 1;
int device_id;
cudaGetDevice(&device_id);
multihead_temp_tensor.Resize({scratch_size + input_num});
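  // The temporary tensor packs two buffers: the first scratch_size elements
  // (qkptr below) hold the B x N x S x S attention scores, the remaining
  // input_num elements (tptr) hold the transposed Q/K/V.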
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. QkvToContext-->fp32";
auto *multihead_temp_data = multihead_temp_tensor.mutable_data<float>(
platform::CUDAPlace(device_id));
auto *qkptr = multihead_temp_data;
auto *tptr = multihead_temp_data + scratch_size;
const float *input0_data = static_cast<const float *>(inputs[0]);
const float *input1_data = static_cast<const float *>(inputs[1]);
// BxSx3xNxH => tptr: 3xBxNxSxH.
TransposeQKV(batch, seq_len, head_size_, head_number_, input0_data, tptr,
stream);
auto *device_ctx = static_cast<platform::CUDADeviceContext *>(
platform::DeviceContextPool::Instance().Get(
platform::CUDAPlace(device_id)));
const platform::CUDADeviceContext &dev_ctx = *device_ctx;
operators::math::MultiHeadGPUComputeFunctor<float> multihead_compute_func;
multihead_compute_func(dev_ctx, batch, seq_len, head_number_, head_size_,
qkptr, input1_data, tptr, scale_,
static_cast<float>(0.0));
int grid = batch * head_number_ * seq_len;
int block = head_size_;
float *output = static_cast<float *>(outputs[0]);
transpose<float><<<grid, block, 0, stream>>>(tptr, output, batch, seq_len,
head_number_, head_size_);
} else if (input_type == nvinfer1::DataType::kHALF) {
#ifdef TRT_PLUGIN_FP16_AVALIABLE
VLOG(1) << "TRT Plugin DataType selected. QkvToContext-->fp16";
auto *multihead_temp_data =
multihead_temp_tensor.mutable_data<int16_t>( // NOLINT
platform::CUDAPlace(device_id));
half *qkptr = reinterpret_cast<half *>(multihead_temp_data);
half *tptr = qkptr + scratch_size;
const half *input0_data = static_cast<const half *>(inputs[0]);
const half *input1_data = static_cast<const half *>(inputs[1]);
// BxSx3xNxH => tptr: 3xBxNxSxH.
TransposeQKV(batch, seq_len, head_size_, head_number_, input0_data, tptr,
stream);
auto *device_ctx = static_cast<platform::CUDADeviceContext *>(
platform::DeviceContextPool::Instance().Get(
platform::CUDAPlace(device_id)));
int n_q = seq_len * head_number_ * head_size_ * batch;
constexpr int threads = 128;
int blocks = (n_q + threads - 1) / threads;
apply_scale<<<blocks, threads, 0, stream>>>(tptr, static_cast<half>(scale_),
n_q);
const platform::CUDADeviceContext &dev_ctx = *device_ctx;
operators::math::MultiHeadGPUComputeFunctor<half> multihead_compute_func;
multihead_compute_func(dev_ctx, batch, seq_len, head_number_, head_size_,
qkptr, input1_data, tptr, half(1.), half(0.0));
int grid = batch * head_number_ * seq_len;
int block = head_size_;
half *output = static_cast<half *>(outputs[0]);
transpose<half><<<grid, block, 0, stream>>>(tptr, output, batch, seq_len,
head_number_, head_size_);
#else
    PADDLE_THROW(platform::errors::Fatal(
        "The Ernie(Bert) TensorRT Plugin should be "
        "compiled with CUDA version >= 10.0 when running with fp16. "
        "Please recompile it or try to use fp32 by setting "
        "config.SetTRTDynamicShapeInfo(min_input_shape, "
        "max_input_shape, opt_input_shape, true)"));
#endif
} else {
PADDLE_THROW(platform::errors::Fatal(
"The QKV TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
8631443d5d5db111da1b946a6bfa53df3ad23397.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "max_subsampling_layer_updater_schema.h"
#include "../neural_network_exception.h"
#include "../max_subsampling_layer.h"
#include "max_subsampling_layer_updater_hip.cuh"
#include "max_subsampling_layer_cudnn_updater_cuda.h"
#include <boost/format.hpp>
namespace nnforge
{
namespace cuda
{
max_subsampling_layer_updater_schema::max_subsampling_layer_updater_schema()
{
}
max_subsampling_layer_updater_schema::~max_subsampling_layer_updater_schema()
{
}
layer_updater_schema_smart_ptr max_subsampling_layer_updater_schema::create_specific() const
{
return layer_updater_schema_smart_ptr(new max_subsampling_layer_updater_schema());
}
const boost::uuids::uuid& max_subsampling_layer_updater_schema::get_uuid() const
{
return max_subsampling_layer::layer_guid;
}
layer_updater_cuda_smart_ptr max_subsampling_layer_updater_schema::create_updater_specific(
const layer_configuration_specific& input_configuration_specific,
const layer_configuration_specific& output_configuration_specific) const
{
layer_updater_cuda_smart_ptr res;
nnforge_shared_ptr<const max_subsampling_layer> layer_derived = nnforge_dynamic_pointer_cast<const max_subsampling_layer>(layer_schema);
if (layer_derived->tiling)
throw neural_network_exception("There is no CUDA updater for max subsampling layer with tiling");
switch (output_configuration_specific.dimension_sizes.size())
{
case 1:
res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<1>());
break;
case 2:
res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<2>());
//res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_cudnn_updater_cuda());
break;
case 3:
res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<3>());
break;
case 4:
res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<4>());
break;
default:
throw neural_network_exception((boost::format("No CUDA updater for the max subsampling layer of %1% dimensions") % output_configuration_specific.dimension_sizes.size()).str());
}
return res;
}
}
}
| 8631443d5d5db111da1b946a6bfa53df3ad23397.cu | /*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "max_subsampling_layer_updater_schema.h"
#include "../neural_network_exception.h"
#include "../max_subsampling_layer.h"
#include "max_subsampling_layer_updater_cuda.cuh"
#include "max_subsampling_layer_cudnn_updater_cuda.h"
#include <boost/format.hpp>
namespace nnforge
{
namespace cuda
{
max_subsampling_layer_updater_schema::max_subsampling_layer_updater_schema()
{
}
max_subsampling_layer_updater_schema::~max_subsampling_layer_updater_schema()
{
}
layer_updater_schema_smart_ptr max_subsampling_layer_updater_schema::create_specific() const
{
return layer_updater_schema_smart_ptr(new max_subsampling_layer_updater_schema());
}
const boost::uuids::uuid& max_subsampling_layer_updater_schema::get_uuid() const
{
return max_subsampling_layer::layer_guid;
}
layer_updater_cuda_smart_ptr max_subsampling_layer_updater_schema::create_updater_specific(
const layer_configuration_specific& input_configuration_specific,
const layer_configuration_specific& output_configuration_specific) const
{
layer_updater_cuda_smart_ptr res;
nnforge_shared_ptr<const max_subsampling_layer> layer_derived = nnforge_dynamic_pointer_cast<const max_subsampling_layer>(layer_schema);
if (layer_derived->tiling)
throw neural_network_exception("There is no CUDA updater for max subsampling layer with tiling");
switch (output_configuration_specific.dimension_sizes.size())
{
case 1:
res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<1>());
break;
case 2:
res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<2>());
//res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_cudnn_updater_cuda());
break;
case 3:
res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<3>());
break;
case 4:
res = layer_updater_cuda_smart_ptr(new max_subsampling_layer_updater_cuda<4>());
break;
default:
throw neural_network_exception((boost::format("No CUDA updater for the max subsampling layer of %1% dimensions") % output_configuration_specific.dimension_sizes.size()).str());
}
return res;
}
}
}
|
5cebe18fdee1e2430cfe3d3582d3efa89300c2a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "KNNBinningV1.cuh"
using namespace OpenSteer;
#include "KNNBinData.cuh"
#include <thrust/sort.h>
//#include "DebugUtils.h"
//#define TIMING
// Kernel file function prototypes.
extern "C"
{
// Bind texCellIndices to the hipArray.
__host__ void KNNBinningV1BindTexture( hipArray * pCudaArray );
__host__ void KNNBinningV1UnbindTexture( void );
__host__ void KNNBinningV1KernelBindTextures( uint const* pdBCellStart,
uint const* pdBCellEnd,
uint const* pdBIndices,
float4 const* pdBPositionSorted,
uint const numCells,
uint const numB
);
__host__ void KNNBinningV1KernelUnbindTextures( void );
__host__ void KNNBinningV1ReorderDBBindTextures( float4 const* pdPosition,
uint const numAgents );
__host__ void KNNBinningV1ReorderDBUnbindTextures( void );
__global__ void KNNBinningV1BuildDB( float4 const* pdPosition, // In: Positions of each agent.
size_t * pdAgentIndices, // Out: Indices of each agent.
size_t * pdCellIndices, // Out: Indices of the cell each agent is in.
size_t const numAgents
);
__global__ void KNNBinningV1ReorderDB( uint const* pdAgentIndices, // In: (sorted) agent index.
uint const* pdCellIndices, // In: (sorted) cell index agent is in.
float4 * pdPositionSorted, // Out: Sorted agent positions.
uint * pdCellStart, // Out: Start index of this cell in pdCellIndices.
uint * pdCellEnd, // Out: End index of this cell in pdCellIndices.
size_t const numAgents
);
__global__ void KNNBinningV1Kernel( // Group A
float4 const* pdAPositionSorted, // In: Sorted group A positions.
uint const* pdAIndices, // In: Sorted group A indices
uint const numA, // In: Size of group A.
// Cell neighbor info.
int const radius, // In: Search radius (in cells) to consider.
// Output data.
uint * pdKNNIndices, // Out: Indices of K Nearest Neighbors in pdPosition.
float * pdKNNDistances, // Out: Distances of the K Nearest Neighbors in pdPosition.
uint const k, // In: Number of neighbors to consider.
uint const numB, // In: Size of group B.
bool const groupWithSelf // In: Are we testing this group with itself? (group A == group B)
);
}
#pragma region KNNBinningV1UpdateDBCUDA
KNNBinningV1UpdateDBCUDA::KNNBinningV1UpdateDBCUDA( BaseGroup * pGroup, KNNBinData * pKNNBinData )
: AbstractCUDAKernel( NULL, 1.f, 0 ),
m_pGroup( pGroup ),
m_pKNNBinData( pKNNBinData )
{
// Nothing to do.
}
void KNNBinningV1UpdateDBCUDA::init( void )
{
// Bind the lookup texture.
KNNBinningV1BindTexture( m_pKNNBinData->pdCellIndexArray() );
}
void KNNBinningV1UpdateDBCUDA::run( void )
{
dim3 grid = dim3( (m_pGroup->Size() + THREADSPERBLOCK - 1) / THREADSPERBLOCK );
dim3 block = dim3( THREADSPERBLOCK );
// Gather required data.
float4 const* pdPosition = m_pGroup->pdPosition();
uint * pdCellIndices = m_pGroup->GetKNNDatabase().pdCellIndices();
uint * pdAgentIndicesSorted = m_pGroup->GetKNNDatabase().pdAgentIndicesSorted();
uint * pdCellIndicesSorted = m_pGroup->GetKNNDatabase().pdCellIndicesSorted();
float4 * pdPositionSorted = m_pGroup->GetKNNDatabase().pdPositionSorted();
uint * pdCellStart = m_pGroup->GetKNNDatabase().pdCellStart();
uint * pdCellEnd = m_pGroup->GetKNNDatabase().pdCellEnd();
uint const& numAgents = m_pGroup->Size();
#if defined TIMING
//
// TIMING: hard to get exact times with profiling, too many operations.
//
// Events for timing the complete operation.
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start, 0 );
#endif
// Call KNNBinningBuildDB to build the database.
hipLaunchKernelGGL(( KNNBinningV1BuildDB), dim3(grid), dim3(block) , 0, 0, pdPosition, pdAgentIndicesSorted, pdCellIndices, numAgents );
cutilCheckMsg( "KNNBinningBuildDB failed." );
CUDA_SAFE_CALL( hipDeviceSynchronize() );
// Copy pdCellIndices to pdCellIndicesSorted.
CUDA_SAFE_CALL( hipMemcpy( pdCellIndicesSorted, pdCellIndices, numAgents * sizeof(uint), hipMemcpyDeviceToDevice ) );
// Sort pdAgentIndicesSorted on pdCellIndicesSorted using thrust.
thrust::sort_by_key( thrust::device_ptr<uint>( pdCellIndicesSorted ),
thrust::device_ptr<uint>( pdCellIndicesSorted + numAgents ),
thrust::device_ptr<uint>( pdAgentIndicesSorted ) );
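	// After the sort, agents that share a cell are contiguous in
	// pdAgentIndicesSorted; KNNBinningV1ReorderDB uses this ordering to fill
	// pdCellStart/pdCellEnd with each cell's index range.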
// Set all cells to empty.
CUDA_SAFE_CALL( hipMemset( pdCellStart, 0xffffffff, m_pKNNBinData->getNumCells() * sizeof(uint) ) );
// Bind the textures.
KNNBinningV1ReorderDBBindTextures( pdPosition, numAgents );
// Call KNNBinningReorderDB to re-order the data in the DB.
	hipLaunchKernelGGL(( KNNBinningV1ReorderDB), dim3(grid), dim3(block) , 0, 0, pdAgentIndicesSorted, pdCellIndicesSorted, pdPositionSorted, pdCellStart, pdCellEnd, numAgents );
	cutilCheckMsg( "KNNBinningReorderDB failed." );
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
// Unbind the textures.
KNNBinningV1ReorderDBUnbindTextures();
#if defined TIMING
//
// TIMING:
//
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
float elapsedTime;
hipEventElapsedTime( &elapsedTime, start, stop );
char szString[128] = {0};
sprintf_s( szString, "KNNBinningV1UpdateDBCUDA,%f\n", elapsedTime );
//OutputDebugStringToFile( szString );
OutputDebugString( szString );
// Destroy the events.
hipEventDestroy( start );
hipEventDestroy( stop );
#endif
}
void KNNBinningV1UpdateDBCUDA::close( void )
{
// Unbind the texture.
KNNBinningV1UnbindTexture();
// The AgentGroup's database has now changed.
m_pGroup->SetSyncHost();
}
#pragma endregion
#pragma region KNNBinningV1CUDA
KNNBinningV1CUDA::KNNBinningV1CUDA( AgentGroup * pAgentGroup, KNNData * pKNNData, KNNBinData * pKNNBinData, BaseGroup * pOtherGroup, uint const searchRadius )
: AbstractCUDAKernel( pAgentGroup, 1.f, 0 ),
m_pKNNData( pKNNData ),
m_pKNNBinData( pKNNBinData ),
m_pOtherGroup( pOtherGroup ),
m_searchRadius( searchRadius )
{
}
void KNNBinningV1CUDA::init( void )
{
// Bind the cell indices texture.
//KNNBinningV1CUDABindTexture( m_pKNNBinData->pdCellIndexArray() );
}
void KNNBinningV1CUDA::run( void )
{
dim3 grid = gridDim();
dim3 block = blockDim();
// Gather the required data.
float4 const* pdAPositionSorted = m_pAgentGroup->GetKNNDatabase().pdPositionSorted();
uint const* pdAIndices = m_pAgentGroup->GetKNNDatabase().pdAgentIndicesSorted();
uint const& numA = getNumAgents();
float4 const* pdBPositionSorted = m_pOtherGroup->GetKNNDatabase().pdPositionSorted();
uint const* pdBIndices = m_pOtherGroup->GetKNNDatabase().pdAgentIndicesSorted();
uint const& numB = m_pOtherGroup->Size();
uint const* pdBCellStart = m_pOtherGroup->GetKNNDatabase().pdCellStart();
uint const* pdBCellEnd = m_pOtherGroup->GetKNNDatabase().pdCellEnd();
uint const& numCells = m_pKNNBinData->getNumCells();
uint * pdKNNIndices = m_pKNNData->pdKNNIndices();
float * pdKNNDistances = m_pKNNData->pdKNNDistances();
uint const& k = m_pOtherGroup->GetKNNDatabase().k();
bool const groupWithSelf = m_pAgentGroup == m_pOtherGroup;
// Compute the size of shared memory needed for each block.
size_t shMemSize = k * THREADSPERBLOCK * (sizeof(float) + sizeof(uint));
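// Sizing note: this is presumably one private k-entry candidate list per thread
// (k float distances + k uint indices), for all THREADSPERBLOCK threads of a block.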
#if defined TIMING
//
// TIMING: hard to get exact times with profiling, too many operations.
//
// Events for timing the complete operation.
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start, 0 );
#endif
// Bind the textures.
KNNBinningV1KernelBindTextures( pdBCellStart, pdBCellEnd, pdBIndices, pdBPositionSorted, numCells, numB );
KNNBinningV1BindTexture( m_pKNNBinData->pdCellIndexArray() );
// Call the KNNBinning kernel.
hipLaunchKernelGGL(( KNNBinningV1Kernel), dim3(grid), dim3(block), shMemSize , 0, pdAPositionSorted,
pdAIndices,
numA,
m_searchRadius,
pdKNNIndices,
pdKNNDistances,
k,
numB,
groupWithSelf
);
cutilCheckMsg( "KNNBinningKernel failed." );
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
// Unbind the textures.
KNNBinningV1KernelUnbindTextures();
KNNBinningV1UnbindTexture();
#if defined TIMING && defined _DEBUG
//
// TIMING:
//
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
float elapsedTime;
hipEventElapsedTime( &elapsedTime, start, stop );
char szString[128] = {0};
sprintf_s( szString, "KNNBinningV1CUDA,%f\n", elapsedTime );
//OutputDebugStringToFile( szString );
OutputDebugString( szString );
// Destroy the events.
hipEventDestroy( start );
hipEventDestroy( stop );
#endif
}
void KNNBinningV1CUDA::close( void )
{
// The KNNData has most likely changed.
m_pKNNData->setSyncHost();
}
#pragma endregion
| 5cebe18fdee1e2430cfe3d3582d3efa89300c2a4.cu | #include "KNNBinningV1.cuh"
using namespace OpenSteer;
#include "KNNBinData.cuh"
#include <thrust/sort.h>
//#include "DebugUtils.h"
//#define TIMING
// Kernel file function prototypes.
extern "C"
{
// Bind texCellIndices to the cudaArray.
__host__ void KNNBinningV1BindTexture( cudaArray * pCudaArray );
__host__ void KNNBinningV1UnbindTexture( void );
__host__ void KNNBinningV1KernelBindTextures( uint const* pdBCellStart,
uint const* pdBCellEnd,
uint const* pdBIndices,
float4 const* pdBPositionSorted,
uint const numCells,
uint const numB
);
__host__ void KNNBinningV1KernelUnbindTextures( void );
__host__ void KNNBinningV1ReorderDBBindTextures( float4 const* pdPosition,
uint const numAgents );
__host__ void KNNBinningV1ReorderDBUnbindTextures( void );
__global__ void KNNBinningV1BuildDB( float4 const* pdPosition, // In: Positions of each agent.
size_t * pdAgentIndices, // Out: Indices of each agent.
size_t * pdCellIndices, // Out: Indices of the cell each agent is in.
size_t const numAgents
);
__global__ void KNNBinningV1ReorderDB( uint const* pdAgentIndices, // In: (sorted) agent index.
uint const* pdCellIndices, // In: (sorted) cell index agent is in.
float4 * pdPositionSorted, // Out: Sorted agent positions.
uint * pdCellStart, // Out: Start index of this cell in pdCellIndices.
uint * pdCellEnd, // Out: End index of this cell in pdCellIndices.
size_t const numAgents
);
__global__ void KNNBinningV1Kernel( // Group A
float4 const* pdAPositionSorted, // In: Sorted group A positions.
uint const* pdAIndices, // In: Sorted group A indices
uint const numA, // In: Size of group A.
// Cell neighbor info.
int const radius, // In: Search radius (in cells) to consider.
// Output data.
uint * pdKNNIndices, // Out: Indices of K Nearest Neighbors in pdPosition.
float * pdKNNDistances, // Out: Distances of the K Nearest Neighbors in pdPosition.
uint const k, // In: Number of neighbors to consider.
uint const numB, // In: Size of group B.
bool const groupWithSelf // In: Are we testing this group with itself? (group A == group B)
);
}
#pragma region KNNBinningV1UpdateDBCUDA
KNNBinningV1UpdateDBCUDA::KNNBinningV1UpdateDBCUDA( BaseGroup * pGroup, KNNBinData * pKNNBinData )
: AbstractCUDAKernel( NULL, 1.f, 0 ),
m_pGroup( pGroup ),
m_pKNNBinData( pKNNBinData )
{
// Nothing to do.
}
void KNNBinningV1UpdateDBCUDA::init( void )
{
// Bind the lookup texture.
KNNBinningV1BindTexture( m_pKNNBinData->pdCellIndexArray() );
}
void KNNBinningV1UpdateDBCUDA::run( void )
{
dim3 grid = dim3( (m_pGroup->Size() + THREADSPERBLOCK - 1) / THREADSPERBLOCK );
dim3 block = dim3( THREADSPERBLOCK );
// Gather required data.
float4 const* pdPosition = m_pGroup->pdPosition();
uint * pdCellIndices = m_pGroup->GetKNNDatabase().pdCellIndices();
uint * pdAgentIndicesSorted = m_pGroup->GetKNNDatabase().pdAgentIndicesSorted();
uint * pdCellIndicesSorted = m_pGroup->GetKNNDatabase().pdCellIndicesSorted();
float4 * pdPositionSorted = m_pGroup->GetKNNDatabase().pdPositionSorted();
uint * pdCellStart = m_pGroup->GetKNNDatabase().pdCellStart();
uint * pdCellEnd = m_pGroup->GetKNNDatabase().pdCellEnd();
uint const& numAgents = m_pGroup->Size();
#if defined TIMING
//
// TIMING: hard to get exact times with profiling, too many operations.
//
// Events for timing the complete operation.
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
#endif
// Call KNNBinningBuildDB to build the database.
KNNBinningV1BuildDB<<< grid, block >>>( pdPosition, pdAgentIndicesSorted, pdCellIndices, numAgents );
cutilCheckMsg( "KNNBinningBuildDB failed." );
CUDA_SAFE_CALL( cudaDeviceSynchronize() );
// Copy pdCellIndices to pdCellIndicesSorted.
CUDA_SAFE_CALL( cudaMemcpy( pdCellIndicesSorted, pdCellIndices, numAgents * sizeof(uint), cudaMemcpyDeviceToDevice ) );
// Sort pdAgentIndicesSorted on pdCellIndicesSorted using thrust.
thrust::sort_by_key( thrust::device_ptr<uint>( pdCellIndicesSorted ),
thrust::device_ptr<uint>( pdCellIndicesSorted + numAgents ),
thrust::device_ptr<uint>( pdAgentIndicesSorted ) );
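// After the key sort, agents that fall into the same cell occupy a contiguous run of
// pdAgentIndicesSorted, which lets the reorder kernel derive pdCellStart/pdCellEnd in a single pass.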
// Set all cells to empty.
CUDA_SAFE_CALL( cudaMemset( pdCellStart, 0xffffffff, m_pKNNBinData->getNumCells() * sizeof(uint) ) );
// Bind the textures.
KNNBinningV1ReorderDBBindTextures( pdPosition, numAgents );
// Call KNNBinningReorderDB to re-order the data in the DB.
KNNBinningV1ReorderDB<<< grid, block >>>( pdAgentIndicesSorted, pdCellIndicesSorted, pdPositionSorted, pdCellStart, pdCellEnd, numAgents );
cutilCheckMsg( "KNNBinningReorderDB failed." );
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
// Unbind the textures.
KNNBinningV1ReorderDBUnbindTextures();
#if defined TIMING
//
// TIMING:
//
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, stop );
char szString[128] = {0};
sprintf_s( szString, "KNNBinningV1UpdateDBCUDA,%f\n", elapsedTime );
//OutputDebugStringToFile( szString );
OutputDebugString( szString );
// Destroy the events.
cudaEventDestroy( start );
cudaEventDestroy( stop );
#endif
}
void KNNBinningV1UpdateDBCUDA::close( void )
{
// Unbind the texture.
KNNBinningV1UnbindTexture();
// The AgentGroup's database has now changed.
m_pGroup->SetSyncHost();
}
#pragma endregion
#pragma region KNNBinningV1CUDA
KNNBinningV1CUDA::KNNBinningV1CUDA( AgentGroup * pAgentGroup, KNNData * pKNNData, KNNBinData * pKNNBinData, BaseGroup * pOtherGroup, uint const searchRadius )
: AbstractCUDAKernel( pAgentGroup, 1.f, 0 ),
m_pKNNData( pKNNData ),
m_pKNNBinData( pKNNBinData ),
m_pOtherGroup( pOtherGroup ),
m_searchRadius( searchRadius )
{
}
void KNNBinningV1CUDA::init( void )
{
// Bind the cell indices texture.
//KNNBinningV1CUDABindTexture( m_pKNNBinData->pdCellIndexArray() );
}
void KNNBinningV1CUDA::run( void )
{
dim3 grid = gridDim();
dim3 block = blockDim();
// Gather the required data.
float4 const* pdAPositionSorted = m_pAgentGroup->GetKNNDatabase().pdPositionSorted();
uint const* pdAIndices = m_pAgentGroup->GetKNNDatabase().pdAgentIndicesSorted();
uint const& numA = getNumAgents();
float4 const* pdBPositionSorted = m_pOtherGroup->GetKNNDatabase().pdPositionSorted();
uint const* pdBIndices = m_pOtherGroup->GetKNNDatabase().pdAgentIndicesSorted();
uint const& numB = m_pOtherGroup->Size();
uint const* pdBCellStart = m_pOtherGroup->GetKNNDatabase().pdCellStart();
uint const* pdBCellEnd = m_pOtherGroup->GetKNNDatabase().pdCellEnd();
uint const& numCells = m_pKNNBinData->getNumCells();
uint * pdKNNIndices = m_pKNNData->pdKNNIndices();
float * pdKNNDistances = m_pKNNData->pdKNNDistances();
uint const& k = m_pOtherGroup->GetKNNDatabase().k();
bool const groupWithSelf = m_pAgentGroup == m_pOtherGroup;
// Compute the size of shared memory needed for each block.
size_t shMemSize = k * THREADSPERBLOCK * (sizeof(float) + sizeof(uint));
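// Sizing note: this is presumably one private k-entry candidate list per thread
// (k float distances + k uint indices), for all THREADSPERBLOCK threads of a block.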
#if defined TIMING
//
// TIMING: hard to get exact times with profiling, too many operations.
//
// Events for timing the complete operation.
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
#endif
// Bind the textures.
KNNBinningV1KernelBindTextures( pdBCellStart, pdBCellEnd, pdBIndices, pdBPositionSorted, numCells, numB );
KNNBinningV1BindTexture( m_pKNNBinData->pdCellIndexArray() );
// Call the KNNBinning kernel.
KNNBinningV1Kernel<<< grid, block, shMemSize >>>( pdAPositionSorted,
pdAIndices,
numA,
m_searchRadius,
pdKNNIndices,
pdKNNDistances,
k,
numB,
groupWithSelf
);
cutilCheckMsg( "KNNBinningKernel failed." );
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
// Unbind the textures.
KNNBinningV1KernelUnbindTextures();
KNNBinningV1UnbindTexture();
#if defined TIMING && defined _DEBUG
//
// TIMING:
//
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, stop );
char szString[128] = {0};
sprintf_s( szString, "KNNBinningV1CUDA,%f\n", elapsedTime );
//OutputDebugStringToFile( szString );
OutputDebugString( szString );
// Destroy the events.
cudaEventDestroy( start );
cudaEventDestroy( stop );
#endif
}
void KNNBinningV1CUDA::close( void )
{
// The KNNData has most likely changed.
m_pKNNData->setSyncHost();
}
#pragma endregion
|
b13ad81b41d40817298d372128c235d397d7fb1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <stdlib.h>
#include <algorithm>
#include <limits>
#include <random/rng.cuh>
#include <selection/kselection.cuh>
namespace MLCommon {
namespace Selection {
template <typename TypeV, typename TypeK, int N, int TPB, bool Greater>
__global__ void sortTestKernel(TypeK *key) {
KVArray<TypeV, TypeK, N, Greater> arr;
#pragma unroll
for (int i = 0; i < N; ++i) {
arr.arr[i].val = (TypeV)laneId();
arr.arr[i].key = (TypeK)laneId();
}
warpFence();
arr.sort();
warpFence();
#pragma unroll
for (int i = 0; i < N; ++i)
arr.arr[i].store(nullptr, key + threadIdx.x + i * TPB);
}
template <typename TypeV, typename TypeK, int N, int TPB, bool Greater>
void sortTest(TypeK *key) {
TypeK *dkey;
CUDA_CHECK(hipMalloc((void **)&dkey, sizeof(TypeK) * TPB * N));
hipLaunchKernelGGL(( sortTestKernel<TypeV, TypeK, N, TPB, Greater>), dim3(1), dim3(TPB), 0, 0, dkey);
CUDA_CHECK(hipPeekAtLastError());
updateHost<TypeK>(key, dkey, TPB * N, 0);
CUDA_CHECK(hipFree(dkey));
}
/************************************************************************/
/********************** Add the function for CPU test *******************/
/************************************************************************/
template <typename TypeV, typename TypeK, bool Greater>
int cmp(KVPair<TypeV, TypeK> a, KVPair<TypeV, TypeK> b) {
if (Greater == 0) {
return a.val > b.val;
} else {
return a.val < b.val;
}
}
template <typename TypeV, typename TypeK, bool Greater>
void partSortKVPair(KVPair<TypeV, TypeK> *arr, int N, int k) {
std::partial_sort(arr, arr + k, arr + N, cmp<TypeV, TypeK, Greater>);
}
template <typename TypeV, typename TypeK, int N, bool Greater>
void sortKVArray(KVArray<TypeV, TypeK, N, Greater> &arr) {
std::sort(arr.arr, arr.arr + N, cmp<TypeV, TypeK, Greater>);
}
template <typename TypeV, typename TypeK, bool Greater>
::testing::AssertionResult checkResult(TypeV *d_arr, TypeV *d_outv,
TypeK *d_outk, int rows, int N, int k,
TypeV tolerance) {
for (int rIndex = 0; rIndex < rows; rIndex++) {
// input data
TypeV *h_arr = new TypeV[N];
updateHost(h_arr, d_arr + rIndex * N, N, 0);
KVPair<TypeV, TypeK> *topk = new KVPair<TypeV, TypeK>[N];
for (int j = 0; j < N; j++) {
topk[j].val = h_arr[j];
topk[j].key = j;
}
// result reference
TypeV *h_outv = new TypeV[k];
updateHost(h_outv, d_outv + rIndex * k, k, 0);
TypeK *h_outk = new TypeK[k];
updateHost(h_outk, d_outk + rIndex * k, k, 0);
// calculate the result
partSortKVPair<TypeV, TypeK, Greater>(topk, N, k);
// check result
for (int j = 0; j < k; j++) {
// std::cout<<"Get value at ("<<rIndex<<" "<<j<<") Cpu "
// <<topk[j].val<<" "<<topk[j].key<<" Gpu "<<h_outv[j]<<" "
//<<h_outk[j] <<std::endl<<std::endl;
if (abs(h_outv[j] - topk[j].val) > tolerance) {
return ::testing::AssertionFailure()
<< "actual=" << topk[j].val << " != expected=" << h_outv[j];
}
}
// delete resource
delete[] h_arr;
delete[] h_outv;
delete[] h_outk;
delete[] topk;
}
return ::testing::AssertionSuccess();
}
// Structure WarpTopKInputs
template <typename T>
struct WarpTopKInputs {
T tolerance;
int rows; // batch size
int cols; // N the length of variables
int k; // the top-k value
unsigned long long int seed; // seed to generate data
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const WarpTopKInputs<T> &dims) {
return os;
}
// Define functions WarpTopKTest
template <typename T>
class WarpTopKTest : public ::testing::TestWithParam<WarpTopKInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<WarpTopKInputs<T>>::GetParam();
Random::Rng r(params.seed);
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(arr, params.rows * params.cols);
allocate(outk, params.rows * params.k);
allocate(outv, params.rows * params.k);
r.uniform(arr, params.rows * params.cols, T(-1.0), T(1.0), stream);
static const bool Sort = false;
static const bool Greater = true;
warpTopK<T, int, Greater, Sort>(outv, outk, arr, params.k, params.rows,
params.cols, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(outv));
CUDA_CHECK(hipFree(outk));
CUDA_CHECK(hipFree(arr));
}
protected:
WarpTopKInputs<T> params;
T *arr, *outv;
int *outk;
};
// Parameters
// Milestone 1: Verify the result of current implementation
// Milestone 2: Support all the values of k between 1 and 1024; both inclusive
// Milestone 2.1: Using the POC code to Support all the values
const std::vector<WarpTopKInputs<float>> inputs2_0 = {
{0.00000001, 2, 1024, 256, 1234ULL}};
const std::vector<WarpTopKInputs<float>> inputs2_1 = {
{0.00000001, 4, 2048, 1024, 1234ULL}};
const std::vector<WarpTopKInputs<float>> inputs2_2 = {
{0.00000001, 4, 2048, 1, 1234ULL}};
// Milestone 2.2: Using the full thread queue and warp queue code to support
// all the values
// @TODO: Milestone 3: Support not sorted
// @TODO: Milestone 4: Support multi-gpu
// Define the function TEST_P
typedef WarpTopKTest<float> TestD2_0;
typedef WarpTopKTest<float> TestD2_1;
typedef WarpTopKTest<float> TestD2_2;
TEST_P(TestD2_0, Result) {
const static bool Greater = true;
ASSERT_TRUE((checkResult<float, int, Greater>(
arr, outv, outk, params.rows, params.cols, params.k, params.tolerance)));
}
TEST_P(TestD2_1, Result) {
const static bool Greater = true;
ASSERT_TRUE((checkResult<float, int, Greater>(
arr, outv, outk, params.rows, params.cols, params.k, params.tolerance)));
}
TEST_P(TestD2_2, Result) {
const static bool Greater = true;
ASSERT_TRUE((checkResult<float, int, Greater>(
arr, outv, outk, params.rows, params.cols, params.k, params.tolerance)));
}
// Instantiate
INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_0,
::testing::ValuesIn(inputs2_0));
INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_1,
::testing::ValuesIn(inputs2_1));
INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_2,
::testing::ValuesIn(inputs2_2));
} // end namespace Selection
} // end namespace MLCommon
| b13ad81b41d40817298d372128c235d397d7fb1d.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <stdlib.h>
#include <algorithm>
#include <limits>
#include <random/rng.cuh>
#include <selection/kselection.cuh>
namespace MLCommon {
namespace Selection {
template <typename TypeV, typename TypeK, int N, int TPB, bool Greater>
__global__ void sortTestKernel(TypeK *key) {
KVArray<TypeV, TypeK, N, Greater> arr;
#pragma unroll
for (int i = 0; i < N; ++i) {
arr.arr[i].val = (TypeV)laneId();
arr.arr[i].key = (TypeK)laneId();
}
warpFence();
arr.sort();
warpFence();
#pragma unroll
for (int i = 0; i < N; ++i)
arr.arr[i].store(nullptr, key + threadIdx.x + i * TPB);
}
template <typename TypeV, typename TypeK, int N, int TPB, bool Greater>
void sortTest(TypeK *key) {
TypeK *dkey;
CUDA_CHECK(cudaMalloc((void **)&dkey, sizeof(TypeK) * TPB * N));
sortTestKernel<TypeV, TypeK, N, TPB, Greater><<<1, TPB>>>(dkey);
CUDA_CHECK(cudaPeekAtLastError());
updateHost<TypeK>(key, dkey, TPB * N, 0);
CUDA_CHECK(cudaFree(dkey));
}
/************************************************************************/
/********************** Add the function for CPU test *******************/
/************************************************************************/
template <typename TypeV, typename TypeK, bool Greater>
int cmp(KVPair<TypeV, TypeK> a, KVPair<TypeV, TypeK> b) {
if (Greater == 0) {
return a.val > b.val;
} else {
return a.val < b.val;
}
}
template <typename TypeV, typename TypeK, bool Greater>
void partSortKVPair(KVPair<TypeV, TypeK> *arr, int N, int k) {
std::partial_sort(arr, arr + k, arr + N, cmp<TypeV, TypeK, Greater>);
}
template <typename TypeV, typename TypeK, int N, bool Greater>
void sortKVArray(KVArray<TypeV, TypeK, N, Greater> &arr) {
std::sort(arr.arr, arr.arr + N, cmp<TypeV, TypeK, Greater>);
}
template <typename TypeV, typename TypeK, bool Greater>
::testing::AssertionResult checkResult(TypeV *d_arr, TypeV *d_outv,
TypeK *d_outk, int rows, int N, int k,
TypeV tolerance) {
for (int rIndex = 0; rIndex < rows; rIndex++) {
// input data
TypeV *h_arr = new TypeV[N];
updateHost(h_arr, d_arr + rIndex * N, N, 0);
KVPair<TypeV, TypeK> *topk = new KVPair<TypeV, TypeK>[N];
for (int j = 0; j < N; j++) {
topk[j].val = h_arr[j];
topk[j].key = j;
}
// result reference
TypeV *h_outv = new TypeV[k];
updateHost(h_outv, d_outv + rIndex * k, k, 0);
TypeK *h_outk = new TypeK[k];
updateHost(h_outk, d_outk + rIndex * k, k, 0);
// calculate the result
partSortKVPair<TypeV, TypeK, Greater>(topk, N, k);
// check result
for (int j = 0; j < k; j++) {
// std::cout<<"Get value at ("<<rIndex<<" "<<j<<") Cpu "
// <<topk[j].val<<" "<<topk[j].key<<" Gpu "<<h_outv[j]<<" "
//<<h_outk[j] <<std::endl<<std::endl;
if (abs(h_outv[j] - topk[j].val) > tolerance) {
return ::testing::AssertionFailure()
<< "actual=" << topk[j].val << " != expected=" << h_outv[j];
}
}
// delete resource
delete[] h_arr;
delete[] h_outv;
delete[] h_outk;
delete[] topk;
}
return ::testing::AssertionSuccess();
}
// Structure WarpTopKInputs
template <typename T>
struct WarpTopKInputs {
T tolerance;
int rows; // batch size
int cols; // N the length of variables
int k; // the top-k value
unsigned long long int seed; // seed to generate data
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const WarpTopKInputs<T> &dims) {
return os;
}
// Define functions WarpTopKTest
template <typename T>
class WarpTopKTest : public ::testing::TestWithParam<WarpTopKInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<WarpTopKInputs<T>>::GetParam();
Random::Rng r(params.seed);
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(arr, params.rows * params.cols);
allocate(outk, params.rows * params.k);
allocate(outv, params.rows * params.k);
r.uniform(arr, params.rows * params.cols, T(-1.0), T(1.0), stream);
static const bool Sort = false;
static const bool Greater = true;
warpTopK<T, int, Greater, Sort>(outv, outk, arr, params.k, params.rows,
params.cols, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(outv));
CUDA_CHECK(cudaFree(outk));
CUDA_CHECK(cudaFree(arr));
}
protected:
WarpTopKInputs<T> params;
T *arr, *outv;
int *outk;
};
// Parameters
// Milestone 1: Verify the result of current implementation
// Milestone 2: Support all the values of k between 1 and 1024; both inclusive
// Milestone 2.1: Using the POC code to Support all the values
const std::vector<WarpTopKInputs<float>> inputs2_0 = {
{0.00000001, 2, 1024, 256, 1234ULL}};
const std::vector<WarpTopKInputs<float>> inputs2_1 = {
{0.00000001, 4, 2048, 1024, 1234ULL}};
const std::vector<WarpTopKInputs<float>> inputs2_2 = {
{0.00000001, 4, 2048, 1, 1234ULL}};
// Milestone 2.2: Using the full thread queue and warp queue code to support
// all the values
// @TODO: Milestone 3: Support not sorted
// @TODO: Milestone 4: Support multi-gpu
// Define the function TEST_P
typedef WarpTopKTest<float> TestD2_0;
typedef WarpTopKTest<float> TestD2_1;
typedef WarpTopKTest<float> TestD2_2;
TEST_P(TestD2_0, Result) {
const static bool Greater = true;
ASSERT_TRUE((checkResult<float, int, Greater>(
arr, outv, outk, params.rows, params.cols, params.k, params.tolerance)));
}
TEST_P(TestD2_1, Result) {
const static bool Greater = true;
ASSERT_TRUE((checkResult<float, int, Greater>(
arr, outv, outk, params.rows, params.cols, params.k, params.tolerance)));
}
TEST_P(TestD2_2, Result) {
const static bool Greater = true;
ASSERT_TRUE((checkResult<float, int, Greater>(
arr, outv, outk, params.rows, params.cols, params.k, params.tolerance)));
}
// Instantiate
INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_0,
::testing::ValuesIn(inputs2_0));
INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_1,
::testing::ValuesIn(inputs2_1));
INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_2,
::testing::ValuesIn(inputs2_2));
} // end namespace Selection
} // end namespace MLCommon
|
a9e48440bcf17880da67a9bf137c457fbc3a251d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fast_pcl/ndt_gpu/VoxelGrid.h"
#include "fast_pcl/ndt_gpu/debug.h"
#include "fast_pcl/ndt_gpu/common.h"
#include <math.h>
#include <limits>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <thrust/fill.h>
#include <inttypes.h>
#include <vector>
#include <cmath>
#include <stdio.h>
#include <sys/time.h>
#include "fast_pcl/ndt_gpu/SymmetricEigenSolver.h"
namespace gpu {
GVoxelGrid::GVoxelGrid():
x_(NULL),
y_(NULL),
z_(NULL),
points_num_(0),
centroid_(NULL),
covariance_(NULL),
inverse_covariance_(NULL),
points_per_voxel_(NULL),
voxel_num_(0),
max_x_(FLT_MAX),
max_y_(FLT_MAX),
max_z_(FLT_MAX),
min_x_(FLT_MIN),
min_y_(FLT_MIN),
min_z_(FLT_MIN),
voxel_x_(0),
voxel_y_(0),
voxel_z_(0),
max_b_x_(0),
max_b_y_(0),
max_b_z_(0),
min_b_x_(0),
min_b_y_(0),
min_b_z_(0),
vgrid_x_(0),
vgrid_y_(0),
vgrid_z_(0),
min_points_per_voxel_(6),
starting_point_ids_(NULL),
point_ids_(NULL),
is_copied_(false)
{
};
GVoxelGrid::GVoxelGrid(const GVoxelGrid &other)
{
x_ = other.x_;
y_ = other.y_;
z_ = other.z_;
points_num_ = other.points_num_;
centroid_ = other.centroid_;
covariance_ = other.covariance_;
inverse_covariance_ = other.inverse_covariance_;
points_per_voxel_ = other.points_per_voxel_;
voxel_num_ = other.voxel_num_;
max_x_ = other.max_x_;
max_y_ = other.max_y_;
max_z_ = other.max_z_;
min_x_ = other.min_x_;
min_y_ = other.min_y_;
min_z_ = other.min_z_;
voxel_x_ = other.voxel_x_;
voxel_y_ = other.voxel_y_;
voxel_z_ = other.voxel_z_;
max_b_x_ = other.max_b_x_;
max_b_y_ = other.max_b_y_;
max_b_z_ = other.max_b_z_;
min_b_x_ = other.min_b_x_;
min_b_y_ = other.min_b_y_;
min_b_z_ = other.min_b_z_;
vgrid_x_ = other.vgrid_x_;
vgrid_y_ = other.vgrid_y_;
vgrid_z_ = other.vgrid_z_;
min_points_per_voxel_ = other.min_points_per_voxel_;
starting_point_ids_ = other.starting_point_ids_;
point_ids_ = other.point_ids_;
is_copied_ = true;
}
GVoxelGrid::~GVoxelGrid() {
if (!is_copied_) {
for (unsigned int i = 1; i < octree_centroids_.size(); i++) {
if (octree_centroids_[i] != NULL) {
checkCudaErrors(hipFree(octree_centroids_[i]));
octree_centroids_[i] = NULL;
}
if (octree_points_per_node_[i] != NULL) {
checkCudaErrors(hipFree(octree_points_per_node_[i]));
octree_points_per_node_[i] = NULL;
}
}
octree_centroids_.clear();
octree_points_per_node_.clear();
octree_grid_size_.clear();
if (starting_point_ids_ != NULL) {
checkCudaErrors(hipFree(starting_point_ids_));
starting_point_ids_ = NULL;
}
if (point_ids_ != NULL) {
checkCudaErrors(hipFree(point_ids_));
point_ids_ = NULL;
}
if (centroid_ != NULL) {
checkCudaErrors(hipFree(centroid_));
centroid_ = NULL;
}
if (covariance_ != NULL) {
checkCudaErrors(hipFree(covariance_));
covariance_ = NULL;
}
if (inverse_covariance_ != NULL) {
checkCudaErrors(hipFree(inverse_covariance_));
inverse_covariance_ = NULL;
}
if (points_per_voxel_ != NULL) {
checkCudaErrors(hipFree(points_per_voxel_));
points_per_voxel_ = NULL;
}
}
}
void GVoxelGrid::initialize()
{
if (centroid_ != NULL) {
checkCudaErrors(hipFree(centroid_));
centroid_ = NULL;
}
if (covariance_ != NULL) {
checkCudaErrors(hipFree(covariance_));
covariance_ = NULL;
}
if (inverse_covariance_ != NULL) {
checkCudaErrors(hipFree(inverse_covariance_));
inverse_covariance_ = NULL;
}
if (points_per_voxel_ != NULL) {
checkCudaErrors(hipFree(points_per_voxel_));
points_per_voxel_ = NULL;
}
checkCudaErrors(hipMalloc(&centroid_, sizeof(double) * 3 * voxel_num_));
checkCudaErrors(hipMalloc(&covariance_, sizeof(double) * 9 * voxel_num_));
checkCudaErrors(hipMalloc(&inverse_covariance_, sizeof(double) * 9 * voxel_num_));
checkCudaErrors(hipMalloc(&points_per_voxel_, sizeof(int) * voxel_num_));
checkCudaErrors(hipMemset(inverse_covariance_, 0, sizeof(double) * 9 * voxel_num_));
checkCudaErrors(hipMemset(points_per_voxel_, 0, sizeof(int) * voxel_num_));
checkCudaErrors(hipDeviceSynchronize());
}
int GVoxelGrid::getVoxelNum() const
{
return voxel_num_;
}
float GVoxelGrid::getMaxX() const
{
return max_x_;
}
float GVoxelGrid::getMaxY() const
{
return max_y_;
}
float GVoxelGrid::getMaxZ() const
{
return max_z_;
}
float GVoxelGrid::getMinX() const
{
return min_x_;
}
float GVoxelGrid::getMinY() const
{
return min_y_;
}
float GVoxelGrid::getMinZ() const
{
return min_z_;
}
float GVoxelGrid::getVoxelX() const
{
return voxel_x_;
}
float GVoxelGrid::getVoxelY() const
{
return voxel_y_;
}
float GVoxelGrid::getVoxelZ() const
{
return voxel_z_;
}
int GVoxelGrid::getMaxBX() const
{
return max_b_x_;
}
int GVoxelGrid::getMaxBY() const
{
return max_b_y_;
}
int GVoxelGrid::getMaxBZ() const
{
return max_b_z_;
}
int GVoxelGrid::getMinBX() const
{
return min_b_x_;
}
int GVoxelGrid::getMinBY() const
{
return min_b_y_;
}
int GVoxelGrid::getMinBZ() const
{
return min_b_z_;
}
int GVoxelGrid::getVgridX() const
{
return vgrid_x_;
}
int GVoxelGrid::getVgridY() const
{
return vgrid_y_;
}
int GVoxelGrid::getVgridZ() const
{
return vgrid_z_;
}
void GVoxelGrid::setLeafSize(float voxel_x, float voxel_y, float voxel_z)
{
voxel_x_ = voxel_x;
voxel_y_ = voxel_y;
voxel_z_ = voxel_z;
}
double* GVoxelGrid::getCentroidList() const
{
return centroid_;
}
double* GVoxelGrid::getCovarianceList() const
{
return covariance_;
}
double* GVoxelGrid::getInverseCovarianceList() const
{
return inverse_covariance_;
}
int* GVoxelGrid::getPointsPerVoxelList() const
{
return points_per_voxel_;
}
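/* Map a point to the linear index of the voxel that contains it:
 * id = id_x + id_y * vgrid_x + id_z * vgrid_x * vgrid_y.
 * Worked example with assumed values: for a 1.0 m leaf size, min_b = (0, 0, 0) and a
 * 10 x 10 x 10 grid, the point (2.3, 4.7, 1.1) falls into cell (2, 4, 1), i.e. linear
 * index 2 + 4 * 10 + 1 * 100 = 142. */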
extern "C" __device__ int voxelId(float x, float y, float z,
float voxel_x, float voxel_y, float voxel_z,
int min_b_x, int min_b_y, int min_b_z,
int vgrid_x, int vgrid_y, int vgrid_z)
{
int id_x = static_cast<int>(floorf(x / voxel_x) - static_cast<float>(min_b_x));
int id_y = static_cast<int>(floorf(y / voxel_y) - static_cast<float>(min_b_y));
int id_z = static_cast<int>(floorf(z / voxel_z) - static_cast<float>(min_b_z));
return (id_x + id_y * vgrid_x + id_z * vgrid_x * vgrid_y);
}
/* First step to compute centroids and covariances of voxels. */
extern "C" __global__ void initCentroidAndCovariance(float *x, float *y, float *z, int *starting_point_ids, int *point_ids,
double *centroids, double *covariances, int voxel_num)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < voxel_num; i += stride) {
MatrixDevice centr(3, 1, voxel_num, centroids + i);
MatrixDevice cov(3, 3, voxel_num, covariances + i);
double centr0, centr1, centr2;
double cov00, cov01, cov02, cov11, cov12, cov22;
centr0 = centr1 = centr2 = 0.0;
cov00 = cov11 = cov22 = 1.0;
cov01 = cov02 = cov12 = 0.0;
for (int j = starting_point_ids[i]; j < starting_point_ids[i + 1]; j++) {
int pid = point_ids[j];
double t_x = static_cast<double>(x[pid]);
double t_y = static_cast<double>(y[pid]);
double t_z = static_cast<double>(z[pid]);
centr0 += t_x;
centr1 += t_y;
centr2 += t_z;
cov00 += t_x * t_x;
cov01 += t_x * t_y;
cov02 += t_x * t_z;
cov11 += t_y * t_y;
cov12 += t_y * t_z;
cov22 += t_z * t_z;
}
centr(0) = centr0;
centr(1) = centr1;
centr(2) = centr2;
cov(0, 0) = cov00;
cov(0, 1) = cov01;
cov(0, 2) = cov02;
cov(1, 1) = cov11;
cov(1, 2) = cov12;
cov(2, 2) = cov22;
}
}
/* Update centroids of voxels. */
extern "C" __global__ void updateVoxelCentroid(double *centroid, int *points_per_voxel, int voxel_num)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
MatrixDevice centr(3, 1, voxel_num, centroid + vid);
double points_num = static_cast<double>(points_per_voxel[vid]);
if (points_num > 0) {
centr /= points_num;
}
}
}
/* Update covariance of voxels. */
extern "C" __global__ void updateVoxelCovariance(double *centroid, double *pt_sum, double *covariance, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
MatrixDevice centr(3, 1, voxel_num, centroid + vid);
MatrixDevice cov(3, 3, voxel_num, covariance + vid);
MatrixDevice pts(3, 1, voxel_num, pt_sum + vid);
double points_num = static_cast<double>(points_per_voxel[vid]);
double c0 = centr(0);
double c1 = centr(1);
double c2 = centr(2);
double p0 = pts(0);
double p1 = pts(1);
double p2 = pts(2);
points_per_voxel[vid] = (points_num < min_points_per_voxel) ? 0 : points_num;
if (points_num >= min_points_per_voxel) {
double mult = (points_num - 1.0) / points_num;
cov(0, 0) = ((cov(0, 0) - 2.0 * p0 * c0) / points_num + c0 * c0) * mult;
cov(0, 1) = ((cov(0, 1) - 2.0 * p0 * c1) / points_num + c0 * c1) * mult;
cov(0, 2) = ((cov(0, 2) - 2.0 * p0 * c2) / points_num + c0 * c2) * mult;
cov(1, 0) = cov(0, 1);
cov(1, 1) = ((cov(1, 1) - 2.0 * p1 * c1) / points_num + c1 * c1) * mult;
cov(1, 2) = ((cov(1, 2) - 2.0 * p1 * c2) / points_num + c1 * c2) * mult;
cov(2, 0) = cov(0, 2);
cov(2, 1) = cov(1, 2);
cov(2, 2) = ((cov(2, 2) - 2.0 * p2 * c2) / points_num + c2 * c2) * mult;
}
}
}
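/* Invert the eigenvector matrix of each qualifying voxel. The result is staged in
 * inverse_covariance_ and later multiplied with the eigenvalue-scaled eigenvectors
 * (updateCovarianceS0/S1) to rebuild a well-conditioned covariance matrix. */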
extern "C" __global__ void computeInverseEigenvectors(double *inverse_covariance, int *points_per_voxel, int voxel_num, double *eigenvectors, int min_points_per_voxel)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice icov(3, 3, voxel_num, inverse_covariance + vid);
MatrixDevice eigen_vectors(3, 3, voxel_num, eigenvectors + vid);
eigen_vectors.inverse(icov);
}
__syncthreads();
}
}
//eigen_vecs = eigen_vecs * eigen_val
extern "C" __global__ void updateCovarianceS0(int *points_per_voxel, int voxel_num, double *eigenvalues, double *eigenvectors, int min_points_per_voxel)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice eigen_vectors(3, 3, voxel_num, eigenvectors + vid);
double eig_val0 = eigenvalues[vid];
double eig_val1 = eigenvalues[vid + voxel_num];
double eig_val2 = eigenvalues[vid + 2 * voxel_num];
eigen_vectors(0, 0) *= eig_val0;
eigen_vectors(1, 0) *= eig_val0;
eigen_vectors(2, 0) *= eig_val0;
eigen_vectors(0, 1) *= eig_val1;
eigen_vectors(1, 1) *= eig_val1;
eigen_vectors(2, 1) *= eig_val1;
eigen_vectors(0, 2) *= eig_val2;
eigen_vectors(1, 2) *= eig_val2;
eigen_vectors(2, 2) *= eig_val2;
}
__syncthreads();
}
}
//cov = new eigen_vecs * eigen_vecs transpose
extern "C" __global__ void updateCovarianceS1(double *covariance, double *inverse_covariance, int *points_per_voxel, int voxel_num, double *eigenvectors, int min_points_per_voxel, int col)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice cov(3, 3, voxel_num, covariance + vid);
MatrixDevice icov(3, 3, voxel_num, inverse_covariance + vid);
MatrixDevice eigen_vectors(3, 3, voxel_num, eigenvectors + vid);
double tmp0 = icov(0, col);
double tmp1 = icov(1, col);
double tmp2 = icov(2, col);
cov(0, col) = eigen_vectors(0, 0) * tmp0 + eigen_vectors(0, 1) * tmp1 + eigen_vectors(0, 2) * tmp2;
cov(1, col) = eigen_vectors(1, 0) * tmp0 + eigen_vectors(1, 1) * tmp1 + eigen_vectors(1, 2) * tmp2;
cov(2, col) = eigen_vectors(2, 0) * tmp0 + eigen_vectors(2, 1) * tmp1 + eigen_vectors(2, 2) * tmp2;
}
__syncthreads();
}
}
extern "C" __global__ void computeInverseCovariance(double *covariance, double *inverse_covariance, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice cov(3, 3, voxel_num, covariance + vid);
MatrixDevice icov(3, 3, voxel_num, inverse_covariance + vid);
cov.inverse(icov);
}
__syncthreads();
}
}
template<typename T>
__global__ void init(T *input, int size, int local_size)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) {
for (int j = 0; j < local_size; j++)
input[i + j * size] = 1;
}
}
extern "C" __global__ void initBoolean(bool *input, int size)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) {
input[i] = (i % 2 == 0) ? true : false;
}
}
/* Normalize input matrices to avoid overflow. */
extern "C" __global__ void normalize(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.normalizeInput(id);
__syncthreads();
}
}
/* Compute eigenvalues. Eigenvalues are arranged in increasing order.
* (eigen(0) <= eigen(1) <= eigen(2). */
extern "C" __global__ void computeEigenvalues(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvalues(id);
__syncthreads();
}
}
/* First step to compute eigenvector 0 of covariance matrices. */
extern "C" __global__ void computeEvec00(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector00(id);
__syncthreads();
}
}
/* Second step to compute eigenvector 0 of covariance matrices. */
extern "C" __global__ void computeEvec01(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector01(id);
__syncthreads();
}
}
/* First step to compute eigenvector 1 of covariance matrices. */
extern "C" __global__ void computeEvec10(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector10(id);
__syncthreads();
}
}
/* Second step to compute eigenvector 1 of covariance matrices. */
extern "C" __global__ void computeEvec11(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector11(id);
__syncthreads();
}
}
/* Compute eigenvector 2 of covariance matrices. */
extern "C" __global__ void computeEvec2(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector2(id);
__syncthreads();
}
}
/* Final step to compute eigenvalues. */
extern "C" __global__ void updateEval(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.updateEigenvalues(id);
__syncthreads();
}
}
/* Update eigenvalues in the case covariance matrix is nearly singular. */
extern "C" __global__ void updateEval2(double *eigenvalues, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel) {
MatrixDevice eigen_val(3, 1, voxel_num, eigenvalues + id);
double ev0 = eigen_val(0);
double ev1 = eigen_val(1);
double ev2 = eigen_val(2);
if (ev0 < 0 || ev1 < 0 || ev2 <= 0) {
points_per_voxel[id] = 0;
continue;
}
double min_cov_eigvalue = ev2 * 0.01;
if (ev0 < min_cov_eigvalue) {
ev0 = min_cov_eigvalue;
if (ev1 < min_cov_eigvalue) {
ev1 = min_cov_eigvalue;
}
}
eigen_val(0) = ev0;
eigen_val(1) = ev1;
eigen_val(2) = ev2;
__syncthreads();
}
}
}
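/* Per-voxel centroid/covariance pipeline:
 * 1. Accumulate point sums and second-order moments per voxel.
 * 2. Divide by the point count to obtain centroids, then form the sample covariance.
 * 3. Solve the 3x3 symmetric eigenproblem for every voxel.
 * 4. Clamp small eigenvalues relative to the largest one, rebuild the covariance as
 * V * diag(lambda) * V^-1, and finally invert it. */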
void GVoxelGrid::computeCentroidAndCovariance()
{
int block_x = (voxel_num_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : voxel_num_;
int grid_x = (voxel_num_ - 1) / block_x + 1;
hipLaunchKernelGGL(( initCentroidAndCovariance), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, starting_point_ids_, point_ids_, centroid_, covariance_, voxel_num_);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
double *pt_sum;
checkCudaErrors(hipMalloc(&pt_sum, sizeof(double) * voxel_num_ * 3));
checkCudaErrors(hipMemcpy(pt_sum, centroid_, sizeof(double) * voxel_num_ * 3, hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( updateVoxelCentroid), dim3(grid_x), dim3(block_x), 0, 0, centroid_, points_per_voxel_, voxel_num_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( updateVoxelCovariance), dim3(grid_x), dim3(block_x), 0, 0, centroid_, pt_sum, covariance_, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(pt_sum));
double *eigenvalues_dev, *eigenvectors_dev;
checkCudaErrors(hipMalloc(&eigenvalues_dev, sizeof(double) * 3 * voxel_num_));
checkCudaErrors(hipMalloc(&eigenvectors_dev, sizeof(double) * 9 * voxel_num_));
// Solving eigenvalues and eigenvectors problem by the GPU.
SymmetricEigensolver3x3 sv(voxel_num_);
sv.setInputMatrices(covariance_);
sv.setEigenvalues(eigenvalues_dev);
sv.setEigenvectors(eigenvectors_dev);
hipLaunchKernelGGL(( normalize), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeEigenvalues), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeEvec00), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeEvec01), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeEvec10), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeEvec11), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeEvec2), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( updateEval), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( updateEval2), dim3(grid_x), dim3(block_x), 0, 0, eigenvalues_dev, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeInverseEigenvectors), dim3(grid_x), dim3(block_x), 0, 0, inverse_covariance_, points_per_voxel_, voxel_num_, eigenvectors_dev, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( updateCovarianceS0), dim3(grid_x), dim3(block_x), 0, 0, points_per_voxel_, voxel_num_, eigenvalues_dev, eigenvectors_dev, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
for (int i = 0; i < 3; i++) {
hipLaunchKernelGGL(( updateCovarianceS1), dim3(grid_x), dim3(block_x), 0, 0, covariance_, inverse_covariance_, points_per_voxel_, voxel_num_, eigenvectors_dev, min_points_per_voxel_, i);
checkCudaErrors(hipGetLastError());
}
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( computeInverseCovariance), dim3(grid_x), dim3(block_x), 0, 0, covariance_, inverse_covariance_, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
sv.memFree();
checkCudaErrors(hipFree(eigenvalues_dev));
checkCudaErrors(hipFree(eigenvectors_dev));
}
//Input are supposed to be in device memory
void GVoxelGrid::setInput(float *x, float *y, float *z, int points_num)
{
if (points_num <= 0)
return;
x_ = x;
y_ = y;
z_ = z;
points_num_ = points_num;
findBoundaries();
voxel_num_ = vgrid_x_ * vgrid_y_ * vgrid_z_;
initialize();
scatterPointsToVoxelGrid();
computeCentroidAndCovariance();
buildOctree();
}
/* Find the largest coordinate values */
extern "C" __global__ void findMax(float *x, float *y, float *z, int full_size, int half_size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
x[i] = (i + half_size < full_size) ? ((x[i] >= x[i + half_size]) ? x[i] : x[i + half_size]) : x[i];
y[i] = (i + half_size < full_size) ? ((y[i] >= y[i + half_size]) ? y[i] : y[i + half_size]) : y[i];
z[i] = (i + half_size < full_size) ? ((z[i] >= z[i + half_size]) ? z[i] : z[i + half_size]) : z[i];
}
}
/* Find the smallest coordinate values */
extern "C" __global__ void findMin(float *x, float *y, float *z, int full_size, int half_size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
x[i] = (i + half_size < full_size) ? ((x[i] <= x[i + half_size]) ? x[i] : x[i + half_size]) : x[i];
y[i] = (i + half_size < full_size) ? ((y[i] <= y[i + half_size]) ? y[i] : y[i + half_size]) : y[i];
z[i] = (i + half_size < full_size) ? ((z[i] <= z[i + half_size]) ? z[i] : z[i + half_size]) : z[i];
}
}
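/* Compute the axis-aligned bounding box of the input cloud and the voxel grid extent.
 * Working copies of the coordinate arrays are reduced on the device by repeated halving:
 * each pass keeps the element-wise max (or min) of the two halves, so after
 * log2(points_num) passes element 0 holds the extreme value. */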
void GVoxelGrid::findBoundaries()
{
float *max_x, *max_y, *max_z, *min_x, *min_y, *min_z;
checkCudaErrors(hipMalloc(&max_x, sizeof(float) * points_num_));
checkCudaErrors(hipMalloc(&max_y, sizeof(float) * points_num_));
checkCudaErrors(hipMalloc(&max_z, sizeof(float) * points_num_));
checkCudaErrors(hipMalloc(&min_x, sizeof(float) * points_num_));
checkCudaErrors(hipMalloc(&min_y, sizeof(float) * points_num_));
checkCudaErrors(hipMalloc(&min_z, sizeof(float) * points_num_));
checkCudaErrors(hipMemcpy(max_x, x_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(max_y, y_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(max_z, z_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(min_x, x_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(min_y, y_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(min_z, z_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice));
int points_num = points_num_;
while (points_num > 1) {
int half_points_num = (points_num - 1) / 2 + 1;
int block_x = (half_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_points_num;
int grid_x = (half_points_num - 1) / block_x + 1;
hipLaunchKernelGGL(( findMax), dim3(grid_x), dim3(block_x), 0, 0, max_x, max_y, max_z, points_num, half_points_num);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( findMin), dim3(grid_x), dim3(block_x), 0, 0, min_x, min_y, min_z, points_num, half_points_num);
checkCudaErrors(hipGetLastError());
points_num = half_points_num;
}
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(&max_x_, max_x, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&max_y_, max_y, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&max_z_, max_z, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&min_x_, min_x, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&min_y_, min_y, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&min_z_, min_z, sizeof(float), hipMemcpyDeviceToHost));
max_b_x_ = static_cast<int> (floor(max_x_ / voxel_x_));
max_b_y_ = static_cast<int> (floor(max_y_ / voxel_y_));
max_b_z_ = static_cast<int> (floor(max_z_ / voxel_z_));
min_b_x_ = static_cast<int> (floor(min_x_ / voxel_x_));
min_b_y_ = static_cast<int> (floor(min_y_ / voxel_y_));
min_b_z_ = static_cast<int> (floor(min_z_ / voxel_z_));
vgrid_x_ = max_b_x_ - min_b_x_ + 1;
vgrid_y_ = max_b_y_ - min_b_y_ + 1;
vgrid_z_ = max_b_z_ - min_b_z_ + 1;
checkCudaErrors(hipFree(max_x));
checkCudaErrors(hipFree(max_y));
checkCudaErrors(hipFree(max_z));
checkCudaErrors(hipFree(min_x));
checkCudaErrors(hipFree(min_y));
checkCudaErrors(hipFree(min_z));
}
/* Find indexes idx, idy and idz of candidate voxels */
extern "C" __global__ void findBoundariesOfCandidateVoxels(float *x, float *y, float *z,
float radius, int points_num,
float voxel_x, float voxel_y, float voxel_z,
int max_b_x, int max_b_y, int max_b_z,
int min_b_x, int min_b_y, int min_b_z,
int *max_vid_x, int *max_vid_y, int *max_vid_z,
int *min_vid_x, int *min_vid_y, int *min_vid_z,
int *candidate_voxel_per_point)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < points_num; i += stride) {
float t_x = x[i];
float t_y = y[i];
float t_z = z[i];
int max_id_x = static_cast<int>(floorf((t_x + radius) / voxel_x));
int max_id_y = static_cast<int>(floorf((t_y + radius) / voxel_y));
int max_id_z = static_cast<int>(floorf((t_z + radius) / voxel_z));
int min_id_x = static_cast<int>(floorf((t_x - radius) / voxel_x));
int min_id_y = static_cast<int>(floorf((t_y - radius) / voxel_y));
int min_id_z = static_cast<int>(floorf((t_z - radius) / voxel_z));
/* Find intersection of the cube containing
* the NN sphere of the point and the voxel grid
*/
max_id_x = (max_id_x > max_b_x) ? max_b_x - min_b_x : max_id_x - min_b_x;
max_id_y = (max_id_y > max_b_y) ? max_b_y - min_b_y : max_id_y - min_b_y;
max_id_z = (max_id_z > max_b_z) ? max_b_z - min_b_z : max_id_z - min_b_z;
min_id_x = (min_id_x < min_b_x) ? 0 : min_id_x - min_b_x;
min_id_y = (min_id_y < min_b_y) ? 0 : min_id_y - min_b_y;
min_id_z = (min_id_z < min_b_z) ? 0 : min_id_z - min_b_z;
int vx = max_id_x - min_id_x + 1;
int vy = max_id_y - min_id_y + 1;
int vz = max_id_z - min_id_z + 1;
candidate_voxel_per_point[i] = (vx > 0 && vy > 0 && vz > 0) ? vx * vy * vz : 0;
max_vid_x[i] = max_id_x;
max_vid_y[i] = max_id_y;
max_vid_z[i] = max_id_z;
min_vid_x[i] = min_id_x;
min_vid_y[i] = min_id_y;
min_vid_z[i] = min_id_z;
}
}
/* Write id of valid points to the output buffer */
extern "C" __global__ void collectValidPoints(int *valid_points_mark, int *valid_points_id, int *valid_points_location, int points_num)
{
for (int index = threadIdx.x + blockIdx.x * blockDim.x; index < points_num; index += blockDim.x * gridDim.x) {
if (valid_points_mark[index] != 0) {
valid_points_id[valid_points_location[index]] = index;
}
}
}
/* Compute the global index of candidate voxels.
* global index = idx + idy * grid size x + idz * grid_size x * grid size y */
extern "C" __global__ void updateCandidateVoxelIds(int points_num,
int vgrid_x, int vgrid_y, int vgrid_z,
int *max_vid_x, int *max_vid_y, int *max_vid_z,
int *min_vid_x, int *min_vid_y, int *min_vid_z,
int *starting_voxel_id,
int *candidate_voxel_id)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < points_num; i += stride) {
int max_id_x = max_vid_x[i];
int max_id_y = max_vid_y[i];
int max_id_z = max_vid_z[i];
int min_id_x = min_vid_x[i];
int min_id_y = min_vid_y[i];
int min_id_z = min_vid_z[i];
int write_location = starting_voxel_id[i];
for (int j = min_id_x; j <= max_id_x; j++) {
for (int k = min_id_y; k <= max_id_y; k++) {
for (int l = min_id_z; l <= max_id_z; l++) {
candidate_voxel_id[write_location] = j + k * vgrid_x + l * vgrid_x * vgrid_y;
write_location++;
}
}
}
}
}
/* Find out which voxels are really inside the radius.
* This is done by comparing the distance between the centroid
* of the voxel and the query point with the radius.
*
* The valid_voxel_mark store the result of the inspection, which is 0
* if the centroid is outside the radius and 1 otherwise.
*
* The valid_points_mark store the status of the inspection per point.
* It is 0 if there is no voxels in the candidate list is truly a neighbor
* of the point, and 1 otherwise.
*
* The valid_voxel_count store the number of true neighbor voxels.
*/
extern "C" __global__ void inspectCandidateVoxels(float *x, float *y, float *z,
float radius, int max_nn, int points_num,
double *centroid, int *points_per_voxel, int offset,
int *starting_voxel_id, int *candidate_voxel_id,
int *valid_voxel_mark, int *valid_voxel_count, int *valid_points_mark)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < points_num; i += stride) {
double t_x = static_cast<double>(x[i]);
double t_y = static_cast<double>(y[i]);
double t_z = static_cast<double>(z[i]);
int nn = 0;
for (int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1] && nn <= max_nn; j++) {
int point_num = points_per_voxel[candidate_voxel_id[j]];
MatrixDevice centr(3, 1, offset, centroid + candidate_voxel_id[j]);
double centroid_x = (point_num > 0) ? (t_x - centr(0)) : radius + 1;
double centroid_y = (point_num > 0) ? (t_y - centr(1)) : 0;
double centroid_z = (point_num > 0) ? (t_z - centr(2)) : 0;
bool res = (norm3d(centroid_x, centroid_y, centroid_z) <= radius);
valid_voxel_mark[j] = (res) ? 1 : 0;
nn += (res) ? 1 : 0;
}
valid_voxel_count[i] = nn;
valid_points_mark[i] = (nn > 0) ? 1 : 0;
__syncthreads();
}
}
/* Write the id of valid voxels to the output buffer */
extern "C" __global__ void collectValidVoxels(int *valid_voxels_mark, int *candidate_voxel_id, int *output, int *writing_location, int candidate_voxel_num)
{
for (int index = threadIdx.x + blockIdx.x * blockDim.x; index < candidate_voxel_num; index += blockDim.x * gridDim.x) {
if (valid_voxels_mark[index] == 1) {
output[writing_location[index]] = candidate_voxel_id[index];
}
}
}
/* Write the number of valid voxel per point to the output buffer */
extern "C" __global__ void collectValidVoxelCount(int *input_valid_voxel_count, int *output_valid_voxel_count, int *writing_location, int points_num)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < points_num; id += blockDim.x * gridDim.x) {
if (input_valid_voxel_count[id] != 0)
output_valid_voxel_count[writing_location[id]] = input_valid_voxel_count[id];
}
}
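/* In-place exclusive prefix sum over the first ele_num entries of `input` using thrust.
 * After the scan, input[i] holds the write offset for element i, and input[ele_num - 1]
 * holds the sum of the first ele_num - 1 original values, which the two-argument
 * overload returns through `sum`. Callers therefore reserve one extra trailing slot. */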
template <typename T>
void GVoxelGrid::ExclusiveScan(T *input, int ele_num, T *sum)
{
thrust::device_ptr<T> dev_ptr(input);
thrust::exclusive_scan(dev_ptr, dev_ptr + ele_num, dev_ptr);
checkCudaErrors(hipDeviceSynchronize());
*sum = *(dev_ptr + ele_num - 1);
}
template <typename T>
void GVoxelGrid::ExclusiveScan(T *input, int ele_num)
{
thrust::device_ptr<T> dev_ptr(input);
thrust::exclusive_scan(dev_ptr, dev_ptr + ele_num, dev_ptr);
checkCudaErrors(hipDeviceSynchronize());
}
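/* For every query point (qx, qy, qz), collect the voxels whose centroid lies within
 * `radius`; collection for a point stops once more than max_nn neighbors have been found.
 * On return, *valid_points holds the indices of query points with at least one neighbor
 * voxel, *starting_voxel_id holds per-point offsets into *valid_voxel_id, and
 * *valid_voxel_id holds the matching voxel ids; *valid_points_num and *valid_voxel_num
 * receive the corresponding counts. The output buffers are allocated on the device here. */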
void GVoxelGrid::radiusSearch(float *qx, float *qy, float *qz, int points_num, float radius, int max_nn,
int **valid_points, int **starting_voxel_id, int **valid_voxel_id,
int *valid_voxel_num, int *valid_points_num)
{
//Testing input query points
int block_x = (points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : points_num;
int grid_x = (points_num - 1) / block_x + 1;
//Boundaries of candidate voxels per points
int *max_vid_x, *max_vid_y, *max_vid_z;
int *min_vid_x, *min_vid_y, *min_vid_z;
checkCudaErrors(hipMalloc(&max_vid_x, sizeof(int) * points_num));
checkCudaErrors(hipMalloc(&max_vid_y, sizeof(int) * points_num));
checkCudaErrors(hipMalloc(&max_vid_z, sizeof(int) * points_num));
checkCudaErrors(hipMalloc(&min_vid_x, sizeof(int) * points_num));
checkCudaErrors(hipMalloc(&min_vid_y, sizeof(int) * points_num));
checkCudaErrors(hipMalloc(&min_vid_z, sizeof(int) * points_num));
//Determine the number of candidate voxel per points
int *candidate_voxel_num_per_point;
int total_candidate_voxel_num;
checkCudaErrors(hipMalloc(&candidate_voxel_num_per_point, sizeof(int) * (points_num + 1)));
hipLaunchKernelGGL(( findBoundariesOfCandidateVoxels), dim3(grid_x), dim3(block_x), 0, 0, qx, qy, qz, radius, points_num,
voxel_x_, voxel_y_, voxel_z_,
max_b_x_, max_b_y_, max_b_z_,
min_b_x_, min_b_y_, min_b_z_,
max_vid_x, max_vid_y, max_vid_z,
min_vid_x, min_vid_y, min_vid_z,
candidate_voxel_num_per_point);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
//Total candidate voxel num is determined by an exclusive scan on candidate_voxel_num_per_point
ExclusiveScan(candidate_voxel_num_per_point, points_num + 1, &total_candidate_voxel_num);
if (total_candidate_voxel_num <= 0) {
std::cout << "No candidate voxel was found. Exiting..." << std::endl;
checkCudaErrors(hipFree(max_vid_x));
checkCudaErrors(hipFree(max_vid_y));
checkCudaErrors(hipFree(max_vid_z));
checkCudaErrors(hipFree(min_vid_x));
checkCudaErrors(hipFree(min_vid_y));
checkCudaErrors(hipFree(min_vid_z));
checkCudaErrors(hipFree(candidate_voxel_num_per_point));
// Dereference the output parameters so the caller sees the empty result.
*valid_points = NULL;
*starting_voxel_id = NULL;
*valid_voxel_id = NULL;
*valid_voxel_num = 0;
*valid_points_num = 0;
return;
}
//Determine the voxel id of candidate voxels
int *candidate_voxel_id;
checkCudaErrors(hipMalloc(&candidate_voxel_id, sizeof(int) * total_candidate_voxel_num));
hipLaunchKernelGGL(( updateCandidateVoxelIds), dim3(grid_x), dim3(block_x), 0, 0, points_num, vgrid_x_, vgrid_y_, vgrid_z_,
max_vid_x, max_vid_y, max_vid_z,
min_vid_x, min_vid_y, min_vid_z,
candidate_voxel_num_per_point, candidate_voxel_id);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
//Go through the candidate voxel id list and find out which voxels are really inside the radius
int *valid_voxel_mark;
checkCudaErrors(hipMalloc(&valid_voxel_mark, sizeof(int) * total_candidate_voxel_num));
int *valid_voxel_count;
checkCudaErrors(hipMalloc(&valid_voxel_count, sizeof(int) * (points_num + 1)));
int *valid_points_mark;
checkCudaErrors(hipMalloc(&valid_points_mark, sizeof(int) * points_num));
block_x = (total_candidate_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : total_candidate_voxel_num;
grid_x = (total_candidate_voxel_num - 1) / block_x + 1;
///CHECK VALID VOXEL COUNT AGAIN
hipLaunchKernelGGL(( inspectCandidateVoxels), dim3(grid_x), dim3(block_x), 0, 0, qx, qy, qz, radius, max_nn, points_num,
centroid_, points_per_voxel_, voxel_num_,
candidate_voxel_num_per_point, candidate_voxel_id,
valid_voxel_mark, valid_voxel_count, valid_points_mark);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
//Collect valid points
int *valid_points_location;
checkCudaErrors(hipMalloc(&valid_points_location, sizeof(int) * (points_num + 1)));
checkCudaErrors(hipMemset(valid_points_location, 0, sizeof(int) * (points_num + 1)));
checkCudaErrors(hipMemcpy(valid_points_location, valid_points_mark, sizeof(int) * points_num, hipMemcpyDeviceToDevice));
//Writing location to the output buffer is determined by an exclusive scan
ExclusiveScan(valid_points_location, points_num + 1, valid_points_num);
if (*valid_points_num <= 0) {
//std::cout << "No valid point was found. Exiting..." << std::endl;
std::cout << "No valid point was found. Exiting...: " << *valid_points_num << std::endl;
checkCudaErrors(hipFree(max_vid_x));
checkCudaErrors(hipFree(max_vid_y));
checkCudaErrors(hipFree(max_vid_z));
checkCudaErrors(hipFree(min_vid_x));
checkCudaErrors(hipFree(min_vid_y));
checkCudaErrors(hipFree(min_vid_z));
checkCudaErrors(hipFree(candidate_voxel_num_per_point));
checkCudaErrors(hipFree(candidate_voxel_id));
checkCudaErrors(hipFree(valid_voxel_mark));
checkCudaErrors(hipFree(valid_voxel_count));
checkCudaErrors(hipFree(valid_points_mark));
checkCudaErrors(hipFree(valid_points_location));
valid_points = NULL;
starting_voxel_id = NULL;
valid_voxel_id = NULL;
*valid_voxel_num = 0;
*valid_points_num = 0;
return;
}
checkCudaErrors(hipMalloc(valid_points, sizeof(int) * (*valid_points_num)));
hipLaunchKernelGGL(( collectValidPoints), dim3(grid_x), dim3(block_x), 0, 0, valid_points_mark, *valid_points, valid_points_location, points_num);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
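// Compact the per-point neighbor-voxel counts down to the valid points only;
// the exclusive scan below turns these counts into starting offsets into the valid voxel list.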
checkCudaErrors(hipMalloc(starting_voxel_id, sizeof(int) * (*valid_points_num + 1)));
hipLaunchKernelGGL(( collectValidVoxelCount), dim3(grid_x), dim3(block_x), 0, 0, valid_voxel_count, *starting_voxel_id, valid_points_location, points_num);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
//Determine the starting location of voxels per point in the valid points list
ExclusiveScan(*starting_voxel_id, *valid_points_num + 1, valid_voxel_num);
//Collect valid voxels
int *valid_voxel_location;
checkCudaErrors(hipMalloc(&valid_voxel_location, sizeof(int) * (total_candidate_voxel_num + 1)));
checkCudaErrors(hipMemcpy(valid_voxel_location, valid_voxel_mark, sizeof(int) * total_candidate_voxel_num, hipMemcpyDeviceToDevice));
ExclusiveScan(valid_voxel_location, total_candidate_voxel_num + 1, valid_voxel_num);
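// This second scan produces the write location of each valid candidate voxel;
// its total should match the valid_voxel_num obtained from the scan above.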
if (*valid_voxel_num <= 0) {
checkCudaErrors(hipFree(max_vid_x));
max_vid_x = NULL;
checkCudaErrors(hipFree(max_vid_y));
max_vid_y = NULL;
checkCudaErrors(hipFree(max_vid_z));
max_vid_z = NULL;
checkCudaErrors(hipFree(min_vid_x));
min_vid_x = NULL;
checkCudaErrors(hipFree(min_vid_y));
min_vid_y = NULL;
checkCudaErrors(hipFree(min_vid_z));
min_vid_z = NULL;
checkCudaErrors(hipFree(candidate_voxel_num_per_point));
candidate_voxel_num_per_point = NULL;
checkCudaErrors(hipFree(candidate_voxel_id));
candidate_voxel_id = NULL;
checkCudaErrors(hipFree(valid_voxel_mark));
valid_voxel_mark = NULL;
checkCudaErrors(hipFree(valid_voxel_count));
valid_voxel_count = NULL;
checkCudaErrors(hipFree(valid_points_mark));
valid_points_mark = NULL;
checkCudaErrors(hipFree(valid_points_location));
valid_points_location = NULL;
checkCudaErrors(hipFree(valid_voxel_location));
valid_voxel_location = NULL;
valid_points = NULL;
starting_voxel_id = NULL;
valid_voxel_id = NULL;
*valid_voxel_num = 0;
*valid_points_num = 0;
// Bail out as in the earlier failure paths; the working buffers above have already been freed.
return;
}
checkCudaErrors(hipMalloc(valid_voxel_id, sizeof(int) * (*valid_voxel_num)));
block_x = (total_candidate_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : total_candidate_voxel_num;
grid_x = (total_candidate_voxel_num - 1) / block_x + 1;
hipLaunchKernelGGL(( collectValidVoxels), dim3(grid_x), dim3(block_x), 0, 0, valid_voxel_mark, candidate_voxel_id, *valid_voxel_id, valid_voxel_location, total_candidate_voxel_num);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(max_vid_x));
checkCudaErrors(hipFree(max_vid_y));
checkCudaErrors(hipFree(max_vid_z));
checkCudaErrors(hipFree(min_vid_x));
checkCudaErrors(hipFree(min_vid_y));
checkCudaErrors(hipFree(min_vid_z));
checkCudaErrors(hipFree(candidate_voxel_num_per_point));
checkCudaErrors(hipFree(candidate_voxel_id));
checkCudaErrors(hipFree(valid_voxel_mark));
checkCudaErrors(hipFree(valid_points_mark));
checkCudaErrors(hipFree(valid_voxel_count));
checkCudaErrors(hipFree(valid_points_location));
checkCudaErrors(hipFree(valid_voxel_location));
}
/* Build parent nodes from child nodes of the octree */
extern "C" __global__ void buildParent(double *child_centroids, int *points_per_child,
int child_grid_x, int child_grid_y, int child_grid_z, int child_num,
double *parent_centroids, int *points_per_parent,
int parent_grid_x, int parent_grid_y, int parent_grid_z)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int idz = threadIdx.z + blockIdx.z * blockDim.z;
if (idx < parent_grid_x && idy < parent_grid_y && idz < parent_grid_z) {
int parent_idx = idx + idy * parent_grid_x + idz * parent_grid_x * parent_grid_y;
MatrixDevice parent_centr(3, 1, parent_grid_x * parent_grid_y * parent_grid_z, parent_centroids + parent_idx);
double pc0, pc1, pc2;
int points_num = 0;
double dpoints_num;
pc0 = 0.0;
pc1 = 0.0;
pc2 = 0.0;
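// Each parent cell covers a 2x2x2 block of children: accumulate point-count-weighted
// centroid sums, skipping empty children.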
for (int i = idx * 2; i < idx * 2 + 2 && i < child_grid_x; i++) {
for (int j = idy * 2; j < idy * 2 + 2 && j < child_grid_y; j++) {
for (int k = idz * 2; k < idz * 2 + 2 && k < child_grid_z; k++) {
int child_idx = i + j * child_grid_x + k * child_grid_x * child_grid_y;
MatrixDevice child_centr(3, 1, child_num, child_centroids + child_idx);
int child_points = points_per_child[child_idx];
double dchild_points = static_cast<double>(child_points);
pc0 += (child_points > 0) ? dchild_points * child_centr(0) : 0.0;
pc1 += (child_points > 0) ? dchild_points * child_centr(1) : 0.0;
pc2 += (child_points > 0) ? dchild_points * child_centr(2) : 0.0;
points_num += (child_points > 0) ? child_points : 0;
__syncthreads();
}
}
}
dpoints_num = static_cast<double>(points_num);
parent_centr(0) = (points_num <= 0) ? DBL_MAX : pc0 / dpoints_num;
parent_centr(1) = (points_num <= 0) ? DBL_MAX : pc1 / dpoints_num;
parent_centr(2) = (points_num <= 0) ? DBL_MAX : pc2 / dpoints_num;
points_per_parent[parent_idx] = points_num;
}
}
/* Compute the number of points per voxel using atomicAdd */
extern "C" __global__ void insertPointsToGrid(float *x, float *y, float *z, int points_num,
int *points_per_voxel, int voxel_num,
int vgrid_x, int vgrid_y, int vgrid_z,
float voxel_x, float voxel_y, float voxel_z,
int min_b_x, int min_b_y, int min_b_z)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < points_num; i += stride) {
float t_x = x[i];
float t_y = y[i];
float t_z = z[i];
int voxel_id = voxelId(t_x, t_y, t_z, voxel_x, voxel_y, voxel_z, min_b_x, min_b_y, min_b_z, vgrid_x, vgrid_y, vgrid_z);
// Update number of points in the voxel
int ptr_increment = (voxel_id < voxel_num) * voxel_id; // if (voxel_id < voxel_num), then use voxel_id
int incremental_value = (voxel_id < voxel_num);
//atomicAdd(points_per_voxel + voxel_id, 1);
atomicAdd(points_per_voxel + ptr_increment, incremental_value);
}
}
/* Rearrange points to locations corresponding to voxels */
extern "C" __global__ void scatterPointsToVoxels(float *x, float *y, float *z, int points_num, int voxel_num,
float voxel_x, float voxel_y, float voxel_z,
int min_b_x, int min_b_y, int min_b_z,
int vgrid_x, int vgrid_y, int vgrid_z,
int *writing_locations, int *point_ids)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < points_num; i += stride) {
int voxel_id = voxelId(x[i], y[i], z[i], voxel_x, voxel_y, voxel_z,
min_b_x, min_b_y, min_b_z, vgrid_x, vgrid_y, vgrid_z);
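// Branchless bounds guard: out-of-range voxel ids atomically add 0 to slot 0,
// so writing_locations is never indexed past voxel_num.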
int ptr_increment = (voxel_id < voxel_num) * voxel_id;
int incremental_value = (voxel_id < voxel_num);
//int loc = atomicAdd(writing_locations + voxel_id, 1);
int loc = atomicAdd(writing_locations + ptr_increment, incremental_value);
point_ids[loc] = i;
}
}
void GVoxelGrid::scatterPointsToVoxelGrid()
{
if (starting_point_ids_ != NULL) {
checkCudaErrors(hipFree(starting_point_ids_));
starting_point_ids_ = NULL;
}
if (point_ids_ != NULL) {
checkCudaErrors(hipFree(point_ids_));
point_ids_ = NULL;
}
int block_x = (points_num_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : points_num_;
int grid_x = (points_num_ - 1) / block_x + 1;
hipLaunchKernelGGL(( insertPointsToGrid), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_num_, points_per_voxel_, voxel_num_,
vgrid_x_, vgrid_y_, vgrid_z_,
voxel_x_, voxel_y_, voxel_z_,
min_b_x_, min_b_y_, min_b_z_);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
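// An exclusive scan over the per-voxel point counts gives each voxel's starting index in point_ids_;
// writing_location is a scratch copy of those offsets used as the per-voxel write cursor while scattering.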
checkCudaErrors(hipMalloc(&starting_point_ids_, sizeof(int) * (voxel_num_ + 1)));
int *writing_location;
checkCudaErrors(hipMalloc(&writing_location, sizeof(int) * voxel_num_));
checkCudaErrors(hipMemcpy(starting_point_ids_, points_per_voxel_, sizeof(int) * voxel_num_, hipMemcpyDeviceToDevice));
ExclusiveScan(starting_point_ids_, voxel_num_ + 1);
checkCudaErrors(hipMemcpy(writing_location, starting_point_ids_, sizeof(int) * voxel_num_, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMalloc(&point_ids_, sizeof(int) * points_num_));
hipLaunchKernelGGL(( scatterPointsToVoxels), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_num_, voxel_num_,
voxel_x_, voxel_y_, voxel_z_,
min_b_x_, min_b_y_, min_b_z_,
vgrid_x_, vgrid_y_, vgrid_z_,
writing_location, point_ids_);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(writing_location));
}
void GVoxelGrid::buildOctree()
{
for (unsigned int i = 1; i < octree_centroids_.size(); i++) {
if (octree_centroids_[i] != NULL) {
checkCudaErrors(hipFree(octree_centroids_[i]));
octree_centroids_[i] = NULL;
}
if (octree_points_per_node_[i] != NULL) {
checkCudaErrors(hipFree(octree_points_per_node_[i]));
octree_points_per_node_[i] = NULL;
}
}
octree_centroids_.clear();
octree_points_per_node_.clear();
octree_grid_size_.clear();
//Push leafs to the octree list
octree_centroids_.push_back(centroid_);
octree_points_per_node_.push_back(points_per_voxel_);
OctreeGridSize grid_size;
grid_size.size_x = vgrid_x_;
grid_size.size_y = vgrid_y_;
grid_size.size_z = vgrid_z_;
octree_grid_size_.push_back(grid_size);
int node_number = voxel_num_;
int child_grid_x, child_grid_y, child_grid_z;
int parent_grid_x, parent_grid_y, parent_grid_z;
int i = 0;
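// Each iteration adds a coarser octree level: the grid dimensions are halved (rounded up)
// and every parent node aggregates its 2x2x2 children, until a level's node count drops below the threshold.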
while (node_number > 100000000) {
child_grid_x = octree_grid_size_[i].size_x;
child_grid_y = octree_grid_size_[i].size_y;
child_grid_z = octree_grid_size_[i].size_z;
parent_grid_x = (child_grid_x - 1) / 2 + 1;
parent_grid_y = (child_grid_y - 1) / 2 + 1;
parent_grid_z = (child_grid_z - 1) / 2 + 1;
node_number = parent_grid_x * parent_grid_y * parent_grid_z;
double *parent_centroids;
int *points_per_parent;
checkCudaErrors(hipMalloc(&parent_centroids, sizeof(double) * 3 * node_number));
checkCudaErrors(hipMalloc(&points_per_parent, sizeof(int) * node_number));
double *child_centroids = octree_centroids_[i];
int *points_per_child = octree_points_per_node_[i];
int block_x = (parent_grid_x > BLOCK_X) ? BLOCK_X : parent_grid_x;
int block_y = (parent_grid_y > BLOCK_Y) ? BLOCK_Y : parent_grid_y;
int block_z = (parent_grid_z > BLOCK_Z) ? BLOCK_Z : parent_grid_z;
int grid_x = (parent_grid_x - 1) / block_x + 1;
int grid_y = (parent_grid_y - 1) / block_y + 1;
int grid_z = (parent_grid_z - 1) / block_z + 1;
dim3 block(block_x, block_y, block_z);
dim3 grid(grid_x, grid_y, grid_z);
hipLaunchKernelGGL(( buildParent), dim3(grid), dim3(block), 0, 0, child_centroids, points_per_child,
child_grid_x, child_grid_y, child_grid_z, child_grid_x * child_grid_y * child_grid_z,
parent_centroids, points_per_parent,
parent_grid_x, parent_grid_y, parent_grid_z);
checkCudaErrors(hipGetLastError());
octree_centroids_.push_back(parent_centroids);
octree_points_per_node_.push_back(points_per_parent);
grid_size.size_x = parent_grid_x;
grid_size.size_y = parent_grid_y;
grid_size.size_z = parent_grid_z;
octree_grid_size_.push_back(grid_size);
i++;
}
checkCudaErrors(hipDeviceSynchronize());
}
/* Search for the nearest octree node */
extern "C" __global__ void nearestOctreeNodeSearch(float *x, float *y, float *z,
int *vid_x, int *vid_y, int *vid_z,
int points_num,
double *centroids, int *points_per_node,
int vgrid_x, int vgrid_y, int vgrid_z, int node_num)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < points_num; i += stride) {
int vx = vid_x[i];
int vy = vid_y[i];
int vz = vid_z[i];
double min_dist = DBL_MAX;
double t_x = static_cast<double>(x[i]);
double t_y = static_cast<double>(y[i]);
double t_z = static_cast<double>(z[i]);
double cur_dist;
int out_x, out_y, out_z;
out_x = vx;
out_y = vy;
out_z = vz;
double tmp_x, tmp_y, tmp_z;
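// vid_x/y/z hold the nearest node found at the previous (coarser) level; examine its
// up-to-8 children at this level and keep the closest non-empty one.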
for (int j = vx * 2; j < vx * 2 + 2 && j < vgrid_x; j++) {
for (int k = vy * 2; k < vy * 2 + 2 && k < vgrid_y; k++) {
for (int l = vz * 2; l < vz * 2 + 2 && l < vgrid_z; l++) {
int node_id = j + k * vgrid_x + l * vgrid_x * vgrid_y;
MatrixDevice node_centr(3, 1, node_num, centroids + node_id);
int points = points_per_node[node_id];
tmp_x = (points > 0) ? node_centr(0) - t_x : DBL_MAX;
tmp_y = (points > 0) ? node_centr(1) - t_y : 0.0;
tmp_z = (points > 0) ? node_centr(2) - t_z : 0.0;
cur_dist = norm3d(tmp_x, tmp_y, tmp_z);
bool res = (cur_dist < min_dist);
out_x = (res) ? j : out_x;
out_y = (res) ? k : out_y;
out_z = (res) ? l : out_z;
min_dist = (res) ? cur_dist : min_dist;
}
}
}
vid_x[i] = out_x;
vid_y[i] = out_y;
vid_z[i] = out_z;
}
}
/* Search for the nearest point within the nearest voxel */
extern "C" __global__ void nearestPointSearch(float *qx, float *qy, float *qz, int qpoints_num,
float *rx, float *ry, float *rz, int rpoints_num,
int *vid_x, int *vid_y, int *vid_z,
int vgrid_x, int vgrid_y, int vgrid_z, int voxel_num,
int *starting_point_id, int *point_id, double *min_distance)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < qpoints_num; i += stride) {
int voxel_id = vid_x[i] + vid_y[i] * vgrid_x + vid_z[i] * vgrid_x * vgrid_y;
float cor_qx = qx[i];
float cor_qy = qy[i];
float cor_qz = qz[i];
float min_dist = FLT_MAX;
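// Exhaustively scan the reference points stored in the selected voxel
// (the [starting_point_id[v], starting_point_id[v + 1]) range of point_id) and keep the smallest distance.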
for (int j = starting_point_id[voxel_id]; j < starting_point_id[voxel_id + 1]; j++) {
int pid = point_id[j];
float cor_rx = rx[pid];
float cor_ry = ry[pid];
float cor_rz = rz[pid];
cor_rx -= cor_qx;
cor_ry -= cor_qy;
cor_rz -= cor_qz;
min_dist = fminf(norm3df(cor_rx, cor_ry, cor_rz), min_dist);
}
min_distance[i] = static_cast<double>(min_dist);
}
}
/* Check whether each min distance is smaller than or equal to max_range */
extern "C" __global__ void verifyDistances(int *valid_distance, double *min_distance, double max_range, int points_num)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < points_num; i += stride) {
bool check = (min_distance[i] <= max_range);
valid_distance[i] = (check) ? 1 : 0;
if (!check) {
min_distance[i] = 0;
}
}
}
void GVoxelGrid::nearestNeighborSearch(float *trans_x, float *trans_y, float *trans_z, int point_num, int *valid_distance, double *min_distance, float max_range)
{
int *vid_x, *vid_y, *vid_z;
checkCudaErrors(hipMalloc(&vid_x, sizeof(int) * point_num));
checkCudaErrors(hipMalloc(&vid_y, sizeof(int) * point_num));
checkCudaErrors(hipMalloc(&vid_z, sizeof(int) * point_num));
checkCudaErrors(hipMemset(vid_x, 0, sizeof(int) * point_num));
checkCudaErrors(hipMemset(vid_y, 0, sizeof(int) * point_num));
checkCudaErrors(hipMemset(vid_z, 0, sizeof(int) * point_num));
checkCudaErrors(hipDeviceSynchronize());
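// All queries start at node (0, 0, 0); each pass of nearestOctreeNodeSearch below refines
// vid_x/y/z by one octree level, from the coarsest level down to the leaf voxels.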
int block_x = (point_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : point_num;
int grid_x = (point_num - 1) / block_x + 1;
// Go from the top of the octree down to the bottom
for (int i = octree_centroids_.size() - 1; i >= 0; i--) {
double *centroids = octree_centroids_[i];
int *points_per_node = octree_points_per_node_[i];
int vgrid_x = octree_grid_size_[i].size_x;
int vgrid_y = octree_grid_size_[i].size_y;
int vgrid_z = octree_grid_size_[i].size_z;
int node_num = vgrid_x * vgrid_y * vgrid_z;
hipLaunchKernelGGL(( nearestOctreeNodeSearch), dim3(grid_x), dim3(block_x), 0, 0, trans_x, trans_y, trans_z,
vid_x, vid_y, vid_z,
point_num,
centroids, points_per_node,
vgrid_x, vgrid_y, vgrid_z, node_num);
checkCudaErrors(hipGetLastError());
}
hipLaunchKernelGGL(( nearestPointSearch), dim3(grid_x), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, point_num,
x_, y_, z_, points_num_,
vid_x, vid_y, vid_z,
vgrid_x_, vgrid_y_, vgrid_z_, voxel_num_,
starting_point_ids_, point_ids_,
min_distance);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( verifyDistances), dim3(grid_x), dim3(block_x), 0, 0, valid_distance, min_distance, max_range, point_num);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(vid_x));
checkCudaErrors(hipFree(vid_y));
checkCudaErrors(hipFree(vid_z));
}
}
| a9e48440bcf17880da67a9bf137c457fbc3a251d.cu | #include "fast_pcl/ndt_gpu/VoxelGrid.h"
#include "fast_pcl/ndt_gpu/debug.h"
#include "fast_pcl/ndt_gpu/common.h"
#include <math.h>
#include <limits>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <thrust/fill.h>
#include <inttypes.h>
#include <vector>
#include <cmath>
#include <stdio.h>
#include <sys/time.h>
#include "fast_pcl/ndt_gpu/SymmetricEigenSolver.h"
namespace gpu {
GVoxelGrid::GVoxelGrid():
x_(NULL),
y_(NULL),
z_(NULL),
points_num_(0),
centroid_(NULL),
covariance_(NULL),
inverse_covariance_(NULL),
points_per_voxel_(NULL),
voxel_num_(0),
max_x_(FLT_MAX),
max_y_(FLT_MAX),
max_z_(FLT_MAX),
min_x_(FLT_MIN),
min_y_(FLT_MIN),
min_z_(FLT_MIN),
voxel_x_(0),
voxel_y_(0),
voxel_z_(0),
max_b_x_(0),
max_b_y_(0),
max_b_z_(0),
min_b_x_(0),
min_b_y_(0),
min_b_z_(0),
vgrid_x_(0),
vgrid_y_(0),
vgrid_z_(0),
min_points_per_voxel_(6),
starting_point_ids_(NULL),
point_ids_(NULL),
is_copied_(false)
{
};
GVoxelGrid::GVoxelGrid(const GVoxelGrid &other)
{
x_ = other.x_;
y_ = other.y_;
z_ = other.z_;
points_num_ = other.points_num_;
centroid_ = other.centroid_;
covariance_ = other.covariance_;
inverse_covariance_ = other.inverse_covariance_;
points_per_voxel_ = other.points_per_voxel_;
voxel_num_ = other.voxel_num_;
max_x_ = other.max_x_;
max_y_ = other.max_y_;
max_z_ = other.max_z_;
min_x_ = other.min_x_;
min_y_ = other.min_y_;
min_z_ = other.min_z_;
voxel_x_ = other.voxel_x_;
voxel_y_ = other.voxel_y_;
voxel_z_ = other.voxel_z_;
max_b_x_ = other.max_b_x_;
max_b_y_ = other.max_b_y_;
max_b_z_ = other.max_b_z_;
min_b_x_ = other.min_b_x_;
min_b_y_ = other.min_b_y_;
min_b_z_ = other.min_b_z_;
vgrid_x_ = other.vgrid_x_;
vgrid_y_ = other.vgrid_y_;
vgrid_z_ = other.vgrid_z_;
min_points_per_voxel_ = other.min_points_per_voxel_;
starting_point_ids_ = other.starting_point_ids_;
point_ids_ = other.point_ids_;
is_copied_ = true;
}
GVoxelGrid::~GVoxelGrid() {
if (!is_copied_) {
for (unsigned int i = 1; i < octree_centroids_.size(); i++) {
if (octree_centroids_[i] != NULL) {
checkCudaErrors(cudaFree(octree_centroids_[i]));
octree_centroids_[i] = NULL;
}
if (octree_points_per_node_[i] != NULL) {
checkCudaErrors(cudaFree(octree_points_per_node_[i]));
octree_points_per_node_[i] = NULL;
}
}
octree_centroids_.clear();
octree_points_per_node_.clear();
octree_grid_size_.clear();
if (starting_point_ids_ != NULL) {
checkCudaErrors(cudaFree(starting_point_ids_));
starting_point_ids_ = NULL;
}
if (point_ids_ != NULL) {
checkCudaErrors(cudaFree(point_ids_));
point_ids_ = NULL;
}
if (centroid_ != NULL) {
checkCudaErrors(cudaFree(centroid_));
centroid_ = NULL;
}
if (covariance_ != NULL) {
checkCudaErrors(cudaFree(covariance_));
covariance_ = NULL;
}
if (inverse_covariance_ != NULL) {
checkCudaErrors(cudaFree(inverse_covariance_));
inverse_covariance_ = NULL;
}
if (points_per_voxel_ != NULL) {
checkCudaErrors(cudaFree(points_per_voxel_));
points_per_voxel_ = NULL;
}
}
}
void GVoxelGrid::initialize()
{
if (centroid_ != NULL) {
checkCudaErrors(cudaFree(centroid_));
centroid_ = NULL;
}
if (covariance_ != NULL) {
checkCudaErrors(cudaFree(covariance_));
covariance_ = NULL;
}
if (inverse_covariance_ != NULL) {
checkCudaErrors(cudaFree(inverse_covariance_));
inverse_covariance_ = NULL;
}
if (points_per_voxel_ != NULL) {
checkCudaErrors(cudaFree(points_per_voxel_));
points_per_voxel_ = NULL;
}
checkCudaErrors(cudaMalloc(¢roid_, sizeof(double) * 3 * voxel_num_));
checkCudaErrors(cudaMalloc(&covariance_, sizeof(double) * 9 * voxel_num_));
checkCudaErrors(cudaMalloc(&inverse_covariance_, sizeof(double) * 9 * voxel_num_));
checkCudaErrors(cudaMalloc(&points_per_voxel_, sizeof(int) * voxel_num_));
checkCudaErrors(cudaMemset(inverse_covariance_, 0, sizeof(double) * 9 * voxel_num_));
checkCudaErrors(cudaMemset(points_per_voxel_, 0, sizeof(int) * voxel_num_));
checkCudaErrors(cudaDeviceSynchronize());
}
int GVoxelGrid::getVoxelNum() const
{
return voxel_num_;
}
float GVoxelGrid::getMaxX() const
{
return max_x_;
}
float GVoxelGrid::getMaxY() const
{
return max_y_;
}
float GVoxelGrid::getMaxZ() const
{
return max_z_;
}
float GVoxelGrid::getMinX() const
{
return min_x_;
}
float GVoxelGrid::getMinY() const
{
return min_y_;
}
float GVoxelGrid::getMinZ() const
{
return min_z_;
}
float GVoxelGrid::getVoxelX() const
{
return voxel_x_;
}
float GVoxelGrid::getVoxelY() const
{
return voxel_y_;
}
float GVoxelGrid::getVoxelZ() const
{
return voxel_z_;
}
int GVoxelGrid::getMaxBX() const
{
return max_b_x_;
}
int GVoxelGrid::getMaxBY() const
{
return max_b_y_;
}
int GVoxelGrid::getMaxBZ() const
{
return max_b_z_;
}
int GVoxelGrid::getMinBX() const
{
return min_b_x_;
}
int GVoxelGrid::getMinBY() const
{
return min_b_y_;
}
int GVoxelGrid::getMinBZ() const
{
return min_b_z_;
}
int GVoxelGrid::getVgridX() const
{
return vgrid_x_;
}
int GVoxelGrid::getVgridY() const
{
return vgrid_y_;
}
int GVoxelGrid::getVgridZ() const
{
return vgrid_z_;
}
void GVoxelGrid::setLeafSize(float voxel_x, float voxel_y, float voxel_z)
{
voxel_x_ = voxel_x;
voxel_y_ = voxel_y;
voxel_z_ = voxel_z;
}
double* GVoxelGrid::getCentroidList() const
{
return centroid_;
}
double* GVoxelGrid::getCovarianceList() const
{
return covariance_;
}
double* GVoxelGrid::getInverseCovarianceList() const
{
return inverse_covariance_;
}
int* GVoxelGrid::getPointsPerVoxelList() const
{
return points_per_voxel_;
}
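/* Map a point to its linear voxel index: per-axis cell indices are computed relative to
 * the grid minimum and flattened as id_x + id_y * vgrid_x + id_z * vgrid_x * vgrid_y. */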
extern "C" __device__ int voxelId(float x, float y, float z,
float voxel_x, float voxel_y, float voxel_z,
int min_b_x, int min_b_y, int min_b_z,
int vgrid_x, int vgrid_y, int vgrid_z)
{
int id_x = static_cast<int>(floorf(x / voxel_x) - static_cast<float>(min_b_x));
int id_y = static_cast<int>(floorf(y / voxel_y) - static_cast<float>(min_b_y));
int id_z = static_cast<int>(floorf(z / voxel_z) - static_cast<float>(min_b_z));
return (id_x + id_y * vgrid_x + id_z * vgrid_x * vgrid_y);
}
/* First step to compute centroids and covariances of voxels. */
extern "C" __global__ void initCentroidAndCovariance(float *x, float *y, float *z, int *starting_point_ids, int *point_ids,
double *centroids, double *covariances, int voxel_num)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < voxel_num; i += stride) {
MatrixDevice centr(3, 1, voxel_num, centroids + i);
MatrixDevice cov(3, 3, voxel_num, covariances + i);
double centr0, centr1, centr2;
double cov00, cov01, cov02, cov11, cov12, cov22;
centr0 = centr1 = centr2 = 0.0;
cov00 = cov11 = cov22 = 1.0;
cov01 = cov02 = cov12 = 0.0;
for (int j = starting_point_ids[i]; j < starting_point_ids[i + 1]; j++) {
int pid = point_ids[j];
double t_x = static_cast<double>(x[pid]);
double t_y = static_cast<double>(y[pid]);
double t_z = static_cast<double>(z[pid]);
centr0 += t_x;
centr1 += t_y;
centr2 += t_z;
cov00 += t_x * t_x;
cov01 += t_x * t_y;
cov02 += t_x * t_z;
cov11 += t_y * t_y;
cov12 += t_y * t_z;
cov22 += t_z * t_z;
}
centr(0) = centr0;
centr(1) = centr1;
centr(2) = centr2;
cov(0, 0) = cov00;
cov(0, 1) = cov01;
cov(0, 2) = cov02;
cov(1, 1) = cov11;
cov(1, 2) = cov12;
cov(2, 2) = cov22;
}
}
/* Update centroids of voxels. */
extern "C" __global__ void updateVoxelCentroid(double *centroid, int *points_per_voxel, int voxel_num)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
MatrixDevice centr(3, 1, voxel_num, centroid + vid);
double points_num = static_cast<double>(points_per_voxel[vid]);
if (points_num > 0) {
centr /= points_num;
}
}
}
/* Update covariance of voxels. */
extern "C" __global__ void updateVoxelCovariance(double *centroid, double *pt_sum, double *covariance, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
MatrixDevice centr(3, 1, voxel_num, centroid + vid);
MatrixDevice cov(3, 3, voxel_num, covariance + vid);
MatrixDevice pts(3, 1, voxel_num, pt_sum + vid);
double points_num = static_cast<double>(points_per_voxel[vid]);
double c0 = centr(0);
double c1 = centr(1);
double c2 = centr(2);
double p0 = pts(0);
double p1 = pts(1);
double p2 = pts(2);
points_per_voxel[vid] = (points_num < min_points_per_voxel) ? 0 : points_num;
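// Convert the accumulated sums into the voxel covariance. With n points, centroid c and
// coordinate sums p, each entry becomes ((sum_of_products - 2 * p * c) / n + c * c) * (n - 1) / n,
// i.e. roughly (E[p p^T] - c c^T) scaled by (n - 1) / n.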
if (points_num >= min_points_per_voxel) {
double mult = (points_num - 1.0) / points_num;
cov(0, 0) = ((cov(0, 0) - 2.0 * p0 * c0) / points_num + c0 * c0) * mult;
cov(0, 1) = ((cov(0, 1) - 2.0 * p0 * c1) / points_num + c0 * c1) * mult;
cov(0, 2) = ((cov(0, 2) - 2.0 * p0 * c2) / points_num + c0 * c2) * mult;
cov(1, 0) = cov(0, 1);
cov(1, 1) = ((cov(1, 1) - 2.0 * p1 * c1) / points_num + c1 * c1) * mult;
cov(1, 2) = ((cov(1, 2) - 2.0 * p1 * c2) / points_num + c1 * c2) * mult;
cov(2, 0) = cov(0, 2);
cov(2, 1) = cov(1, 2);
cov(2, 2) = ((cov(2, 2) - 2.0 * p2 * c2) / points_num + c2 * c2) * mult;
}
}
}
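/* The kernels below rebuild each voxel covariance from its eigen-decomposition:
 * computeInverseEigenvectors stores V^-1 in inverse_covariance_ as scratch space,
 * updateCovarianceS0 scales V by the (previously clamped) eigenvalues,
 * updateCovarianceS1 forms cov = V * diag(eval) * V^-1 one column at a time,
 * and computeInverseCovariance finally inverts the rebuilt covariance. */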
extern "C" __global__ void computeInverseEigenvectors(double *inverse_covariance, int *points_per_voxel, int voxel_num, double *eigenvectors, int min_points_per_voxel)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice icov(3, 3, voxel_num, inverse_covariance + vid);
MatrixDevice eigen_vectors(3, 3, voxel_num, eigenvectors + vid);
eigen_vectors.inverse(icov);
}
__syncthreads();
}
}
//eigen_vecs = eigen_vecs * eigen_val
extern "C" __global__ void updateCovarianceS0(int *points_per_voxel, int voxel_num, double *eigenvalues, double *eigenvectors, int min_points_per_voxel)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice eigen_vectors(3, 3, voxel_num, eigenvectors + vid);
double eig_val0 = eigenvalues[vid];
double eig_val1 = eigenvalues[vid + voxel_num];
double eig_val2 = eigenvalues[vid + 2 * voxel_num];
eigen_vectors(0, 0) *= eig_val0;
eigen_vectors(1, 0) *= eig_val0;
eigen_vectors(2, 0) *= eig_val0;
eigen_vectors(0, 1) *= eig_val1;
eigen_vectors(1, 1) *= eig_val1;
eigen_vectors(2, 1) *= eig_val1;
eigen_vectors(0, 2) *= eig_val2;
eigen_vectors(1, 2) *= eig_val2;
eigen_vectors(2, 2) *= eig_val2;
}
__syncthreads();
}
}
//cov = new eigen_vecs * eigen_vecs transpose
extern "C" __global__ void updateCovarianceS1(double *covariance, double *inverse_covariance, int *points_per_voxel, int voxel_num, double *eigenvectors, int min_points_per_voxel, int col)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice cov(3, 3, voxel_num, covariance + vid);
MatrixDevice icov(3, 3, voxel_num, inverse_covariance + vid);
MatrixDevice eigen_vectors(3, 3, voxel_num, eigenvectors + vid);
double tmp0 = icov(0, col);
double tmp1 = icov(1, col);
double tmp2 = icov(2, col);
cov(0, col) = eigen_vectors(0, 0) * tmp0 + eigen_vectors(0, 1) * tmp1 + eigen_vectors(0, 2) * tmp2;
cov(1, col) = eigen_vectors(1, 0) * tmp0 + eigen_vectors(1, 1) * tmp1 + eigen_vectors(1, 2) * tmp2;
cov(2, col) = eigen_vectors(2, 0) * tmp0 + eigen_vectors(2, 1) * tmp1 + eigen_vectors(2, 2) * tmp2;
}
__syncthreads();
}
}
extern "C" __global__ void computeInverseCovariance(double *covariance, double *inverse_covariance, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice cov(3, 3, voxel_num, covariance + vid);
MatrixDevice icov(3, 3, voxel_num, inverse_covariance + vid);
cov.inverse(icov);
}
__syncthreads();
}
}
template<typename T>
__global__ void init(T *input, int size, int local_size)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) {
for (int j = 0; j < local_size; j++)
input[i + j * size] = 1;
}
}
extern "C" __global__ void initBoolean(bool *input, int size)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) {
input[i] = (i % 2 == 0) ? true : false;
}
}
/* Normalize input matrices to avoid overflow. */
extern "C" __global__ void normalize(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.normalizeInput(id);
__syncthreads();
}
}
/* Compute eigenvalues. Eigenvalues are arranged in increasing order
* (eigen(0) <= eigen(1) <= eigen(2)). */
extern "C" __global__ void computeEigenvalues(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvalues(id);
__syncthreads();
}
}
/* First step to compute eigenvector 0 of covariance matrices. */
extern "C" __global__ void computeEvec00(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector00(id);
__syncthreads();
}
}
/* Second step to compute eigenvector 0 of covariance matrices. */
extern "C" __global__ void computeEvec01(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector01(id);
__syncthreads();
}
}
/* First step to compute eigenvector 1 of covariance matrices. */
extern "C" __global__ void computeEvec10(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector10(id);
__syncthreads();
}
}
/* Second step to compute eigenvector 1 of covariance matrices. */
extern "C" __global__ void computeEvec11(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector11(id);
__syncthreads();
}
}
/* Compute eigenvector 2 of covariance matrices. */
extern "C" __global__ void computeEvec2(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector2(id);
__syncthreads();
}
}
/* Final step to compute eigenvalues. */
extern "C" __global__ void updateEval(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.updateEigenvalues(id);
__syncthreads();
}
}
/* Update eigenvalues in case the covariance matrix is nearly singular. */
extern "C" __global__ void updateEval2(double *eigenvalues, int *points_per_voxel, int voxel_num, int min_points_per_voxel)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel) {
MatrixDevice eigen_val(3, 1, voxel_num, eigenvalues + id);
double ev0 = eigen_val(0);
double ev1 = eigen_val(1);
double ev2 = eigen_val(2);
if (ev0 < 0 || ev1 < 0 || ev2 <= 0) {
points_per_voxel[id] = 0;
continue;
}
double min_cov_eigvalue = ev2 * 0.01;
if (ev0 < min_cov_eigvalue) {
ev0 = min_cov_eigvalue;
if (ev1 < min_cov_eigvalue) {
ev1 = min_cov_eigvalue;
}
}
eigen_val(0) = ev0;
eigen_val(1) = ev1;
eigen_val(2) = ev2;
__syncthreads();
}
}
}
void GVoxelGrid::computeCentroidAndCovariance()
{
int block_x = (voxel_num_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : voxel_num_;
int grid_x = (voxel_num_ - 1) / block_x + 1;
initCentroidAndCovariance<<<grid_x, block_x>>>(x_, y_, z_, starting_point_ids_, point_ids_, centroid_, covariance_, voxel_num_);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
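// centroid_ still holds the raw coordinate sums here; keep a copy so the covariance
// update can use them after updateVoxelCentroid normalizes the centroids in place.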
double *pt_sum;
checkCudaErrors(cudaMalloc(&pt_sum, sizeof(double) * voxel_num_ * 3));
checkCudaErrors(cudaMemcpy(pt_sum, centroid_, sizeof(double) * voxel_num_ * 3, cudaMemcpyDeviceToDevice));
updateVoxelCentroid<<<grid_x, block_x>>>(centroid_, points_per_voxel_, voxel_num_);
checkCudaErrors(cudaGetLastError());
updateVoxelCovariance<<<grid_x, block_x>>>(centroid_, pt_sum, covariance_, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(pt_sum));
double *eigenvalues_dev, *eigenvectors_dev;
checkCudaErrors(cudaMalloc(&eigenvalues_dev, sizeof(double) * 3 * voxel_num_));
checkCudaErrors(cudaMalloc(&eigenvectors_dev, sizeof(double) * 9 * voxel_num_));
// Solve the eigenvalue/eigenvector problem for all voxel covariances on the GPU.
SymmetricEigensolver3x3 sv(voxel_num_);
sv.setInputMatrices(covariance_);
sv.setEigenvalues(eigenvalues_dev);
sv.setEigenvectors(eigenvectors_dev);
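// The symmetric 3x3 eigen-solve is split into a fixed sequence of kernels, each advancing
// every voxel one step: normalize the inputs, compute the eigenvalues, build the three
// eigenvectors, then refine and clamp the eigenvalues before the covariances are rebuilt and inverted.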
normalize<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeEigenvalues<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeEvec00<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeEvec01<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeEvec10<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeEvec11<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeEvec2<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
updateEval<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
updateEval2<<<grid_x, block_x>>>(eigenvalues_dev, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeInverseEigenvectors<<<grid_x, block_x>>>(inverse_covariance_, points_per_voxel_, voxel_num_, eigenvectors_dev, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
updateCovarianceS0<<<grid_x, block_x>>>(points_per_voxel_, voxel_num_, eigenvalues_dev, eigenvectors_dev, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
for (int i = 0; i < 3; i++) {
updateCovarianceS1<<<grid_x, block_x>>>(covariance_, inverse_covariance_, points_per_voxel_, voxel_num_, eigenvectors_dev, min_points_per_voxel_, i);
checkCudaErrors(cudaGetLastError());
}
checkCudaErrors(cudaDeviceSynchronize());
computeInverseCovariance<<<grid_x, block_x>>>(covariance_, inverse_covariance_, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
sv.memFree();
checkCudaErrors(cudaFree(eigenvalues_dev));
checkCudaErrors(cudaFree(eigenvectors_dev));
}
//Inputs are expected to already reside in device memory
void GVoxelGrid::setInput(float *x, float *y, float *z, int points_num)
{
if (points_num <= 0)
return;
x_ = x;
y_ = y;
z_ = z;
points_num_ = points_num;
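// Build pipeline: compute the cloud bounds and grid dimensions, allocate per-voxel buffers,
// bucket the points into voxels, compute per-voxel centroids and covariances, and build the
// octree used later for nearest-neighbor queries.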
findBoundaries();
voxel_num_ = vgrid_x_ * vgrid_y_ * vgrid_z_;
initialize();
scatterPointsToVoxelGrid();
computeCentroidAndCovariance();
buildOctree();
}
/* Find the largest coordinate values */
extern "C" __global__ void findMax(float *x, float *y, float *z, int full_size, int half_size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
x[i] = (i + half_size < full_size) ? ((x[i] >= x[i + half_size]) ? x[i] : x[i + half_size]) : x[i];
y[i] = (i + half_size < full_size) ? ((y[i] >= y[i + half_size]) ? y[i] : y[i + half_size]) : y[i];
z[i] = (i + half_size < full_size) ? ((z[i] >= z[i + half_size]) ? z[i] : z[i + half_size]) : z[i];
}
}
/* Find the smallest coordinate values */
extern "C" __global__ void findMin(float *x, float *y, float *z, int full_size, int half_size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
x[i] = (i + half_size < full_size) ? ((x[i] <= x[i + half_size]) ? x[i] : x[i + half_size]) : x[i];
y[i] = (i + half_size < full_size) ? ((y[i] <= y[i + half_size]) ? y[i] : y[i + half_size]) : y[i];
z[i] = (i + half_size < full_size) ? ((z[i] <= z[i + half_size]) ? z[i] : z[i + half_size]) : z[i];
}
}
void GVoxelGrid::findBoundaries()
{
float *max_x, *max_y, *max_z, *min_x, *min_y, *min_z;
checkCudaErrors(cudaMalloc(&max_x, sizeof(float) * points_num_));
checkCudaErrors(cudaMalloc(&max_y, sizeof(float) * points_num_));
checkCudaErrors(cudaMalloc(&max_z, sizeof(float) * points_num_));
checkCudaErrors(cudaMalloc(&min_x, sizeof(float) * points_num_));
checkCudaErrors(cudaMalloc(&min_y, sizeof(float) * points_num_));
checkCudaErrors(cudaMalloc(&min_z, sizeof(float) * points_num_));
checkCudaErrors(cudaMemcpy(max_x, x_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(max_y, y_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(max_z, z_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(min_x, x_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(min_y, y_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(min_z, z_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice));
int points_num = points_num_;
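// Pairwise reduction: each pass compares element i with element i + half_points_num and keeps
// the max/min in the lower half, halving the active range until index 0 holds the result.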
while (points_num > 1) {
int half_points_num = (points_num - 1) / 2 + 1;
int block_x = (half_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_points_num;
int grid_x = (half_points_num - 1) / block_x + 1;
findMax<<<grid_x, block_x>>>(max_x, max_y, max_z, points_num, half_points_num);
checkCudaErrors(cudaGetLastError());
findMin<<<grid_x, block_x>>>(min_x, min_y, min_z, points_num, half_points_num);
checkCudaErrors(cudaGetLastError());
points_num = half_points_num;
}
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(&max_x_, max_x, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&max_y_, max_y, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&max_z_, max_z, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&min_x_, min_x, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&min_y_, min_y, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&min_z_, min_z, sizeof(float), cudaMemcpyDeviceToHost));
max_b_x_ = static_cast<int> (floor(max_x_ / voxel_x_));
max_b_y_ = static_cast<int> (floor(max_y_ / voxel_y_));
max_b_z_ = static_cast<int> (floor(max_z_ / voxel_z_));
min_b_x_ = static_cast<int> (floor(min_x_ / voxel_x_));
min_b_y_ = static_cast<int> (floor(min_y_ / voxel_y_));
min_b_z_ = static_cast<int> (floor(min_z_ / voxel_z_));
vgrid_x_ = max_b_x_ - min_b_x_ + 1;
vgrid_y_ = max_b_y_ - min_b_y_ + 1;
vgrid_z_ = max_b_z_ - min_b_z_ + 1;
checkCudaErrors(cudaFree(max_x));
checkCudaErrors(cudaFree(max_y));
checkCudaErrors(cudaFree(max_z));
checkCudaErrors(cudaFree(min_x));
checkCudaErrors(cudaFree(min_y));
checkCudaErrors(cudaFree(min_z));
}
/* Find indexes idx, idy and idz of candidate voxels */
extern "C" __global__ void findBoundariesOfCandidateVoxels(float *x, float *y, float *z,
float radius, int points_num,
float voxel_x, float voxel_y, float voxel_z,
int max_b_x, int max_b_y, int max_b_z,
int min_b_x, int min_b_y, int min_b_z,
int *max_vid_x, int *max_vid_y, int *max_vid_z,
int *min_vid_x, int *min_vid_y, int *min_vid_z,
int *candidate_voxel_per_point)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < points_num; i += stride) {
float t_x = x[i];
float t_y = y[i];
float t_z = z[i];
int max_id_x = static_cast<int>(floorf((t_x + radius) / voxel_x));
int max_id_y = static_cast<int>(floorf((t_y + radius) / voxel_y));
int max_id_z = static_cast<int>(floorf((t_z + radius) / voxel_z));
int min_id_x = static_cast<int>(floorf((t_x - radius) / voxel_x));
int min_id_y = static_cast<int>(floorf((t_y - radius) / voxel_y));
int min_id_z = static_cast<int>(floorf((t_z - radius) / voxel_z));
/* Find intersection of the cube containing
* the NN sphere of the point and the voxel grid
*/
max_id_x = (max_id_x > max_b_x) ? max_b_x - min_b_x : max_id_x - min_b_x;
max_id_y = (max_id_y > max_b_y) ? max_b_y - min_b_y : max_id_y - min_b_y;
max_id_z = (max_id_z > max_b_z) ? max_b_z - min_b_z : max_id_z - min_b_z;
min_id_x = (min_id_x < min_b_x) ? 0 : min_id_x - min_b_x;
min_id_y = (min_id_y < min_b_y) ? 0 : min_id_y - min_b_y;
min_id_z = (min_id_z < min_b_z) ? 0 : min_id_z - min_b_z;
int vx = max_id_x - min_id_x + 1;
int vy = max_id_y - min_id_y + 1;
int vz = max_id_z - min_id_z + 1;
candidate_voxel_per_point[i] = (vx > 0 && vy > 0 && vz > 0) ? vx * vy * vz : 0;
max_vid_x[i] = max_id_x;
max_vid_y[i] = max_id_y;
max_vid_z[i] = max_id_z;
min_vid_x[i] = min_id_x;
min_vid_y[i] = min_id_y;
min_vid_z[i] = min_id_z;
}
}
/* Write id of valid points to the output buffer */
extern "C" __global__ void collectValidPoints(int *valid_points_mark, int *valid_points_id, int *valid_points_location, int points_num)
{
for (int index = threadIdx.x + blockIdx.x * blockDim.x; index < points_num; index += blockDim.x * gridDim.x) {
if (valid_points_mark[index] != 0) {
valid_points_id[valid_points_location[index]] = index;
}
}
}
/* Compute the global index of candidate voxels.
* global index = idx + idy * vgrid_x + idz * vgrid_x * vgrid_y */
extern "C" __global__ void updateCandidateVoxelIds(int points_num,
int vgrid_x, int vgrid_y, int vgrid_z,
int *max_vid_x, int *max_vid_y, int *max_vid_z,
int *min_vid_x, int *min_vid_y, int *min_vid_z,
int *starting_voxel_id,
int *candidate_voxel_id)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < points_num; i += stride) {
int max_id_x = max_vid_x[i];
int max_id_y = max_vid_y[i];
int max_id_z = max_vid_z[i];
int min_id_x = min_vid_x[i];
int min_id_y = min_vid_y[i];
int min_id_z = min_vid_z[i];
int write_location = starting_voxel_id[i];
for (int j = min_id_x; j <= max_id_x; j++) {
for (int k = min_id_y; k <= max_id_y; k++) {
for (int l = min_id_z; l <= max_id_z; l++) {
candidate_voxel_id[write_location] = j + k * vgrid_x + l * vgrid_x * vgrid_y;
write_location++;
}
}
}
}
}
/* Find out which voxels are really inside the radius.
* This is done by comparing the distance between the centroid
* of the voxel and the query point with the radius.
*
* The valid_voxel_mark array stores the result of the inspection, which is 0
* if the centroid is outside the radius and 1 otherwise.
*
* The valid_points_mark array stores the status of the inspection per point.
* It is 0 if no voxel in the candidate list is truly a neighbor
* of the point, and 1 otherwise.
*
* The valid_voxel_count array stores the number of true neighbor voxels per point.
*/
extern "C" __global__ void inspectCandidateVoxels(float *x, float *y, float *z,
float radius, int max_nn, int points_num,
double *centroid, int *points_per_voxel, int offset,
int *starting_voxel_id, int *candidate_voxel_id,
int *valid_voxel_mark, int *valid_voxel_count, int *valid_points_mark)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < points_num; i += stride) {
double t_x = static_cast<double>(x[i]);
double t_y = static_cast<double>(y[i]);
double t_z = static_cast<double>(z[i]);
int nn = 0;
for (int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1] && nn <= max_nn; j++) {
int point_num = points_per_voxel[candidate_voxel_id[j]];
MatrixDevice centr(3, 1, offset, centroid + candidate_voxel_id[j]);
double centroid_x = (point_num > 0) ? (t_x - centr(0)) : radius + 1;
double centroid_y = (point_num > 0) ? (t_y - centr(1)) : 0;
double centroid_z = (point_num > 0) ? (t_z - centr(2)) : 0;
bool res = (norm3d(centroid_x, centroid_y, centroid_z) <= radius);
valid_voxel_mark[j] = (res) ? 1 : 0;
nn += (res) ? 1 : 0;
}
valid_voxel_count[i] = nn;
valid_points_mark[i] = (nn > 0) ? 1 : 0;
__syncthreads();
}
}
/* Write the id of valid voxels to the output buffer */
extern "C" __global__ void collectValidVoxels(int *valid_voxels_mark, int *candidate_voxel_id, int *output, int *writing_location, int candidate_voxel_num)
{
for (int index = threadIdx.x + blockIdx.x * blockDim.x; index < candidate_voxel_num; index += blockDim.x * gridDim.x) {
if (valid_voxels_mark[index] == 1) {
output[writing_location[index]] = candidate_voxel_id[index];
}
}
}
/* Write the number of valid voxels per point to the output buffer */
extern "C" __global__ void collectValidVoxelCount(int *input_valid_voxel_count, int *output_valid_voxel_count, int *writing_location, int points_num)
{
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < points_num; id += blockDim.x * gridDim.x) {
if (input_valid_voxel_count[id] != 0)
output_valid_voxel_count[writing_location[id]] = input_valid_voxel_count[id];
}
}
template <typename T>
void GVoxelGrid::ExclusiveScan(T *input, int ele_num, T *sum)
{
thrust::device_ptr<T> dev_ptr(input);
thrust::exclusive_scan(dev_ptr, dev_ptr + ele_num, dev_ptr);
checkCudaErrors(cudaDeviceSynchronize());
*sum = *(dev_ptr + ele_num - 1);
}
template <typename T>
void GVoxelGrid::ExclusiveScan(T *input, int ele_num)
{
thrust::device_ptr<T> dev_ptr(input);
thrust::exclusive_scan(dev_ptr, dev_ptr + ele_num, dev_ptr);
checkCudaErrors(cudaDeviceSynchronize());
}
void GVoxelGrid::radiusSearch(float *qx, float *qy, float *qz, int points_num, float radius, int max_nn,
int **valid_points, int **starting_voxel_id, int **valid_voxel_id,
int *valid_voxel_num, int *valid_points_num)
{
//Testing input query points
int block_x = (points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : points_num;
int grid_x = (points_num - 1) / block_x + 1;
//Boundaries of candidate voxels per point
int *max_vid_x, *max_vid_y, *max_vid_z;
int *min_vid_x, *min_vid_y, *min_vid_z;
checkCudaErrors(cudaMalloc(&max_vid_x, sizeof(int) * points_num));
checkCudaErrors(cudaMalloc(&max_vid_y, sizeof(int) * points_num));
checkCudaErrors(cudaMalloc(&max_vid_z, sizeof(int) * points_num));
checkCudaErrors(cudaMalloc(&min_vid_x, sizeof(int) * points_num));
checkCudaErrors(cudaMalloc(&min_vid_y, sizeof(int) * points_num));
checkCudaErrors(cudaMalloc(&min_vid_z, sizeof(int) * points_num));
//Determine the number of candidate voxels per point
int *candidate_voxel_num_per_point;
int total_candidate_voxel_num;
checkCudaErrors(cudaMalloc(&candidate_voxel_num_per_point, sizeof(int) * (points_num + 1)));
findBoundariesOfCandidateVoxels<<<grid_x, block_x>>>(qx, qy, qz, radius, points_num,
voxel_x_, voxel_y_, voxel_z_,
max_b_x_, max_b_y_, max_b_z_,
min_b_x_, min_b_y_, min_b_z_,
max_vid_x, max_vid_y, max_vid_z,
min_vid_x, min_vid_y, min_vid_z,
candidate_voxel_num_per_point);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
//Total candidate voxel num is determined by an exclusive scan on candidate_voxel_num_per_point
ExclusiveScan(candidate_voxel_num_per_point, points_num + 1, &total_candidate_voxel_num);
if (total_candidate_voxel_num <= 0) {
std::cout << "No candidate voxel was found. Exiting..." << std::endl;
checkCudaErrors(cudaFree(max_vid_x));
checkCudaErrors(cudaFree(max_vid_y));
checkCudaErrors(cudaFree(max_vid_z));
checkCudaErrors(cudaFree(min_vid_x));
checkCudaErrors(cudaFree(min_vid_y));
checkCudaErrors(cudaFree(min_vid_z));
checkCudaErrors(cudaFree(candidate_voxel_num_per_point));
valid_points = NULL;
starting_voxel_id = NULL;
valid_voxel_id = NULL;
*valid_voxel_num = 0;
*valid_points_num = 0;
return;
}
//Determine the voxel id of candidate voxels
int *candidate_voxel_id;
checkCudaErrors(cudaMalloc(&candidate_voxel_id, sizeof(int) * total_candidate_voxel_num));
updateCandidateVoxelIds<<<grid_x, block_x>>>(points_num, vgrid_x_, vgrid_y_, vgrid_z_,
max_vid_x, max_vid_y, max_vid_z,
min_vid_x, min_vid_y, min_vid_z,
candidate_voxel_num_per_point, candidate_voxel_id);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
//Go through the candidate voxel id list and find out which voxels are really inside the radius
int *valid_voxel_mark;
checkCudaErrors(cudaMalloc(&valid_voxel_mark, sizeof(int) * total_candidate_voxel_num));
int *valid_voxel_count;
checkCudaErrors(cudaMalloc(&valid_voxel_count, sizeof(int) * (points_num + 1)));
int *valid_points_mark;
checkCudaErrors(cudaMalloc(&valid_points_mark, sizeof(int) * points_num));
block_x = (total_candidate_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : total_candidate_voxel_num;
grid_x = (total_candidate_voxel_num - 1) / block_x + 1;
///CHECK VALID VOXEL COUNT AGAIN
inspectCandidateVoxels<<<grid_x, block_x>>>(qx, qy, qz, radius, max_nn, points_num,
centroid_, points_per_voxel_, voxel_num_,
candidate_voxel_num_per_point, candidate_voxel_id,
valid_voxel_mark, valid_voxel_count, valid_points_mark);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
//Collect valid points
int *valid_points_location;
checkCudaErrors(cudaMalloc(&valid_points_location, sizeof(int) * (points_num + 1)));
checkCudaErrors(cudaMemset(valid_points_location, 0, sizeof(int) * (points_num + 1)));
checkCudaErrors(cudaMemcpy(valid_points_location, valid_points_mark, sizeof(int) * points_num, cudaMemcpyDeviceToDevice));
//Writing location to the output buffer is determined by an exclusive scan
ExclusiveScan(valid_points_location, points_num + 1, valid_points_num);
if (*valid_points_num <= 0) {
//std::cout << "No valid point was found. Exiting..." << std::endl;
std::cout << "No valid point was found. Exiting...: " << *valid_points_num << std::endl;
checkCudaErrors(cudaFree(max_vid_x));
checkCudaErrors(cudaFree(max_vid_y));
checkCudaErrors(cudaFree(max_vid_z));
checkCudaErrors(cudaFree(min_vid_x));
checkCudaErrors(cudaFree(min_vid_y));
checkCudaErrors(cudaFree(min_vid_z));
checkCudaErrors(cudaFree(candidate_voxel_num_per_point));
checkCudaErrors(cudaFree(candidate_voxel_id));
checkCudaErrors(cudaFree(valid_voxel_mark));
checkCudaErrors(cudaFree(valid_voxel_count));
checkCudaErrors(cudaFree(valid_points_mark));
checkCudaErrors(cudaFree(valid_points_location));
valid_points = NULL;
starting_voxel_id = NULL;
valid_voxel_id = NULL;
*valid_voxel_num = 0;
*valid_points_num = 0;
return;
}
checkCudaErrors(cudaMalloc(valid_points, sizeof(int) * (*valid_points_num)));
collectValidPoints<<<grid_x, block_x>>>(valid_points_mark, *valid_points, valid_points_location, points_num);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMalloc(starting_voxel_id, sizeof(int) * (*valid_points_num + 1)));
collectValidVoxelCount<<<grid_x, block_x>>>(valid_voxel_count, *starting_voxel_id, valid_points_location, points_num);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
//Determine the starting location of voxels per point in the valid points list
ExclusiveScan(*starting_voxel_id, *valid_points_num + 1, valid_voxel_num);
//Collect valid voxels
int *valid_voxel_location;
checkCudaErrors(cudaMalloc(&valid_voxel_location, sizeof(int) * (total_candidate_voxel_num + 1)));
checkCudaErrors(cudaMemcpy(valid_voxel_location, valid_voxel_mark, sizeof(int) * total_candidate_voxel_num, cudaMemcpyDeviceToDevice));
ExclusiveScan(valid_voxel_location, total_candidate_voxel_num + 1, valid_voxel_num);
if (*valid_voxel_num <= 0) {
checkCudaErrors(cudaFree(max_vid_x));
max_vid_x = NULL;
checkCudaErrors(cudaFree(max_vid_y));
max_vid_y = NULL;
checkCudaErrors(cudaFree(max_vid_z));
max_vid_z = NULL;
checkCudaErrors(cudaFree(min_vid_x));
min_vid_x = NULL;
checkCudaErrors(cudaFree(min_vid_y));
min_vid_y = NULL;
checkCudaErrors(cudaFree(min_vid_z));
min_vid_z = NULL;
checkCudaErrors(cudaFree(candidate_voxel_num_per_point));
candidate_voxel_num_per_point = NULL;
checkCudaErrors(cudaFree(candidate_voxel_id));
candidate_voxel_id = NULL;
checkCudaErrors(cudaFree(valid_voxel_mark));
valid_voxel_mark = NULL;
checkCudaErrors(cudaFree(valid_voxel_count));
valid_voxel_count = NULL;
checkCudaErrors(cudaFree(valid_points_mark));
valid_points_mark = NULL;
checkCudaErrors(cudaFree(valid_points_location));
valid_points_location = NULL;
checkCudaErrors(cudaFree(valid_voxel_location));
valid_voxel_location = NULL;
valid_points = NULL;
starting_voxel_id = NULL;
valid_voxel_id = NULL;
*valid_voxel_num = 0;
*valid_points_num = 0;
// Bail out as in the earlier failure paths; the working buffers above have already been freed.
return;
}
checkCudaErrors(cudaMalloc(valid_voxel_id, sizeof(int) * (*valid_voxel_num)));
block_x = (total_candidate_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : total_candidate_voxel_num;
grid_x = (total_candidate_voxel_num - 1) / block_x + 1;
collectValidVoxels<<<grid_x, block_x>>>(valid_voxel_mark, candidate_voxel_id, *valid_voxel_id, valid_voxel_location, total_candidate_voxel_num);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(max_vid_x));
checkCudaErrors(cudaFree(max_vid_y));
checkCudaErrors(cudaFree(max_vid_z));
checkCudaErrors(cudaFree(min_vid_x));
checkCudaErrors(cudaFree(min_vid_y));
checkCudaErrors(cudaFree(min_vid_z));
checkCudaErrors(cudaFree(candidate_voxel_num_per_point));
checkCudaErrors(cudaFree(candidate_voxel_id));
checkCudaErrors(cudaFree(valid_voxel_mark));
checkCudaErrors(cudaFree(valid_points_mark));
checkCudaErrors(cudaFree(valid_voxel_count));
checkCudaErrors(cudaFree(valid_points_location));
checkCudaErrors(cudaFree(valid_voxel_location));
}
/* Build parent nodes from child nodes of the octree */
extern "C" __global__ void buildParent(double *child_centroids, int *points_per_child,
int child_grid_x, int child_grid_y, int child_grid_z, int child_num,
double *parent_centroids, int *points_per_parent,
int parent_grid_x, int parent_grid_y, int parent_grid_z)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int idz = threadIdx.z + blockIdx.z * blockDim.z;
if (idx < parent_grid_x && idy < parent_grid_y && idz < parent_grid_z) {
int parent_idx = idx + idy * parent_grid_x + idz * parent_grid_x * parent_grid_y;
MatrixDevice parent_centr(3, 1, parent_grid_x * parent_grid_y * parent_grid_z, parent_centroids + parent_idx);
double pc0, pc1, pc2;
int points_num = 0;
double dpoints_num;
pc0 = 0.0;
pc1 = 0.0;
pc2 = 0.0;
for (int i = idx * 2; i < idx * 2 + 2 && i < child_grid_x; i++) {
for (int j = idy * 2; j < idy * 2 + 2 && j < child_grid_y; j++) {
for (int k = idz * 2; k < idz * 2 + 2 && k < child_grid_z; k++) {
int child_idx = i + j * child_grid_x + k * child_grid_x * child_grid_y;
MatrixDevice child_centr(3, 1, child_num, child_centroids + child_idx);
int child_points = points_per_child[child_idx];
double dchild_points = static_cast<double>(child_points);
pc0 += (child_points > 0) ? dchild_points * child_centr(0) : 0.0;
pc1 += (child_points > 0) ? dchild_points * child_centr(1) : 0.0;
pc2 += (child_points > 0) ? dchild_points * child_centr(2) : 0.0;
points_num += (child_points > 0) ? child_points : 0;
__syncthreads();
}
}
}
dpoints_num = static_cast<double>(points_num);
parent_centr(0) = (points_num <= 0) ? DBL_MAX : pc0 / dpoints_num;
parent_centr(1) = (points_num <= 0) ? DBL_MAX : pc1 / dpoints_num;
parent_centr(2) = (points_num <= 0) ? DBL_MAX : pc2 / dpoints_num;
points_per_parent[parent_idx] = points_num;
}
}
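/* Added illustration (not in the original source): a parent covering two non-empty
children, one with 10 points at centroid c1 and one with 30 points at centroid c2,
ends up with parent_centr = (10*c1 + 30*c2) / 40 and points_per_parent = 40;
a parent whose children are all empty keeps DBL_MAX as its centroid, so it can
never win the nearest-centroid test during the later octree descent. */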
/* Compute the number of points per voxel using atomicAdd */
extern "C" __global__ void insertPointsToGrid(float *x, float *y, float *z, int points_num,
int *points_per_voxel, int voxel_num,
int vgrid_x, int vgrid_y, int vgrid_z,
float voxel_x, float voxel_y, float voxel_z,
int min_b_x, int min_b_y, int min_b_z)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < points_num; i += stride) {
float t_x = x[i];
float t_y = y[i];
float t_z = z[i];
int voxel_id = voxelId(t_x, t_y, t_z, voxel_x, voxel_y, voxel_z, min_b_x, min_b_y, min_b_z, vgrid_x, vgrid_y, vgrid_z);
// Update number of points in the voxel
int ptr_increment = (voxel_id < voxel_num) * voxel_id; // if (voxel_id < voxel_num), then use voxel_id
int incremental_value = (voxel_id < voxel_num);
//atomicAdd(points_per_voxel + voxel_id, 1);
atomicAdd(points_per_voxel + ptr_increment, incremental_value);
}
}
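/* Added note: the branchless guard above means a point whose voxel_id falls outside
the grid simply performs atomicAdd(points_per_voxel + 0, 0), so no divergent branch
is introduced and the in-range counts stay correct. */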
/* Rearrange points to locations corresponding to voxels */
extern "C" __global__ void scatterPointsToVoxels(float *x, float *y, float *z, int points_num, int voxel_num,
float voxel_x, float voxel_y, float voxel_z,
int min_b_x, int min_b_y, int min_b_z,
int vgrid_x, int vgrid_y, int vgrid_z,
int *writing_locations, int *point_ids)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < points_num; i += stride) {
int voxel_id = voxelId(x[i], y[i], z[i], voxel_x, voxel_y, voxel_z,
min_b_x, min_b_y, min_b_z, vgrid_x, vgrid_y, vgrid_z);
int ptr_increment = (voxel_id < voxel_num) * voxel_id;
int incremental_value = (voxel_id < voxel_num);
//int loc = atomicAdd(writing_locations + voxel_id, 1);
int loc = atomicAdd(writing_locations + ptr_increment, incremental_value);
point_ids[loc] = i;
}
}
void GVoxelGrid::scatterPointsToVoxelGrid()
{
if (starting_point_ids_ != NULL) {
checkCudaErrors(cudaFree(starting_point_ids_));
starting_point_ids_ = NULL;
}
if (point_ids_ != NULL) {
checkCudaErrors(cudaFree(point_ids_));
point_ids_ = NULL;
}
int block_x = (points_num_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : points_num_;
int grid_x = (points_num_ - 1) / block_x + 1;
insertPointsToGrid<<<grid_x, block_x>>>(x_, y_, z_, points_num_, points_per_voxel_, voxel_num_,
vgrid_x_, vgrid_y_, vgrid_z_,
voxel_x_, voxel_y_, voxel_z_,
min_b_x_, min_b_y_, min_b_z_);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMalloc(&starting_point_ids_, sizeof(int) * (voxel_num_ + 1)));
int *writing_location;
checkCudaErrors(cudaMalloc(&writing_location, sizeof(int) * voxel_num_));
checkCudaErrors(cudaMemcpy(starting_point_ids_, points_per_voxel_, sizeof(int) * voxel_num_, cudaMemcpyDeviceToDevice));
ExclusiveScan(starting_point_ids_, voxel_num_ + 1);
checkCudaErrors(cudaMemcpy(writing_location, starting_point_ids_, sizeof(int) * voxel_num_, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMalloc(&point_ids_, sizeof(int) * points_num_));
scatterPointsToVoxels<<<grid_x, block_x>>>(x_, y_, z_, points_num_, voxel_num_,
voxel_x_, voxel_y_, voxel_z_,
min_b_x_, min_b_y_, min_b_z_,
vgrid_x_, vgrid_y_, vgrid_z_,
writing_location, point_ids_);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(writing_location));
}
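/* Added sketch (assuming ExclusiveScan() computes a standard exclusive prefix sum):
the two passes above build a CSR-style index over the voxel grid. With 4 voxels
holding {2, 0, 3, 1} points:
points_per_voxel_ = [2, 0, 3, 1]
starting_point_ids_ = [0, 2, 2, 5, 6] (length voxel_num_ + 1)
scatterPointsToVoxels() then atomically bumps writing_location (a copy of that scan),
so the ids of the points belonging to voxel v end up in
point_ids_[starting_point_ids_[v] .. starting_point_ids_[v+1] - 1]. */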
void GVoxelGrid::buildOctree()
{
for (unsigned int i = 1; i < octree_centroids_.size(); i++) {
if (octree_centroids_[i] != NULL) {
checkCudaErrors(cudaFree(octree_centroids_[i]));
octree_centroids_[i] = NULL;
}
if (octree_points_per_node_[i] != NULL) {
checkCudaErrors(cudaFree(octree_points_per_node_[i]));
octree_points_per_node_[i] = NULL;
}
}
octree_centroids_.clear();
octree_points_per_node_.clear();
octree_grid_size_.clear();
//Push leafs to the octree list
octree_centroids_.push_back(centroid_);
octree_points_per_node_.push_back(points_per_voxel_);
OctreeGridSize grid_size;
grid_size.size_x = vgrid_x_;
grid_size.size_y = vgrid_y_;
grid_size.size_z = vgrid_z_;
octree_grid_size_.push_back(grid_size);
int node_number = voxel_num_;
int child_grid_x, child_grid_y, child_grid_z;
int parent_grid_x, parent_grid_y, parent_grid_z;
int i = 0;
while (node_number > 100000000) {
child_grid_x = octree_grid_size_[i].size_x;
child_grid_y = octree_grid_size_[i].size_y;
child_grid_z = octree_grid_size_[i].size_z;
parent_grid_x = (child_grid_x - 1) / 2 + 1;
parent_grid_y = (child_grid_y - 1) / 2 + 1;
parent_grid_z = (child_grid_z - 1) / 2 + 1;
node_number = parent_grid_x * parent_grid_y * parent_grid_z;
double *parent_centroids;
int *points_per_parent;
checkCudaErrors(cudaMalloc(&parent_centroids, sizeof(double) * 3 * node_number));
checkCudaErrors(cudaMalloc(&points_per_parent, sizeof(int) * node_number));
double *child_centroids = octree_centroids_[i];
int *points_per_child = octree_points_per_node_[i];
int block_x = (parent_grid_x > BLOCK_X) ? BLOCK_X : parent_grid_x;
int block_y = (parent_grid_y > BLOCK_Y) ? BLOCK_Y : parent_grid_y;
int block_z = (parent_grid_z > BLOCK_Z) ? BLOCK_Z : parent_grid_z;
int grid_x = (parent_grid_x - 1) / block_x + 1;
int grid_y = (parent_grid_y - 1) / block_y + 1;
int grid_z = (parent_grid_z - 1) / block_z + 1;
dim3 block(block_x, block_y, block_z);
dim3 grid(grid_x, grid_y, grid_z);
buildParent<<<grid, block>>>(child_centroids, points_per_child,
child_grid_x, child_grid_y, child_grid_z, child_grid_x * child_grid_y * child_grid_z,
parent_centroids, points_per_parent,
parent_grid_x, parent_grid_y, parent_grid_z);
checkCudaErrors(cudaGetLastError());
octree_centroids_.push_back(parent_centroids);
octree_points_per_node_.push_back(points_per_parent);
grid_size.size_x = parent_grid_x;
grid_size.size_y = parent_grid_y;
grid_size.size_z = parent_grid_z;
octree_grid_size_.push_back(grid_size);
i++;
}
checkCudaErrors(cudaDeviceSynchronize());
}
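/* Added example: each buildParent pass halves every grid dimension (rounding up),
e.g. 1000 x 1000 x 200 -> 500 x 500 x 100 -> 250 x 250 x 50 -> ... Because the loop
above only keeps adding levels while the current level still has more than
100,000,000 nodes, a 1000x1000x200 grid (2*10^8 leaf voxels) gets exactly one parent
level (2.5*10^7 nodes) before the loop exits. */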
/* Search for the nearest octree node */
extern "C" __global__ void nearestOctreeNodeSearch(float *x, float *y, float *z,
int *vid_x, int *vid_y, int *vid_z,
int points_num,
double *centroids, int *points_per_node,
int vgrid_x, int vgrid_y, int vgrid_z, int node_num)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < points_num; i += stride) {
int vx = vid_x[i];
int vy = vid_y[i];
int vz = vid_z[i];
double min_dist = DBL_MAX;
double t_x = static_cast<double>(x[i]);
double t_y = static_cast<double>(y[i]);
double t_z = static_cast<double>(z[i]);
double cur_dist;
int out_x, out_y, out_z;
out_x = vx;
out_y = vy;
out_z = vz;
double tmp_x, tmp_y, tmp_z;
for (int j = vx * 2; j < vx * 2 + 2 && j < vgrid_x; j++) {
for (int k = vy * 2; k < vy * 2 + 2 && k < vgrid_y; k++) {
for (int l = vz * 2; l < vz * 2 + 2 && l < vgrid_z; l++) {
int node_id = j + k * vgrid_x + l * vgrid_x * vgrid_y;
MatrixDevice node_centr(3, 1, node_num, centroids + node_id);
int points = points_per_node[node_id];
tmp_x = (points > 0) ? node_centr(0) - t_x : DBL_MAX;
tmp_y = (points > 0) ? node_centr(1) - t_y : 0.0;
tmp_z = (points > 0) ? node_centr(2) - t_z : 0.0;
cur_dist = norm3d(tmp_x, tmp_y, tmp_z);
bool res = (cur_dist < min_dist);
out_x = (res) ? j : out_x;
out_y = (res) ? k : out_y;
out_z = (res) ? l : out_z;
min_dist = (res) ? cur_dist : min_dist;
}
}
}
vid_x[i] = out_x;
vid_y[i] = out_y;
vid_z[i] = out_z;
}
}
/* Search for the nearest point from nearest voxel */
extern "C" __global__ void nearestPointSearch(float *qx, float *qy, float *qz, int qpoints_num,
float *rx, float *ry, float *rz, int rpoints_num,
int *vid_x, int *vid_y, int *vid_z,
int vgrid_x, int vgrid_y, int vgrid_z, int voxel_num,
int *starting_point_id, int *point_id, double *min_distance)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < qpoints_num; i += stride) {
int voxel_id = vid_x[i] + vid_y[i] * vgrid_x + vid_z[i] * vgrid_x * vgrid_y;
float cor_qx = qx[i];
float cor_qy = qy[i];
float cor_qz = qz[i];
float min_dist = FLT_MAX;
for (int j = starting_point_id[voxel_id]; j < starting_point_id[voxel_id + 1]; j++) {
int pid = point_id[j];
float cor_rx = rx[pid];
float cor_ry = ry[pid];
float cor_rz = rz[pid];
cor_rx -= cor_qx;
cor_ry -= cor_qy;
cor_rz -= cor_qz;
min_dist = fminf(norm3df(cor_rx, cor_ry, cor_rz), min_dist);
}
min_distance[i] = static_cast<double>(min_dist);
}
}
/* Verify if min distances are really smaller than or equal to max_range */
extern "C" __global__ void verifyDistances(int *valid_distance, double *min_distance, double max_range, int points_num)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < points_num; i += stride) {
bool check = (min_distance[i] <= max_range);
valid_distance[i] = (check) ? 1 : 0;
if (!check) {
min_distance[i] = 0;
}
}
}
void GVoxelGrid::nearestNeighborSearch(float *trans_x, float *trans_y, float *trans_z, int point_num, int *valid_distance, double *min_distance, float max_range)
{
int *vid_x, *vid_y, *vid_z;
checkCudaErrors(cudaMalloc(&vid_x, sizeof(int) * point_num));
checkCudaErrors(cudaMalloc(&vid_y, sizeof(int) * point_num));
checkCudaErrors(cudaMalloc(&vid_z, sizeof(int) * point_num));
checkCudaErrors(cudaMemset(vid_x, 0, sizeof(int) * point_num));
checkCudaErrors(cudaMemset(vid_y, 0, sizeof(int) * point_num));
checkCudaErrors(cudaMemset(vid_z, 0, sizeof(int) * point_num));
checkCudaErrors(cudaDeviceSynchronize());
int block_x = (point_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : point_num;
int grid_x = (point_num - 1) / block_x + 1;
// Go through top of the octree to the bottom
for (int i = octree_centroids_.size() - 1; i >= 0; i--) {
double *centroids = octree_centroids_[i];
int *points_per_node = octree_points_per_node_[i];
int vgrid_x = octree_grid_size_[i].size_x;
int vgrid_y = octree_grid_size_[i].size_y;
int vgrid_z = octree_grid_size_[i].size_z;
int node_num = vgrid_x * vgrid_y * vgrid_z;
nearestOctreeNodeSearch<<<grid_x, block_x>>>(trans_x, trans_y, trans_z,
vid_x, vid_y, vid_z,
point_num,
centroids, points_per_node,
vgrid_x, vgrid_y, vgrid_z, node_num);
checkCudaErrors(cudaGetLastError());
}
nearestPointSearch<<<grid_x, block_x>>>(trans_x, trans_y, trans_z, point_num,
x_, y_, z_, points_num_,
vid_x, vid_y, vid_z,
vgrid_x_, vgrid_y_, vgrid_z_, voxel_num_,
starting_point_ids_, point_ids_,
min_distance);
checkCudaErrors(cudaGetLastError());
verifyDistances<<<grid_x, block_x>>>(valid_distance, min_distance, max_range, point_num);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(vid_x));
checkCudaErrors(cudaFree(vid_y));
checkCudaErrors(cudaFree(vid_z));
}
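/* Added note: the level loop above is a coarse-to-fine descent. Starting from the
coarsest stored level, nearestOctreeNodeSearch refines (vid_x, vid_y, vid_z) at each
step by picking, among the up-to-2x2x2 child nodes of the node chosen at the coarser
level, the one whose centroid is closest to the query point; nearestPointSearch then
scans only the points stored in that final leaf voxel. */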
}
|
ab8f921a75d3a15e31271dbb315049836c284aaa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<math.h>
#include"for_use_GPU.h"
/* declaration of texture memory */
//texture<FLOAT> A;
//texture<FLOAT> B;
texture<float, hipTextureType1D, hipReadModeElementType> A;
texture<float, hipTextureType1D, hipReadModeElementType> B;
texture<int2, hipTextureType1D, hipReadModeElementType> A_double;
texture<int2, hipTextureType1D, hipReadModeElementType> B_double;
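/* Added note: 1-D texture fetches cannot return double directly, so when FLOAT is
compiled as double the data is bound to the int2 textures (A_double / B_double) and
every fetch inside the kernels below is reassembled with __hiloint2double(hi, lo). */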
//thread process
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// convolve A and B(non_symmetric)
//unsigned __stdcall process(void *thread_arg) {
/********************************************/
/* function for calculating root */
/********************************************/
extern "C"
__global__
void
process_root
(
//FLOAT *A,
//FLOAT *B,
FLOAT *C,
int *A_dims_array,
int *B_dims_array,
int len,
int interval,
int L_MAX,
int *error_array,
int error_array_num,
int pid,
int device_number
)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
int ii = blockIdx.z % len;
int level = blockIdx.z / len;
int A_dims[3] = { A_dims_array[level*3], A_dims_array[level*3+1], A_dims_array[level*3+2] };
int B_dims[3] = { B_dims_array[ii*3], B_dims_array[ii*3+1], B_dims_array[ii*3+2] };
int C_dims[2] = { A_dims[0] - B_dims[0] + 1, A_dims[1] - B_dims[1] + 1 };
int C_x = C_dims[1]/device_number;
if(C_dims[1]%device_number != 0){
C_x++;
}
idx_x = idx_x + pid * C_x;
if(idx_x < C_x * pid || idx_x >= C_x * (pid + 1)){
return ;
}
if(0 <= ii && ii < len && 0 <= idx_x && idx_x < C_dims[1] && 0 <= idx_y && idx_y < C_dims[0] && interval <= level && level < L_MAX ) {
int num_features = A_dims[2];
const int A_SQ = A_dims[0]*A_dims[1];
const int B_SQ = B_dims[0]*B_dims[1];
FLOAT add_val = 0;
int x = idx_x;
int y = idx_y;
int XA0 = A_dims[0]*x;
/* apply loop condition */
for(int i=0; i<error_array_num; i++){
if(error_array[i] == level){
return;
}
}
/* adjust the location of pointer of C */
FLOAT *dst;
unsigned long long int pointer = (unsigned long long int)C;
for(int a=interval; a<level; a++) {
for(int b=0; b<len; b++) {
int height = A_dims_array[a*3] - B_dims_array[b*3] + 1;
int width = A_dims_array[a*3 + 1] - B_dims_array[b*3 + 1] + 1;
/* error semantics */
if (height < 1 || width < 1){
printf("Invalid input in GPU\n");
return;
}
pointer += (unsigned long long int)(height*width*sizeof(FLOAT));
}
}
for(int b=0; b<ii; b++){
int height = A_dims_array[level*3] - B_dims_array[b*3] + 1;
int width = A_dims_array[level*3 + 1] - B_dims_array[b*3 + 1] + 1;
/* error semantics */
if (height < 1 || width < 1){
printf("Invalid input in GPU\n");
return;
}
pointer += (unsigned long long int)(height*width*sizeof(FLOAT));
}
dst = (FLOAT *)pointer;
/* adjust the location of pointer of A */
//unsigned long long int pointerA = (unsigned long long int)A;
int A_index_ini = 0;
for(int a=0; a<level; a++) {
// pointerA += (unsigned long long int)(A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2]*sizeof(FLOAT));
A_index_ini += A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2];
}
/* adjust the location of pointer of B */
//unsigned long long int pointerB = (unsigned long long int)B;
int B_index_ini = 0;
for(int b=0; b<ii; b++) {
// pointerB += (unsigned long long int)(B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2]*sizeof(FLOAT));
B_index_ini += B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2];
}
for(int f = 0; f < num_features; f++) // num_features = 31
{
// FLOAT *A_src = (FLOAT *)pointerA + f*A_SQ;
int A_index = A_index_ini + f*A_SQ;
// FLOAT *B_src = (FLOAT *)pointerB + f*B_SQ;
int B_index = B_index_ini + f*B_SQ;
// FLOAT *A_src2 =A_src+XA0;
A_index += XA0;
FLOAT val = 0;
// FLOAT *A_off = A_src2+y;
A_index += y;
// FLOAT *B_off = B_src;
for (int xp = 0; xp < B_dims[1]; xp++)
{
// FLOAT *A_temp = A_off;
int A_index_tmp = A_index;
// FLOAT *B_temp = B_off;
int B_index_tmp = B_index;
for (int yp = 0; yp < B_dims[0]; yp++)
{
// val += *(A_temp++) * *(B_temp++);
if(sizeof(FLOAT) == sizeof(float)) // if configured to use single precision
{
FLOAT A_val = tex1Dfetch(A, A_index_tmp);
FLOAT B_val = tex1Dfetch(B, B_index_tmp);
val += A_val * B_val;
}
else
{ // if configured to use double precision
int2 A_val = tex1Dfetch(A_double, A_index_tmp);
int2 B_val = tex1Dfetch(B_double, B_index_tmp);
val += __hiloint2double(A_val.y, A_val.x) * __hiloint2double(B_val.y, B_val.x);
}
A_index_tmp++;
B_index_tmp++;
}
// A_off+=A_dims[0];
A_index += A_dims[0];
// B_off+=B_dims[0];
B_index += B_dims[0];
}
add_val += val;
}
*(dst + (idx_x*C_dims[0] + idx_y)) += add_val;
}
return;
}
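/* Added note: C is one packed buffer holding a (height x width) score plane per
(pyramid level, root filter) pair; the pointer arithmetic above skips every plane
that precedes (level, ii) to locate this thread's destination, and the inner loops
accumulate the 31-channel correlation of the feature-pyramid level (read through
texture A) with the root filter (read through texture B) at position (x, y). */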
/********************************************/
/* function for calculating part */
/********************************************/
extern "C"
__global__
void
process_part
(
//FLOAT *A,
//FLOAT *B,
FLOAT *C,
int *A_dims_array,
int *B_dims_array,
int len,
int interval,
int L_MAX,
int *error_array,
int error_array_num,
int pid,
int device_number
)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
int ii = blockIdx.z % len;
int level = blockIdx.z / len;
int A_dims[3] = { A_dims_array[level*3], A_dims_array[level*3+1], A_dims_array[level*3+2] };
int B_dims[3] = { B_dims_array[ii*3], B_dims_array[ii*3+1], B_dims_array[ii*3+2] };
int C_dims[2] = { A_dims[0] - B_dims[0] + 1, A_dims[1] - B_dims[1] + 1 };
int C_x = C_dims[1]/device_number;
if(C_dims[1]%device_number != 0){
C_x++;
}
idx_x = idx_x + pid * C_x;
if(idx_x < C_x * pid || idx_x >= C_x * (pid + 1)){
return ;
}
if(0 <= ii && ii < len && 0 <= idx_x && idx_x < C_dims[1] && 0 <= idx_y && idx_y < C_dims[0] && 0 <= level && level < (L_MAX - interval) ) {
int num_features = A_dims[2];
const int A_SQ = A_dims[0]*A_dims[1];
const int B_SQ = B_dims[0]*B_dims[1];
FLOAT add_val = 0;
int x = idx_x;
int y = idx_y;
int XA0 = A_dims[0]*x;
/* apply loop condition */
for(int i=0; i<error_array_num; i++){
if(error_array[i] == level)
return;
}
/* adjust the location of pointer of C */
FLOAT *dst;
unsigned long long int pointer = (unsigned long long int)C;
for(int a=0; a<level; a++) {
for(int b=0; b<len; b++){
int height = A_dims_array[a*3] - B_dims_array[b*3] + 1;
int width = A_dims_array[a*3 + 1] - B_dims_array[b*3 + 1] + 1;
/* error semantics */
if(height < 1 || width < 1){
printf("Invalid input in GPU\n");
return;
}
pointer += (unsigned long long int)(height*width*sizeof(FLOAT));
}
}
for(int b=0; b<ii; b++){
int height = A_dims_array[level*3] - B_dims_array[b*3] + 1;
int width = A_dims_array[level*3 + 1] - B_dims_array[b*3 + 1] + 1;
/* error semantics */
if(height < 1 || width < 1){
printf("Invalid input in GPU\n");
return;
}
pointer += (unsigned long long int)(height*width*sizeof(FLOAT));
}
dst = (FLOAT *)pointer;
/* adjust the location of pointer of A */
// unsigned long long int pointerA = (unsigned long long int)A;
int A_index_ini = 0;
for(int a=0; a<level; a++) {
// pointerA += (unsigned long long int)(A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2]*sizeof(FLOAT));
A_index_ini += A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2];
}
/* adjust the location of pointer of B */
// unsigned long long int pointerB = (unsigned long long int)B;
int B_index_ini = 0;
for(int b=0; b<ii; b++) {
// pointerB += (unsigned long long int)(B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2]*sizeof(FLOAT));
B_index_ini += B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2];
}
for(int f = 0; f < num_features; f++) // num_features = 31
{
// FLOAT *A_src = (FLOAT *)pointerA + f*A_SQ;
int A_index = A_index_ini + f*A_SQ;
// FLOAT *B_src = (FLOAT *)pointerB + f*B_SQ;
int B_index = B_index_ini + f*B_SQ;
// FLOAT *A_src2 =A_src+XA0;
A_index += XA0;
FLOAT val = 0;
// FLOAT *A_off = A_src2+y;
A_index += y;
// FLOAT *B_off = B_src;
for (int xp = 0; xp < B_dims[1]; xp++)
{
// FLOAT *A_temp = A_off;
int A_index_tmp = A_index;
// FLOAT *B_temp = B_off;
int B_index_tmp = B_index;
for (int yp = 0; yp < B_dims[0]; yp++)
{
// val += *(A_temp++) * *(B_temp++);
if(sizeof(FLOAT) == sizeof(float)) // if configured to use single precision
{
FLOAT A_val = tex1Dfetch(A, A_index_tmp);
FLOAT B_val = tex1Dfetch(B, B_index_tmp);
val += A_val * B_val;
}
else // if configured to use double precision
{
int2 A_val = tex1Dfetch(A_double, A_index_tmp);
int2 B_val = tex1Dfetch(B_double, B_index_tmp);
val += __hiloint2double(A_val.y, A_val.x) * __hiloint2double(B_val.y, B_val.x);
}
A_index_tmp++;
B_index_tmp++;
}
// A_off+=A_dims[0];
A_index += A_dims[0];
// B_off+=B_dims[0];
B_index += B_dims[0];
}
add_val += val;
}
*(dst + (idx_x*C_dims[0] + idx_y)) += add_val;
}
return;
}
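/* Added note: process_part is structurally identical to process_root above; the only
differences are the level range it covers (0 <= level < L_MAX - interval, i.e. the
higher-resolution levels used for the part filters) and, correspondingly, the
starting level of the output-plane offset computation. */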
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
__global__
void
inverse_Q(
FLOAT *src_start,
int *size_array,
int *error_array,
int error_array_num,
int NoP,
int *PIDX_array,
int *numpart,
int NoC,
int max_numpart,
int interval,
int L_MAX,
int pid,
int device_number
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int kk = blockIdx.y * blockDim.y + threadIdx.y;
int jj = threadIdx.z;
int L = blockIdx.z;
int numpart_jj;
int C_y;
if(0<=jj && jj<NoC)
{
numpart_jj = numpart[jj];
C_y = numpart_jj/device_number;
if(numpart_jj%device_number != 0){
C_y++;
}
kk = kk + pid * C_y;
if(kk < C_y * pid || kk >= C_y * (pid + 1)){
return ;
}
} else return ;
if(0<=L && L < (L_MAX-interval))
{
/* loop condition */
for(int h=0; h<error_array_num; h++) {
if(L==error_array[h]){
return;
}
}
if( 0<=kk && kk < numpart_jj )
{
int PIDX = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + kk];
int dim0 = size_array[L*NoP*2 + PIDX*2];
int dim1 = size_array[L*NoP*2 + PIDX*2+1];
if( idx < 0 || dim0*dim1 <= idx) return;
/* pointer adjustment */
FLOAT *src;
unsigned long long int ptr_adjuster = (unsigned long long int)src_start;
for(int i=0; i<L; i++) {
/* apply error condition */
int error_flag=0;
for(int h=0; h<error_array_num; h++) {
if(i==error_array[h]){
error_flag = 1;
}
}
if(error_flag != 0) {
continue;
}
for(int j=0; j<NoP; j++) {
int height = size_array[i*NoP*2 + j*2];
int width = size_array[i*NoP*2 + j*2+1];
ptr_adjuster += (unsigned long long int)(height*width*sizeof(FLOAT));
}
}
for(int j=0; j<PIDX; j++) {
int height = size_array[L*NoP*2 + j*2];
int width = size_array[L*NoP*2 + j*2+1];
ptr_adjuster += (unsigned long long int)(height*width*sizeof(FLOAT));
}
src = (FLOAT *)ptr_adjuster;
*(src + idx) *= -1;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// dt helper function
__device__
void
dt_helper(FLOAT *src, FLOAT *dst, int *ptr, int step, int s1, int s2, int d1, int d2, FLOAT a, FLOAT b)
{
if (d2 >= d1)
{
int d = (d1+d2) >> 1;
int ds =d*step;
int s = s1;
FLOAT src_ss = *(src+s*step);
for (int p = s1+1; p <= s2; p++)
{
int t1 = d-s;
int t2 = d-p;
if (src_ss + a*t1*t1 + b*t1 > *(src+p*step) + a*t2*t2 + b*t2)
{
s = p;
src_ss = *(src+s*step);
}
}
int D = d-s;
dst[ds] = *(src+s*step) + a*D*D + b*D;
ptr[ds] = s;
dt_helper(src, dst, ptr, step, s1, s, d1, d-1, a, b);
dt_helper(src, dst, ptr, step, s, s2, d+1, d2, a, b);
}
}
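/* Added note: for every output index d in [d1, d2], dt_helper computes
dst[d*step] = min over s in [s1, s2] of ( src[s*step] + a*(d-s)^2 + b*(d-s) )
and records the minimizing s in ptr[d*step]. Since the minimizer is monotone in d for
this cost, solving the midpoint first lets the two halves be solved recursively on
restricted s-ranges -- the divide-and-conquer form of the 1-D generalized distance
transform used by dt1d_x and dt1d_y below. A naive O(n^2) equivalent (given the
monotone-minimizer property), for illustration only:
// for (int d = d1; d <= d2; d++) {
// FLOAT best = src[s1*step] + a*(d-s1)*(d-s1) + b*(d-s1);
// int best_s = s1;
// for (int s = s1+1; s <= s2; s++) {
// FLOAT v = src[s*step] + a*(d-s)*(d-s) + b*(d-s);
// if (v < best) { best = v; best_s = s; }
// }
// dst[d*step] = best; ptr[d*step] = best_s;
// }
*/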
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//sub function of dt
extern "C"
__global__
void
dt1d_x(
FLOAT *src_start, // part_C_dev
FLOAT *dst_start, // tmpM_dev
int *ptr_start, // tmpIy_dev
int *DID_4_array, // DID_4_array_dev
FLOAT *def_array, // def_array_dev
int *size_array, // pm_size_array_dev
int NoP, // NoP
int *PIDX_array, // PIDX_array_dev
int *error_array, // part_error_array_dev
int error_array_num, // part_error_array_num
int *numpart, // numpart_jj
int NoC, // NoC
int max_numpart, // max_numpart
int interval, // interval
int L_MAX, // L_MAX
int pid, // pid
int device_number // device_number
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int kk = blockIdx.y * blockDim.y + threadIdx.y;
int jj = threadIdx.z;
int L = blockIdx.z;
int numpart_jj;
int C_y;
if(0<=jj && jj<NoC)
{
numpart_jj = numpart[jj];
C_y = numpart_jj/device_number;
if(numpart_jj%device_number != 0){
C_y++;
}
kk = kk + pid * C_y;
if(kk < C_y * pid || kk >= C_y * (pid + 1)){
return ;
}
} else{
return ;
}
if(0<=L && L<(L_MAX-interval))
{
/* loop condition */
for(int h=0; h<error_array_num; h++) {
if(L==error_array[h]){
return;
}
}
if(0<=kk && kk<numpart_jj)
{
int PIDX = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + kk];
int dim1 = size_array[L*NoP*2 + PIDX*2+1];
if( idx < 0 || dim1 <= idx ) return;
int dim0 = size_array[L*NoP*2 + PIDX*2];
int XD=0;
int step = 1;
int n = dim0;
int DID_4 = DID_4_array[L*(NoC*max_numpart) + jj*max_numpart + kk];
FLOAT a = def_array[DID_4+2];
FLOAT b = def_array[DID_4+3];
/* pointer adjustment */
unsigned long long int adj_src = (unsigned long long int)src_start;
unsigned long long int adj_dst = (unsigned long long int)dst_start;
unsigned long long int adj_ptr = (unsigned long long int)ptr_start;
/* for src */
for(int i=0; i<L; i++) {
/* apply error condition */
int error_flag=0;
for(int h=0; h<error_array_num; h++) {
if(i==error_array[h]){
error_flag = 1;
}
}
if(error_flag != 0) {
continue;
}
for(int j=0; j<NoP; j++) {
int height = size_array[i*NoP*2 + j*2];
int width = size_array[i*NoP*2 + j*2+1];
adj_src += (unsigned long long int)(height*width*sizeof(FLOAT));
}
}
for(int j=0; j<PIDX; j++) {
int height = size_array[L*NoP*2 + j*2];
int width = size_array[L*NoP*2 + j*2+1];
adj_src += (unsigned long long int)(height*width*sizeof(FLOAT));
}
/* for dst, ptr */
// adjust "dst" to tmpM[L][jj][kk]
// adjust "ptr" to tmpIy[L][jj][kk]
for(int i=0; i<L; i++) {
/* apply error condition */
int error_flag=0;
for(int h=0; h<error_array_num; h++) {
if(i==error_array[h]){
error_flag = 1;
}
}
if(error_flag != 0) {
continue;
}
for(int j=0; j<NoC; j++) {
for(int k=0; k<numpart[j]; k++) {
int PIDX_tmp = PIDX_array[i*(NoC*max_numpart) + j*max_numpart + k];
int dims0_tmp = size_array[i*NoP*2 + PIDX_tmp*2];
int dims1_tmp = size_array[i*NoP*2 + PIDX_tmp*2+1];
adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int));
}
}
}
for(int i=0; i<jj; i++) {
for(int j=0; j<numpart[i]; j++) {
int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + i*max_numpart + j]; // PIDX_array[L][i][j]
int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; // size_array[L][PIDX_tmp*2]
int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; // size_array[L][PIDX_tmp*2+1]
adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int));
}
}
for(int j=0; j<kk; j++) {
int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + j]; // PIDX_array[L][jj][j]
int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; // size_array[L][PIDX_tmp*2]
int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; // size_array[L][PIDX_tmp*2+1]
adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int));
}
FLOAT *src = (FLOAT *)adj_src;
FLOAT *dst = (FLOAT *)adj_dst;
int *ptr = (int *)adj_ptr;
/* main calculation of dt1d_x */
XD = idx*dim0;
dt_helper(src+XD, dst+XD, ptr+XD, step, 0, n-1, 0, n-1, a, b);
}
}
}
extern "C"
__global__
void
dt1d_y(
FLOAT *src_start, // tmpM_dev
FLOAT *dst_start, // M_dev
int *ptr_start, // tmpIx_dev
int *DID_4_array, // DID_4_array_dev
FLOAT *def_array, // def_array_dev
int NoP, // NoP
int *size_array, // pm_size_array_dev
int *numpart, // numpart_jj
int *PIDX_array, // PIDX_array_dev
int NoC, // NoC
int max_numpart, // max_numpart
int interval, // interval
int L_MAX, // L_MAX
int *error_array, // part_error_array_dev
int error_array_num, // part_error_array_num
int pid, // pid
int device_number // device_number
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int kk = blockIdx.y * blockDim.y + threadIdx.y;
int jj = threadIdx.z;
int L = blockIdx.z;
int numpart_jj;
int C_y;
if(0<=jj && jj<NoC)
{
numpart_jj = numpart[jj];
C_y = numpart_jj/device_number;
if(numpart_jj%device_number != 0){
C_y++;
}
kk = kk + pid * C_y;
if(kk < C_y * pid || kk >= C_y * (pid + 1)){
return ;
}
} else{
return ;
}
if(0<=L && L<(L_MAX-interval))
{
/* loop condition */
for(int h=0; h<error_array_num; h++) {
if(L==error_array[h]){
return;
}
}
if( 0<=kk && kk<numpart_jj)
{
int PIDX = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + kk];
int dim0 = size_array[L*NoP*2 + PIDX*2];
if( idx < 0 || dim0 <= idx ) return;
int dim1 = size_array[L*NoP*2 + PIDX*2+1];
int step = dim0;
int n = dim1;
int DID_4 = DID_4_array[L*(NoC*max_numpart) + jj*max_numpart + kk];
FLOAT a = def_array[DID_4]; // ax
FLOAT b = def_array[DID_4+1]; // bx
/* pointer adjustment */
unsigned long long int adj_src = (unsigned long long int)src_start;
unsigned long long int adj_dst = (unsigned long long int)dst_start;
unsigned long long int adj_ptr = (unsigned long long int)ptr_start;
/* for src, dst, ptr */
/* adjust "src" to tmpM[L][jj][kk] */
/* adjust "dst" to M[L][jj][kk] */
/* adjust "ptr" to tmpIx[L][jj][kk] */
for(int i=0; i<L; i++) {
/* apply error condition */
int error_flag=0;
for(int h=0; h<error_array_num; h++) {
if(i==error_array[h]){
error_flag = 1;
}
}
if(error_flag != 0) {
continue;
}
for(int j=0; j<NoC; j++) {
for(int k=0; k<numpart[j]; k++) {
int PIDX_tmp = PIDX_array[i*(NoC*max_numpart) + j*max_numpart + k];
int dims0_tmp = size_array[i*NoP*2 + PIDX_tmp*2];
int dims1_tmp = size_array[i*NoP*2 + PIDX_tmp*2+1];
adj_src += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int));
}
}
}
for(int i=0; i<jj; i++) {
for(int j=0; j<numpart[i]; j++) {
int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + i*max_numpart + j]; // PIDX_array[L][i][j]
int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; // size_array[L][PIDX_tmp*2]
int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; // size_array[L][PIDX_tmp*2+1]
adj_src += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int));
}
}
for(int j=0; j<kk; j++) {
int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + j];
int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2];
int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1];
adj_src += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int));
}
FLOAT *src = (FLOAT *)adj_src;
FLOAT *dst = (FLOAT *)adj_dst;
int *ptr = (int *)adj_ptr;
dt_helper(src+idx, dst+idx, ptr+idx, step, 0, n-1, 0, n-1, a, b);
}
}
}
/*************************************************************/
/*************************************************************/
/* original source of dt function loop */
// for (int x = 0; x < dims[1]; x++)
// {
// dt1d(vals+XD, tmpM+XD, tmpIy+XD, 1, dims[0], ay, by);
// XD+=dims[0];
// }
// for (int y = 0; y < dims[0]; y++)
// {
// dt1d(tmpM+y, M+y, tmpIx+y, dims[0], dims[1], ax, bx);
// }
/*************************************************************/
/*************************************************************/
extern "C"
__global__
void
calc_a_score(
int IWID,
int IHEI,
FLOAT scale,
int padx_n,
int pady_n,
int *RX_array,
int *RY_array,
FLOAT *ac_score,
FLOAT *score_array,
int *ssize_array,
int NoC,
int *size_score_array
)
{
int ii = blockIdx.x * blockDim.x + threadIdx.x;
int jj = blockIdx.y * blockDim.y + threadIdx.y;
int component_jj = threadIdx.z;
if(0<=component_jj && component_jj < NoC)
{
unsigned long long int pointer_score = (unsigned long long int)score_array;
unsigned long long int pointer_ssize = (unsigned long long int)ssize_array;
unsigned long long int pointer_RX = (unsigned long long int)RX_array;
unsigned long long int pointer_RY = (unsigned long long int)RY_array;
for(int k=0; k<component_jj; k++) {
pointer_score += (unsigned long long int)size_score_array[k];
pointer_ssize += (unsigned long long int)(sizeof(int));
pointer_RX += (unsigned long long int)(sizeof(int));
pointer_RY += (unsigned long long int)(sizeof(int));
}
FLOAT *score = (FLOAT *)pointer_score;
int ssize0 = *((int *)pointer_ssize);
int ssize1 = *((int *)pointer_ssize + sizeof(int));
int RX = *((int *)pointer_RX);
int RY = *((int *)pointer_RY);
if(0<=ii && ii<IWID && 0<=jj && jj<IHEI)
{
int Xn = (int)((FLOAT)ii/scale+padx_n);
int Yn = (int)((FLOAT)jj/scale+pady_n);
if(Yn<ssize0 && Xn<ssize1)
{
FLOAT sc = score[Yn+Xn*ssize0];
int Im_Y = jj+RY;
int Im_X = ii+RX;
if(Im_Y<IHEI && Im_X<IWID)
{
FLOAT *PP = ac_score+Im_Y+Im_X*IHEI;
if(sc>*PP) *PP=sc;
}
}
}
}
/*************************************************************/
/*************************************************************/
/* original source of calc_a_score loop */
// for(int ii=0;ii<IWID;ii++)
// {
// int Xn=(int)((FLOAT)ii/scale+padx_n);
// for(int jj=0;jj<IHEI;jj++)
// {
// int Yn =(int)((FLOAT)jj/scale+pady_n);
// if(Yn<ssize[0] && Xn<ssize[1])
// {
// FLOAT sc = score[Yn+Xn*ssize[0]]; //get score of pixel
// int Im_Y = jj+RY;
// int Im_X = ii+RX;
// if(Im_Y<IHEI && Im_X<IWID)
// {
// FLOAT *PP=ac_score+Im_Y+Im_X*IHEI; //consider root rectangle size
// if(sc>*PP) *PP=sc; //save max score
// }
// }
// }
// }
/*************************************************************/
/*************************************************************/
}
#define max_i(x, y) ((x)>=(y) ? (x) : (y))
#define min_i(x, y) ((x)<=(y) ? (x) : (y))
/* atomic function dealing with double precision */
__device__
double
atomicAdd_double
(
double *address,
double val
)
{
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
}while(assumed != old);
return __longlong_as_double(old);
}
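/* Added note: this is the usual compare-and-swap emulation of a double-precision
atomicAdd -- the value is reinterpreted as unsigned long long and the CAS is retried
until no other thread has modified *address between the read and the swap.
calc_feature below falls back to it whenever FLOAT is compiled as double. */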
extern "C"
__global__
void
calc_feature
(
FLOAT *SRC,
int *ISIZE,
FLOAT *HHist,
int vis_R1,
int vis_R0,
int sbin
)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
const FLOAT Hcos[9]={1.0000,0.9397,0.7660,0.5000,0.1736,-0.1736,-0.5000,-0.7660,-0.9397};
const FLOAT Hsin[9]={0.0000,0.3420,0.6428,0.8660,0.9848,0.9848,0.8660,0.6428,0.3420};
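/* Added note: Hcos/Hsin are cos/sin of 0, 20, ..., 160 degrees; the loop below
projects the pixel gradient onto each of the 9 directions and uses the sign of the
strongest projection to snap it to one of 18 signed orientation bins (best_o or
best_o + 9). */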
if(1<=x && x<vis_R1 && 1<=y && y<vis_R0)
{
//input size
const int height=ISIZE[0]; //{268,268,134,67,233,117,203,203,177,154,89,203,154,77}
const int width=ISIZE[1]; //{448,112,224,390,195,340,170,296,257,148,340,257,129}
const int dims[2]={height,width};
//size of histograms and of the norm-calculation space
const int blocks[2] = {(int)floor(double(height)/double(sbin)+0.5),(int)floor(double(width)/double(sbin)+0.5)};//{67,112}....sbine=4
const int BLOCK_SQ = blocks[0]*blocks[1];//{7504}...
const int vp0=dims[0]-2;
const int vp1=dims[1]-2;
const int SQUARE =dims[0]*dims[1];
const FLOAT SBIN = FLOAT(sbin);
// for(int x=1;x<vis_R[1];x++)
// {
//parameters for interpolation
FLOAT xp=((FLOAT)x+0.5)/SBIN-0.5;
int ixp=(int)floor(xp);
int ixpp=ixp+1;
int ixp_b = ixp * blocks[0];
int ixpp_b = ixp_b + blocks[0];
FLOAT vx0=xp-(FLOAT)ixp;
FLOAT vx1=1.0-vx0;
bool flag1=true,flag2=true,flagX=true;
if(ixp<0)
{
flag1=false;
flagX=false;
}
if(ixpp>=blocks[1])
{
flag2=false;
flagX=false;
}
int YC=min_i(x,vp1)*dims[0];
FLOAT *SRC_YC = SRC+YC;
// for(int y=1;y<vis_R[0];y++)
// {
//first color channel
FLOAT *s=SRC_YC+min_i(y,vp0);
FLOAT dy=*(s+1)-*(s-1);
FLOAT dx=*(s+dims[0])-*(s-dims[0]);
FLOAT v=dx*dx+dy*dy;
//second color channel
s+=SQUARE;
FLOAT dy2=*(s+1)-*(s-1);
FLOAT dx2=*(s+dims[0])-*(s-dims[0]);
FLOAT v2=dx2*dx2+dy2*dy2;
//third color channel
s+=SQUARE;
FLOAT dy3=*(s+1)-*(s-1);
FLOAT dx3=*(s+dims[0])-*(s-dims[0]);
FLOAT v3=dx3*dx3+dy3*dy3;
//pick channel with strongest gradient
if(v2>v)
{
v=v2;
dx=dx2;
dy=dy2;
}
if(v3>v)
{
v=v3;
dx=dx3;
dy=dy3;
}
FLOAT best_dot=0.0;
int best_o=0;
//snap to one of 18 orientations
for(int o=0;o<9;o++)
{
FLOAT dot=Hcos[o]*dx+Hsin[o]*dy;
if(dot>best_dot)
{
best_dot=dot;
best_o=o;
}
else if(-dot>best_dot)
{
best_dot=-dot;
best_o=o+9;
}
}
//Add to 4 histograms around pixel using linear interpolation
FLOAT yp=((FLOAT)y+0.5)/SBIN-0.5;
int iyp=(int)floor(yp);
int iypp=iyp+1;
FLOAT vy0=yp-(FLOAT)iyp;
FLOAT vy1=1.0-vy0;
v=sqrt(v);
int ODim=best_o*BLOCK_SQ;
FLOAT *Htemp = HHist+ODim;
FLOAT vx1Xv =vx1*v;
FLOAT vx0Xv = vx0*v;
if(flagX)
{
if(iyp>=0)
{
// *(Htemp+ ixp_b+iyp)+=vy1*vx1Xv; //1-xy2
// *(Htemp+ ixpp_b+iyp)+=vy1*vx0Xv;
if(sizeof(FLOAT) == sizeof(float)) {
atomicAdd((float*)(Htemp + ixp_b + iyp), (float)(vy1*vx1Xv));
atomicAdd((float*)(Htemp + ixpp_b + iyp), (float)(vy1*vx0Xv));
}else{
atomicAdd_double((double*)(Htemp + ixp_b + iyp), (double)(vy1*vx1Xv));
atomicAdd_double((double*)(Htemp + ixpp_b + iyp), (double)(vy1*vx0Xv));
}
}
if (iypp<blocks[0])
{
// *(Htemp+ ixp_b+iypp)+=vy0*vx1Xv;
// *(Htemp+ ixpp_b+iypp)+=vy0*vx0Xv;
if(sizeof(FLOAT) == sizeof(float)) {
atomicAdd((float*)(Htemp + ixp_b + iypp), (float)(vy0*vx1Xv));
atomicAdd((float*)(Htemp + ixpp_b + iypp), (float)(vy0*vx0Xv));
}else{
atomicAdd_double((double*)(Htemp + ixp_b + iypp), (double)(vy0*vx1Xv));
atomicAdd_double((double*)(Htemp + ixpp_b + iypp), (double)(vy0*vx0Xv));
}
}
}
else if(flag1)
{
if (iyp>=0) {
// *(Htemp+ixp_b+iyp)+=vy1*vx1Xv;
if(sizeof(FLOAT) == sizeof(float)) {
atomicAdd((float*)(Htemp + ixp_b + iyp), (float)(vy1*vx1Xv));
}else{
atomicAdd_double((double*)(Htemp + ixp_b + iyp), (double)(vy1*vx1Xv));
}
}
if (iypp<blocks[0]) {
// *(Htemp+ixp_b+iypp)+=vy0*vx1Xv;
if(sizeof(FLOAT) == sizeof(float)) {
atomicAdd((float*)(Htemp + ixp_b + iypp), (float)(vy0*vx1Xv));
}else{
atomicAdd_double((double*)(Htemp + ixp_b + iypp), (double)(vy0*vx1Xv));
}
}
}
else if(flag2)
{
if(iyp>=0) {
// *(Htemp+ixpp_b+iyp)+=vy1*vx0Xv;
if(sizeof(FLOAT)==sizeof(float)) {
atomicAdd((float*)(Htemp + ixpp_b + iyp), (float)(vy1*vx0Xv));
}else{
atomicAdd_double((double*)(Htemp + ixpp_b + iyp), (double)(vy1*vx0Xv));
}
}
if(iypp<blocks[0]) {
// *(Htemp+ixpp_b+iypp)+=vy0*vx0Xv;
if(sizeof(FLOAT)==sizeof(float)) {
atomicAdd((float*)(Htemp + ixpp_b + iypp), (float)(vy0*vx0Xv));
}else{
atomicAdd_double((double*)(Htemp + ixpp_b + iypp), (double)(vy0*vx0Xv));
}
}
}
// }
//}
}
/*************************************************************/
/*************************************************************/
/* original source of calc_feature loop */
// for(int x=1;x<vis_R[1];x++)
// {
// //parameters for interpolation
// FLOAT xp=((FLOAT)x+0.5)/SBIN-0.5;
// int ixp=(int)floor(xp);
// int ixpp=ixp+1;
// int ixp_b = ixp * blocks[0];
// int ixpp_b = ixp_b + blocks[0];
// FLOAT vx0=xp-(FLOAT)ixp;
// FLOAT vx1=1.0-vx0;
// bool flag1=true,flag2=true,flagX=true;
// if(ixp<0)
// {
// flag1=false;
// flagX=false;
// }
// if(ixpp>=blocks[1])
// {
// flag2=false;
// flagX=false;
// }
// int YC=min_i(x,vp1)*dims[0];
// FLOAT *SRC_YC = SRC+YC;
// for(int y=1;y<vis_R[0];y++)
// {
// //first color channel
// FLOAT *s=SRC_YC+min_i(y,vp0);
// FLOAT dy=*(s+1)-*(s-1);
// FLOAT dx=*(s+dims[0])-*(s-dims[0]);
// FLOAT v=dx*dx+dy*dy;
// //second color channel
// s+=SQUARE;
// FLOAT dy2=*(s+1)-*(s-1);
// FLOAT dx2=*(s+dims[0])-*(s-dims[0]);
// FLOAT v2=dx2*dx2+dy2*dy2;
// //third color channel
// s+=SQUARE;
// FLOAT dy3=*(s+1)-*(s-1);
// FLOAT dx3=*(s+dims[0])-*(s-dims[0]);
// FLOAT v3=dx3*dx3+dy3*dy3;
// //pick channel with strongest gradient
// if(v2>v)
// {
// v=v2;
// dx=dx2;
// dy=dy2;
// }
// if(v3>v)
// {
// v=v3;
// dx=dx3;
// dy=dy3;
// }
// FLOAT best_dot=0.0;
// int best_o=0;
// //snap to one of 18 orientations
// for(int o=0;o<9;o++)
// {
// FLOAT dot=Hcos[o]*dx+Hsin[o]*dy;
// if(dot>best_dot)
// {
// best_dot=dot;
// best_o=o;
// }
// else if(-dot>best_dot)
// {
// best_dot=-dot;
// best_o=o+9;
// }
// }
// //Add to 4 histgrams around pixel using linear interpolation
// FLOAT yp=((FLOAT)y+0.5)/SBIN-0.5;
// int iyp=(int)floor(yp);
// int iypp=iyp+1;
// FLOAT vy0=yp-(FLOAT)iyp;
// FLOAT vy1=1.0-vy0;
// v=sqrt(v);
// int ODim=best_o*BLOCK_SQ;
// FLOAT *Htemp = HHist+ODim;
// FLOAT vx1Xv =vx1*v;
// FLOAT vx0Xv = vx0*v;
// if(flagX)
// {
// if(iyp>=0)
// {
// *(Htemp+ ixp_b+iyp)+=vy1*vx1Xv; //1-xy2
// *(Htemp+ ixpp_b+iyp)+=vy1*vx0Xv;
// }
// if (iypp<blocks[0])
// {
// *(Htemp+ ixp_b+iypp)+=vy0*vx1Xv;
// *(Htemp+ ixpp_b+iypp)+=vy0*vx0Xv;
// }
// }
// else if(flag1)
// {
// if (iyp>=0) *(Htemp+ixp_b+iyp)+=vy1*vx1Xv;
// if (iypp<blocks[0]) *(Htemp+ixp_b+iypp)+=vy0*vx1Xv;
// }
// else if(flag2)
// {
// if(iyp>=0) *(Htemp+ixpp_b+iyp)+=vy1*vx0Xv;
// if(iypp<blocks[0]) *(Htemp+ixpp_b+iypp)+=vy0*vx0Xv;
// }
// }
// }
/*************************************************************/
/*************************************************************/
}
| ab8f921a75d3a15e31271dbb315049836c284aaa.cu | #include<stdio.h>
#include<math.h>
#include"for_use_GPU.h"
/* declaration of texture memory */
//texture<FLOAT> A;
//texture<FLOAT> B;
texture<float, cudaTextureType1D, cudaReadModeElementType> A;
texture<float, cudaTextureType1D, cudaReadModeElementType> B;
texture<int2, cudaTextureType1D, cudaReadModeElementType> A_double;
texture<int2, cudaTextureType1D, cudaReadModeElementType> B_double;
//thread process
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// convolve A and B(non_symmetric)
//unsigned __stdcall process(void *thread_arg) {
/********************************************/
/* function for calculating root */
/********************************************/
extern "C"
__global__
void
process_root
(
//FLOAT *A,
//FLOAT *B,
FLOAT *C,
int *A_dims_array,
int *B_dims_array,
int len,
int interval,
int L_MAX,
int *error_array,
int error_array_num,
int pid,
int device_number
)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
int ii = blockIdx.z % len;
int level = blockIdx.z / len;
int A_dims[3] = { A_dims_array[level*3], A_dims_array[level*3+1], A_dims_array[level*3+2] };
int B_dims[3] = { B_dims_array[ii*3], B_dims_array[ii*3+1], B_dims_array[ii*3+2] };
int C_dims[2] = { A_dims[0] - B_dims[0] + 1, A_dims[1] - B_dims[1] + 1 };
int C_x = C_dims[1]/device_number;
if(C_dims[1]%device_number != 0){
C_x++;
}
idx_x = idx_x + pid * C_x;
if(idx_x < C_x * pid || idx_x >= C_x * (pid + 1)){
return ;
}
if(0 <= ii && ii < len && 0 <= idx_x && idx_x < C_dims[1] && 0 <= idx_y && idx_y < C_dims[0] && interval <= level && level < L_MAX ) {
int num_features = A_dims[2];
const int A_SQ = A_dims[0]*A_dims[1];
const int B_SQ = B_dims[0]*B_dims[1];
FLOAT add_val = 0;
int x = idx_x;
int y = idx_y;
int XA0 = A_dims[0]*x;
/* apply loop condition */
for(int i=0; i<error_array_num; i++){
if(error_array[i] == level){
return;
}
}
/* adjust the location of pointer of C */
FLOAT *dst;
unsigned long long int pointer = (unsigned long long int)C;
for(int a=interval; a<level; a++) {
for(int b=0; b<len; b++) {
int height = A_dims_array[a*3] - B_dims_array[b*3] + 1;
int width = A_dims_array[a*3 + 1] - B_dims_array[b*3 + 1] + 1;
/* error semantics */
if (height < 1 || width < 1){
printf("Invalid input in GPU\n");
return;
}
pointer += (unsigned long long int)(height*width*sizeof(FLOAT));
}
}
for(int b=0; b<ii; b++){
int height = A_dims_array[level*3] - B_dims_array[b*3] + 1;
int width = A_dims_array[level*3 + 1] - B_dims_array[b*3 + 1] + 1;
/* error semantics */
if (height < 1 || width < 1){
printf("Invalid input in GPU\n");
return;
}
pointer += (unsigned long long int)(height*width*sizeof(FLOAT));
}
dst = (FLOAT *)pointer;
/* adjust the location of pointer of A */
//unsigned long long int pointerA = (unsigned long long int)A;
int A_index_ini = 0;
for(int a=0; a<level; a++) {
// pointerA += (unsigned long long int)(A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2]*sizeof(FLOAT));
A_index_ini += A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2];
}
/* adjust the location of pointer of B */
//unsigned long long int pointerB = (unsigned long long int)B;
int B_index_ini = 0;
for(int b=0; b<ii; b++) {
// pointerB += (unsigned long long int)(B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2]*sizeof(FLOAT));
B_index_ini += B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2];
}
for(int f = 0; f < num_features; f++) // num_features = 31
{
// FLOAT *A_src = (FLOAT *)pointerA + f*A_SQ;
int A_index = A_index_ini + f*A_SQ;
// FLOAT *B_src = (FLOAT *)pointerB + f*B_SQ;
int B_index = B_index_ini + f*B_SQ;
// FLOAT *A_src2 =A_src+XA0;
A_index += XA0;
FLOAT val = 0;
// FLOAT *A_off = A_src2+y;
A_index += y;
// FLOAT *B_off = B_src;
for (int xp = 0; xp < B_dims[1]; xp++)
{
// FLOAT *A_temp = A_off;
int A_index_tmp = A_index;
// FLOAT *B_temp = B_off;
int B_index_tmp = B_index;
for (int yp = 0; yp < B_dims[0]; yp++)
{
// val += *(A_temp++) * *(B_temp++);
if(sizeof(FLOAT) == sizeof(float)) // if configured to use single precision
{
FLOAT A_val = tex1Dfetch(A, A_index_tmp);
FLOAT B_val = tex1Dfetch(B, B_index_tmp);
val += A_val * B_val;
}
else
{ // if configured to use double precision
int2 A_val = tex1Dfetch(A_double, A_index_tmp);
int2 B_val = tex1Dfetch(B_double, B_index_tmp);
val += __hiloint2double(A_val.y, A_val.x) * __hiloint2double(B_val.y, B_val.x);
}
A_index_tmp++;
B_index_tmp++;
}
// A_off+=A_dims[0];
A_index += A_dims[0];
// B_off+=B_dims[0];
B_index += B_dims[0];
}
add_val += val;
}
*(dst + (idx_x*C_dims[0] + idx_y)) += add_val;
}
return;
}
/********************************************/
/* function for calculating part */
/********************************************/
extern "C"
__global__
void
process_part
(
//FLOAT *A,
//FLOAT *B,
FLOAT *C,
int *A_dims_array,
int *B_dims_array,
int len,
int interval,
int L_MAX,
int *error_array,
int error_array_num,
int pid,
int device_number
)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
int ii = blockIdx.z % len;
int level = blockIdx.z / len;
int A_dims[3] = { A_dims_array[level*3], A_dims_array[level*3+1], A_dims_array[level*3+2] };
int B_dims[3] = { B_dims_array[ii*3], B_dims_array[ii*3+1], B_dims_array[ii*3+2] };
int C_dims[2] = { A_dims[0] - B_dims[0] + 1, A_dims[1] - B_dims[1] + 1 };
int C_x = C_dims[1]/device_number;
if(C_dims[1]%device_number != 0){
C_x++;
}
idx_x = idx_x + pid * C_x;
if(idx_x < C_x * pid || idx_x >= C_x * (pid + 1)){
return ;
}
if(0 <= ii && ii < len && 0 <= idx_x && idx_x < C_dims[1] && 0 <= idx_y && idx_y < C_dims[0] && 0 <= level && level < (L_MAX - interval) ) {
int num_features = A_dims[2];
const int A_SQ = A_dims[0]*A_dims[1];
const int B_SQ = B_dims[0]*B_dims[1];
FLOAT add_val = 0;
int x = idx_x;
int y = idx_y;
int XA0 = A_dims[0]*x;
/* apply loop condition */
for(int i=0; i<error_array_num; i++){
if(error_array[i] == level)
return;
}
/* adjust the location of pointer of C */
FLOAT *dst;
unsigned long long int pointer = (unsigned long long int)C;
for(int a=0; a<level; a++) {
for(int b=0; b<len; b++){
int height = A_dims_array[a*3] - B_dims_array[b*3] + 1;
int width = A_dims_array[a*3 + 1] - B_dims_array[b*3 + 1] + 1;
/* error semantics */
if(height < 1 || width < 1){
printf("Invalid input in GPU\n");
return;
}
pointer += (unsigned long long int)(height*width*sizeof(FLOAT));
}
}
for(int b=0; b<ii; b++){
int height = A_dims_array[level*3] - B_dims_array[b*3] + 1;
int width = A_dims_array[level*3 + 1] - B_dims_array[b*3 + 1] + 1;
/* error semantics */
if(height < 1 || width < 1){
printf("Invalid input in GPU\n");
return;
}
pointer += (unsigned long long int)(height*width*sizeof(FLOAT));
}
dst = (FLOAT *)pointer;
/* adjust the location of pointer of A */
// unsigned long long int pointerA = (unsigned long long int)A;
int A_index_ini = 0;
for(int a=0; a<level; a++) {
// pointerA += (unsigned long long int)(A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2]*sizeof(FLOAT));
A_index_ini += A_dims_array[a*3]*A_dims_array[a*3 + 1]*A_dims_array[a*3 + 2];
}
/* adjust the location of pointer of B */
// unsigned long long int pointerB = (unsigned long long int)B;
int B_index_ini = 0;
for(int b=0; b<ii; b++) {
// pointerB += (unsigned long long int)(B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2]*sizeof(FLOAT));
B_index_ini += B_dims_array[b*3]*B_dims_array[b*3 + 1]*B_dims_array[b*3 + 2];
}
for(int f = 0; f < num_features; f++) // num_features = 31
{
// FLOAT *A_src = (FLOAT *)pointerA + f*A_SQ;
int A_index = A_index_ini + f*A_SQ;
// FLOAT *B_src = (FLOAT *)pointerB + f*B_SQ;
int B_index = B_index_ini + f*B_SQ;
// FLOAT *A_src2 =A_src+XA0;
A_index += XA0;
FLOAT val = 0;
// FLOAT *A_off = A_src2+y;
A_index += y;
// FLOAT *B_off = B_src;
for (int xp = 0; xp < B_dims[1]; xp++)
{
// FLOAT *A_temp = A_off;
int A_index_tmp = A_index;
// FLOAT *B_temp = B_off;
int B_index_tmp = B_index;
for (int yp = 0; yp < B_dims[0]; yp++)
{
// val += *(A_temp++) * *(B_temp++);
if(sizeof(FLOAT) == sizeof(float)) // if configured to use single precision
{
FLOAT A_val = tex1Dfetch(A, A_index_tmp);
FLOAT B_val = tex1Dfetch(B, B_index_tmp);
val += A_val * B_val;
}
else // if configured to use double precision
{
int2 A_val = tex1Dfetch(A_double, A_index_tmp);
int2 B_val = tex1Dfetch(B_double, B_index_tmp);
val += __hiloint2double(A_val.y, A_val.x) * __hiloint2double(B_val.y, B_val.x);
}
A_index_tmp++;
B_index_tmp++;
}
// A_off+=A_dims[0];
A_index += A_dims[0];
// B_off+=B_dims[0];
B_index += B_dims[0];
}
add_val += val;
}
*(dst + (idx_x*C_dims[0] + idx_y)) += add_val;
}
return;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
__global__
void
inverse_Q(
FLOAT *src_start,
int *size_array,
int *error_array,
int error_array_num,
int NoP,
int *PIDX_array,
int *numpart,
int NoC,
int max_numpart,
int interval,
int L_MAX,
int pid,
int device_number
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int kk = blockIdx.y * blockDim.y + threadIdx.y;
int jj = threadIdx.z;
int L = blockIdx.z;
int numpart_jj;
int C_y;
if(0<=jj && jj<NoC)
{
numpart_jj = numpart[jj];
C_y = numpart_jj/device_number;
if(numpart_jj%device_number != 0){
C_y++;
}
kk = kk + pid * C_y;
if(kk < C_y * pid || kk >= C_y * (pid + 1)){
return ;
}
} else return ;
if(0<=L && L < (L_MAX-interval))
{
/* loop condition */
for(int h=0; h<error_array_num; h++) {
if(L==error_array[h]){
return;
}
}
if( 0<=kk && kk < numpart_jj )
{
int PIDX = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + kk];
int dim0 = size_array[L*NoP*2 + PIDX*2];
int dim1 = size_array[L*NoP*2 + PIDX*2+1];
if( idx < 0 || dim0*dim1 <= idx) return;
/* pointer adjustment */
FLOAT *src;
unsigned long long int ptr_adjuster = (unsigned long long int)src_start;
for(int i=0; i<L; i++) {
/* apply error condition */
int error_flag=0;
for(int h=0; h<error_array_num; h++) {
if(i==error_array[h]){
error_flag = 1;
}
}
if(error_flag != 0) {
continue;
}
for(int j=0; j<NoP; j++) {
int height = size_array[i*NoP*2 + j*2];
int width = size_array[i*NoP*2 + j*2+1];
ptr_adjuster += (unsigned long long int)(height*width*sizeof(FLOAT));
}
}
for(int j=0; j<PIDX; j++) {
int height = size_array[L*NoP*2 + j*2];
int width = size_array[L*NoP*2 + j*2+1];
ptr_adjuster += (unsigned long long int)(height*width*sizeof(FLOAT));
}
src = (FLOAT *)ptr_adjuster;
*(src + idx) *= -1;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// dt helper function
__device__
void
dt_helper(FLOAT *src, FLOAT *dst, int *ptr, int step, int s1, int s2, int d1, int d2, FLOAT a, FLOAT b)
{
if (d2 >= d1)
{
int d = (d1+d2) >> 1;
int ds =d*step;
int s = s1;
FLOAT src_ss = *(src+s*step);
for (int p = s1+1; p <= s2; p++)
{
int t1 = d-s;
int t2 = d-p;
if (src_ss + a*t1*t1 + b*t1 > *(src+p*step) + a*t2*t2 + b*t2)
{
s = p;
src_ss = *(src+s*step);
}
}
int D = d-s;
dst[ds] = *(src+s*step) + a*D*D + b*D;
ptr[ds] = s;
dt_helper(src, dst, ptr, step, s1, s, d1, d-1, a, b);
dt_helper(src, dst, ptr, step, s, s2, d+1, d2, a, b);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//sub function of dt
extern "C"
__global__
void
dt1d_x(
FLOAT *src_start, // part_C_dev
FLOAT *dst_start, // tmpM_dev
int *ptr_start, // tmpIy_dev
int *DID_4_array, // DID_4_array_dev
FLOAT *def_array, // def_array_dev
int *size_array, // pm_size_array_dev
int NoP, // NoP
int *PIDX_array, // PIDX_array_dev
int *error_array, // part_error_array_dev
int error_array_num, // part_error_array_num
int *numpart, // numpart_jj
int NoC, // NoC
int max_numpart, // max_numpart
int interval, // interval
int L_MAX, // L_MAX
int pid, // pid
int device_number // device_number
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int kk = blockIdx.y * blockDim.y + threadIdx.y;
int jj = threadIdx.z;
int L = blockIdx.z;
int numpart_jj;
int C_y;
if(0<=jj && jj<NoC)
{
numpart_jj = numpart[jj];
C_y = numpart_jj/device_number;
if(numpart_jj%device_number != 0){
C_y++;
}
kk = kk + pid * C_y;
if(kk < C_y * pid || kk >= C_y * (pid + 1)){
return ;
}
} else{
return ;
}
if(0<=L && L<(L_MAX-interval))
{
/* loop condition */
for(int h=0; h<error_array_num; h++) {
if(L==error_array[h]){
return;
}
}
if(0<=kk && kk<numpart_jj)
{
int PIDX = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + kk];
int dim1 = size_array[L*NoP*2 + PIDX*2+1];
if( idx < 0 || dim1 <= idx ) return;
int dim0 = size_array[L*NoP*2 + PIDX*2];
int XD=0;
int step = 1;
int n = dim0;
int DID_4 = DID_4_array[L*(NoC*max_numpart) + jj*max_numpart + kk];
FLOAT a = def_array[DID_4+2];
FLOAT b = def_array[DID_4+3];
/* pointer adjustment */
unsigned long long int adj_src = (unsigned long long int)src_start;
unsigned long long int adj_dst = (unsigned long long int)dst_start;
unsigned long long int adj_ptr = (unsigned long long int)ptr_start;
/* for src */
for(int i=0; i<L; i++) {
/* apply error condition */
int error_flag=0;
for(int h=0; h<error_array_num; h++) {
if(i==error_array[h]){
error_flag = 1;
}
}
if(error_flag != 0) {
continue;
}
for(int j=0; j<NoP; j++) {
int height = size_array[i*NoP*2 + j*2];
int width = size_array[i*NoP*2 + j*2+1];
adj_src += (unsigned long long int)(height*width*sizeof(FLOAT));
}
}
for(int j=0; j<PIDX; j++) {
int height = size_array[L*NoP*2 + j*2];
int width = size_array[L*NoP*2 + j*2+1];
adj_src += (unsigned long long int)(height*width*sizeof(FLOAT));
}
/* for dst, ptr */
// adjust "dst" to tmpM[L][jj][kk]
// adjust "ptr" to tmpIy[L][jj][kk]
for(int i=0; i<L; i++) {
/* apply error condition */
int error_flag=0;
for(int h=0; h<error_array_num; h++) {
if(i==error_array[h]){
error_flag = 1;
}
}
if(error_flag != 0) {
continue;
}
for(int j=0; j<NoC; j++) {
for(int k=0; k<numpart[j]; k++) {
int PIDX_tmp = PIDX_array[i*(NoC*max_numpart) + j*max_numpart + k];
int dims0_tmp = size_array[i*NoP*2 + PIDX_tmp*2];
int dims1_tmp = size_array[i*NoP*2 + PIDX_tmp*2+1];
adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int));
}
}
}
for(int i=0; i<jj; i++) {
for(int j=0; j<numpart[i]; j++) {
int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + i*max_numpart + j]; // PIDX_array[L][i][j]
int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; // size_array[L][PIDX_tmp*2]
int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; // size_array[L][PIDX_tmp*2+1]
adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int));
}
}
for(int j=0; j<kk; j++) {
int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + j]; // PIDX_array[L][jj][j]
int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; // size_array[L][PIDX_tmp*2]
int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; // size_array[L][PIDX_tmp*2+1]
adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int));
}
FLOAT *src = (FLOAT *)adj_src;
FLOAT *dst = (FLOAT *)adj_dst;
int *ptr = (int *)adj_ptr;
/* main calculation of dt1d_x */
XD = idx*dim0;
dt_helper(src+XD, dst+XD, ptr+XD, step, 0, n-1, 0, n-1, a, b);
}
}
}
extern "C"
__global__
void
dt1d_y(
FLOAT *src_start, // tmpM_dev
FLOAT *dst_start, // M_dev
int *ptr_start, // tmpIx_dev
int *DID_4_array, // DID_4_array_dev
FLOAT *def_array, // def_array_dev
int NoP, // NoP
int *size_array, // pm_size_array_dev
int *numpart, // numpart_jj
int *PIDX_array, // PIDX_array_dev
int NoC, // NoC
int max_numpart, // max_numpart
int interval, // interval
int L_MAX, // L_MAX
int *error_array, // part_error_array_dev
int error_array_num, // part_error_array_num
int pid, // pid
int device_number // device_number
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int kk = blockIdx.y * blockDim.y + threadIdx.y;
int jj = threadIdx.z;
int L = blockIdx.z;
int numpart_jj;
int C_y;
if(0<=jj && jj<NoC)
{
numpart_jj = numpart[jj];
C_y = numpart_jj/device_number;
if(numpart_jj%device_number != 0){
C_y++;
}
kk = kk + pid * C_y;
if(kk < C_y * pid || kk >= C_y * (pid + 1)){
return ;
}
} else{
return ;
}
if(0<=L && L<(L_MAX-interval))
{
/* loop condition */
for(int h=0; h<error_array_num; h++) {
if(L==error_array[h]){
return;
}
}
if( 0<=kk && kk<numpart_jj)
{
int PIDX = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + kk];
int dim0 = size_array[L*NoP*2 + PIDX*2];
if( idx < 0 || dim0 <= idx ) return;
int dim1 = size_array[L*NoP*2 + PIDX*2+1];
int step = dim0;
int n = dim1;
int DID_4 = DID_4_array[L*(NoC*max_numpart) + jj*max_numpart + kk];
FLOAT a = def_array[DID_4]; // ax
FLOAT b = def_array[DID_4+1]; // bx
/* pointer adjustment */
unsigned long long int adj_src = (unsigned long long int)src_start;
unsigned long long int adj_dst = (unsigned long long int)dst_start;
unsigned long long int adj_ptr = (unsigned long long int)ptr_start;
/* for src, dst, ptr */
/* adjust "src" to tmpM[L][jj][kk] */
/* adjust "dst" to M[L][jj][kk] */
/* adjust "ptr" to tmpIx[L][jj][kk] */
for(int i=0; i<L; i++) {
/* apply error condition */
int error_flag=0;
for(int h=0; h<error_array_num; h++) {
if(i==error_array[h]){
error_flag = 1;
}
}
if(error_flag != 0) {
continue;
}
for(int j=0; j<NoC; j++) {
for(int k=0; k<numpart[j]; k++) {
int PIDX_tmp = PIDX_array[i*(NoC*max_numpart) + j*max_numpart + k];
int dims0_tmp = size_array[i*NoP*2 + PIDX_tmp*2];
int dims1_tmp = size_array[i*NoP*2 + PIDX_tmp*2+1];
adj_src += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int));
}
}
}
for(int i=0; i<jj; i++) {
for(int j=0; j<numpart[i]; j++) {
int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + i*max_numpart + j]; // PIDX_array[L][i][j]
int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2]; // size_array[L][PIDX_tmp*2]
int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1]; // size_array[L][PIDX_tmp*2+1]
adj_src += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int));
}
}
for(int j=0; j<kk; j++) {
int PIDX_tmp = PIDX_array[L*(NoC*max_numpart) + jj*max_numpart + j];
int dims0_tmp = size_array[L*NoP*2 + PIDX_tmp*2];
int dims1_tmp = size_array[L*NoP*2 + PIDX_tmp*2+1];
adj_src += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_dst += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(FLOAT));
adj_ptr += (unsigned long long int)(dims0_tmp*dims1_tmp*sizeof(int));
}
FLOAT *src = (FLOAT *)adj_src;
FLOAT *dst = (FLOAT *)adj_dst;
int *ptr = (int *)adj_ptr;
dt_helper(src+idx, dst+idx, ptr+idx, step, 0, n-1, 0, n-1, a, b);
}
}
}
/*************************************************************/
/*************************************************************/
/* original source of dt function loop */
// for (int x = 0; x < dims[1]; x++)
// {
// dt1d(vals+XD, tmpM+XD, tmpIy+XD, 1, dims[0], ay, by);
// XD+=dims[0];
// }
// for (int y = 0; y < dims[0]; y++)
// {
// dt1d(tmpM+y, M+y, tmpIx+y, dims[0], dims[1], ax, bx);
// }
/*************************************************************/
/*************************************************************/
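/*************************************************************/
/*************************************************************/
/* note: thread mapping of the two kernels above (summary added for clarity)
dt1d_x gives each thread one column idx in [0, dim1) and runs dt_helper
over that column's dim0 elements with stride 1, replacing the outer x-loop
of the original code; dt1d_y gives each thread one row idx in [0, dim0)
and runs dt_helper over dim1 elements with stride dim0, replacing the
y-loop. blockIdx.z selects the pyramid level L, threadIdx.z the component
jj, and the y grid dimension the part index kk. */
/*************************************************************/
/*************************************************************/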
extern "C"
__global__
void
calc_a_score(
int IWID,
int IHEI,
FLOAT scale,
int padx_n,
int pady_n,
int *RX_array,
int *RY_array,
FLOAT *ac_score,
FLOAT *score_array,
int *ssize_array,
int NoC,
int *size_score_array
)
{
int ii = blockIdx.x * blockDim.x + threadIdx.x;
int jj = blockIdx.y * blockDim.y + threadIdx.y;
int component_jj = threadIdx.z;
if(0<=component_jj && component_jj < NoC)
{
unsigned long long int pointer_score = (unsigned long long int)score_array;
unsigned long long int pointer_ssize = (unsigned long long int)ssize_array;
unsigned long long int pointer_RX = (unsigned long long int)RX_array;
unsigned long long int pointer_RY = (unsigned long long int)RY_array;
for(int k=0; k<component_jj; k++) {
pointer_score += (unsigned long long int)size_score_array[k];
pointer_ssize += (unsigned long long int)(sizeof(int));
pointer_RX += (unsigned long long int)(sizeof(int));
pointer_RY += (unsigned long long int)(sizeof(int));
}
FLOAT *score = (FLOAT *)pointer_score;
int ssize0 = *((int *)pointer_ssize);
int ssize1 = *((int *)pointer_ssize + sizeof(int));
int RX = *((int *)pointer_RX);
int RY = *((int *)pointer_RY);
if(0<=ii && ii<IWID && 0<=jj && jj<IHEI)
{
int Xn = (int)((FLOAT)ii/scale+padx_n);
int Yn = (int)((FLOAT)jj/scale+pady_n);
if(Yn<ssize0 && Xn<ssize1)
{
FLOAT sc = score[Yn+Xn*ssize0];
int Im_Y = jj+RY;
int Im_X = ii+RX;
if(Im_Y<IHEI && Im_X<IWID)
{
FLOAT *PP = ac_score+Im_Y+Im_X*IHEI;
if(sc>*PP) *PP=sc;
}
}
}
}
/*************************************************************/
/*************************************************************/
/* original source of calc_a_score loop */
// for(int ii=0;ii<IWID;ii++)
// {
// int Xn=(int)((FLOAT)ii/scale+padx_n);
// for(int jj=0;jj<IHEI;jj++)
// {
// int Yn =(int)((FLOAT)jj/scale+pady_n);
// if(Yn<ssize[0] && Xn<ssize[1])
// {
// FLOAT sc = score[Yn+Xn*ssize[0]]; //get score of pixel
// int Im_Y = jj+RY;
// int Im_X = ii+RX;
// if(Im_Y<IHEI && Im_X<IWID)
// {
// FLOAT *PP=ac_score+Im_Y+Im_X*IHEI; //consider root rectangle size
// if(sc>*PP) *PP=sc; //save max score
// }
// }
// }
// }
/*************************************************************/
/*************************************************************/
}
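/*************************************************************/
/* note on calc_a_score (summary added for clarity): each thread handles one
image pixel (ii, jj) for one component (threadIdx.z). Xn/Yn map the pixel
back to a cell of that component's score map (ssize0 rows, column-major),
RX/RY shift the result by the component's rectangle offset, and the score is
folded into ac_score (IHEI rows, column-major) with a running maximum; note
that the max update is a plain read-modify-write, not an atomic. */
/*************************************************************/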
#define max_i(x, y) ((x)>=(y) ? (x) : (y))
#define min_i(x, y) ((x)<=(y) ? (x) : (y))
/* atomic function dealing with double precision */
__device__
double
atomicAdd_double
(
double *address,
double val
)
{
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
}while(assumed != old);
return __longlong_as_double(old);
}
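/* note: the compare-and-swap loop above is the standard work-around for the
lack of a native double-precision atomicAdd on devices older than compute
capability 6.0; on sm_60 and newer, CUDA provides atomicAdd(double*, double)
directly, so this helper is only needed for older targets. */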
extern "C"
__global__
void
calc_feature
(
FLOAT *SRC,
int *ISIZE,
FLOAT *HHist,
int vis_R1,
int vis_R0,
int sbin
)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
const FLOAT Hcos[9]={1.0000,0.9397,0.7660,0.5000,0.1736,-0.1736,-0.5000,-0.7660,-0.9397};
const FLOAT Hsin[9]={0.0000,0.3420,0.6428,0.8660,0.9848,0.9848,0.8660,0.6428,0.3420};
if(1<=x && x<vis_R1 && 1<=y && y<vis_R0)
{
//input size
const int height=ISIZE[0]; //{268,268,134,67,233,117,203,203,177,154,89,203,154,77}
const int width=ISIZE[1]; //{448,112,224,390,195,340,170,296,257,148,340,257,129}
const int dims[2]={height,width};
//size of histogram and norm calculation space
const int blocks[2] = {(int)floor(double(height)/double(sbin)+0.5),(int)floor(double(width)/double(sbin)+0.5)};//{67,112}....sbin=4
const int BLOCK_SQ = blocks[0]*blocks[1];//{7504}...
const int vp0=dims[0]-2;
const int vp1=dims[1]-2;
const int SQUARE =dims[0]*dims[1];
const FLOAT SBIN = FLOAT(sbin);
// for(int x=1;x<vis_R[1];x++)
// {
//parameters for interpolation
FLOAT xp=((FLOAT)x+0.5)/SBIN-0.5;
int ixp=(int)floor(xp);
int ixpp=ixp+1;
int ixp_b = ixp * blocks[0];
int ixpp_b = ixp_b + blocks[0];
FLOAT vx0=xp-(FLOAT)ixp;
FLOAT vx1=1.0-vx0;
bool flag1=true,flag2=true,flagX=true;
if(ixp<0)
{
flag1=false;
flagX=false;
}
if(ixpp>=blocks[1])
{
flag2=false;
flagX=false;
}
int YC=min_i(x,vp1)*dims[0];
FLOAT *SRC_YC = SRC+YC;
// for(int y=1;y<vis_R[0];y++)
// {
//first color channel
FLOAT *s=SRC_YC+min_i(y,vp0);
FLOAT dy=*(s+1)-*(s-1);
FLOAT dx=*(s+dims[0])-*(s-dims[0]);
FLOAT v=dx*dx+dy*dy;
//second color channel
s+=SQUARE;
FLOAT dy2=*(s+1)-*(s-1);
FLOAT dx2=*(s+dims[0])-*(s-dims[0]);
FLOAT v2=dx2*dx2+dy2*dy2;
//third color channel
s+=SQUARE;
FLOAT dy3=*(s+1)-*(s-1);
FLOAT dx3=*(s+dims[0])-*(s-dims[0]);
FLOAT v3=dx3*dx3+dy3*dy3;
//pick channel with strongest gradient
if(v2>v)
{
v=v2;
dx=dx2;
dy=dy2;
}
if(v3>v)
{
v=v3;
dx=dx3;
dy=dy3;
}
FLOAT best_dot=0.0;
int best_o=0;
//snap to one of 18 orientations
for(int o=0;o<9;o++)
{
FLOAT dot=Hcos[o]*dx+Hsin[o]*dy;
if(dot>best_dot)
{
best_dot=dot;
best_o=o;
}
else if(-dot>best_dot)
{
best_dot=-dot;
best_o=o+9;
}
}
//Add to 4 histograms around pixel using linear interpolation
FLOAT yp=((FLOAT)y+0.5)/SBIN-0.5;
int iyp=(int)floor(yp);
int iypp=iyp+1;
FLOAT vy0=yp-(FLOAT)iyp;
FLOAT vy1=1.0-vy0;
v=sqrt(v);
int ODim=best_o*BLOCK_SQ;
FLOAT *Htemp = HHist+ODim;
FLOAT vx1Xv =vx1*v;
FLOAT vx0Xv = vx0*v;
if(flagX)
{
if(iyp>=0)
{
// *(Htemp+ ixp_b+iyp)+=vy1*vx1Xv; // (1 - fractional x/y offsets) multiplied by the squared edge strength
// *(Htemp+ ixpp_b+iyp)+=vy1*vx0Xv;
if(sizeof(FLOAT) == sizeof(float)) {
atomicAdd((float*)(Htemp + ixp_b + iyp), (float)(vy1*vx1Xv));
atomicAdd((float*)(Htemp + ixpp_b + iyp), (float)(vy1*vx0Xv));
}else{
atomicAdd_double((double*)(Htemp + ixp_b + iyp), (double)(vy1*vx1Xv));
atomicAdd_double((double*)(Htemp + ixpp_b + iyp), (double)(vy1*vx0Xv));
}
}
if (iypp<blocks[0])
{
// *(Htemp+ ixp_b+iypp)+=vy0*vx1Xv;
// *(Htemp+ ixpp_b+iypp)+=vy0*vx0Xv;
if(sizeof(FLOAT) == sizeof(float)) {
atomicAdd((float*)(Htemp + ixp_b + iypp), (float)(vy0*vx1Xv));
atomicAdd((float*)(Htemp + ixpp_b + iypp), (float)(vy0*vx0Xv));
}else{
atomicAdd_double((double*)(Htemp + ixp_b + iypp), (double)(vy0*vx1Xv));
atomicAdd_double((double*)(Htemp + ixpp_b + iypp), (double)(vy0*vx0Xv));
}
}
}
else if(flag1)
{
if (iyp>=0) {
// *(Htemp+ixp_b+iyp)+=vy1*vx1Xv;
if(sizeof(FLOAT) == sizeof(float)) {
atomicAdd((float*)(Htemp + ixp_b + iyp), (float)(vy1*vx1Xv));
}else{
atomicAdd_double((double*)(Htemp + ixp_b + iyp), (double)(vy1*vx1Xv));
}
}
if (iypp<blocks[0]) {
// *(Htemp+ixp_b+iypp)+=vy0*vx1Xv;
if(sizeof(FLOAT) == sizeof(float)) {
atomicAdd((float*)(Htemp + ixp_b + iypp), (float)(vy0*vx1Xv));
}else{
atomicAdd_double((double*)(Htemp + ixp_b + iypp), (double)(vy0*vx1Xv));
}
}
}
else if(flag2)
{
if(iyp>=0) {
// *(Htemp+ixpp_b+iyp)+=vy1*vx0Xv;
if(sizeof(FLOAT)==sizeof(float)) {
atomicAdd((float*)(Htemp + ixpp_b + iyp), (float)(vy1*vx0Xv));
}else{
atomicAdd_double((double*)(Htemp + ixpp_b + iyp), (double)(vy1*vx0Xv));
}
}
if(iypp<blocks[0]) {
// *(Htemp+ixpp_b+iypp)+=vy0*vx0Xv;
if(sizeof(FLOAT)==sizeof(float)) {
atomicAdd((float*)(Htemp + ixpp_b + iypp), (float)(vy0*vx0Xv));
}else{
atomicAdd_double((double*)(Htemp + ixpp_b + iypp), (double)(vy0*vx0Xv));
}
}
}
// }
//}
}
/*************************************************************/
/*************************************************************/
/* original source of calc_feature loop */
// for(int x=1;x<vis_R[1];x++)
// {
// //parameters for interpolation
// FLOAT xp=((FLOAT)x+0.5)/SBIN-0.5;
// int ixp=(int)floor(xp);
// int ixpp=ixp+1;
// int ixp_b = ixp * blocks[0];
// int ixpp_b = ixp_b + blocks[0];
// FLOAT vx0=xp-(FLOAT)ixp;
// FLOAT vx1=1.0-vx0;
// bool flag1=true,flag2=true,flagX=true;
// if(ixp<0)
// {
// flag1=false;
// flagX=false;
// }
// if(ixpp>=blocks[1])
// {
// flag2=false;
// flagX=false;
// }
// int YC=min_i(x,vp1)*dims[0];
// FLOAT *SRC_YC = SRC+YC;
// for(int y=1;y<vis_R[0];y++)
// {
// //first color channel
// FLOAT *s=SRC_YC+min_i(y,vp0);
// FLOAT dy=*(s+1)-*(s-1);
// FLOAT dx=*(s+dims[0])-*(s-dims[0]);
// FLOAT v=dx*dx+dy*dy;
// //second color channel
// s+=SQUARE;
// FLOAT dy2=*(s+1)-*(s-1);
// FLOAT dx2=*(s+dims[0])-*(s-dims[0]);
// FLOAT v2=dx2*dx2+dy2*dy2;
// //third color channel
// s+=SQUARE;
// FLOAT dy3=*(s+1)-*(s-1);
// FLOAT dx3=*(s+dims[0])-*(s-dims[0]);
// FLOAT v3=dx3*dx3+dy3*dy3;
// //pick channel with strongest gradient
// if(v2>v)
// {
// v=v2;
// dx=dx2;
// dy=dy2;
// }
// if(v3>v)
// {
// v=v3;
// dx=dx3;
// dy=dy3;
// }
// FLOAT best_dot=0.0;
// int best_o=0;
// //snap to one of 18 orientations
// for(int o=0;o<9;o++)
// {
// FLOAT dot=Hcos[o]*dx+Hsin[o]*dy;
// if(dot>best_dot)
// {
// best_dot=dot;
// best_o=o;
// }
// else if(-dot>best_dot)
// {
// best_dot=-dot;
// best_o=o+9;
// }
// }
// //Add to 4 histograms around pixel using linear interpolation
// FLOAT yp=((FLOAT)y+0.5)/SBIN-0.5;
// int iyp=(int)floor(yp);
// int iypp=iyp+1;
// FLOAT vy0=yp-(FLOAT)iyp;
// FLOAT vy1=1.0-vy0;
// v=sqrt(v);
// int ODim=best_o*BLOCK_SQ;
// FLOAT *Htemp = HHist+ODim;
// FLOAT vx1Xv =vx1*v;
// FLOAT vx0Xv = vx0*v;
// if(flagX)
// {
// if(iyp>=0)
// {
// *(Htemp+ ixp_b+iyp)+=vy1*vx1Xv; // (1 - fractional x/y offsets) multiplied by the squared edge strength
// *(Htemp+ ixpp_b+iyp)+=vy1*vx0Xv;
// }
// if (iypp<blocks[0])
// {
// *(Htemp+ ixp_b+iypp)+=vy0*vx1Xv;
// *(Htemp+ ixpp_b+iypp)+=vy0*vx0Xv;
// }
// }
// else if(flag1)
// {
// if (iyp>=0) *(Htemp+ixp_b+iyp)+=vy1*vx1Xv;
// if (iypp<blocks[0]) *(Htemp+ixp_b+iypp)+=vy0*vx1Xv;
// }
// else if(flag2)
// {
// if(iyp>=0) *(Htemp+ixpp_b+iyp)+=vy1*vx0Xv;
// if(iypp<blocks[0]) *(Htemp+ixpp_b+iypp)+=vy0*vx0Xv;
// }
// }
// }
/*************************************************************/
/*************************************************************/
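/* worked example of the interpolation weights (illustrative, added):
with sbin = 4 and x = 5, xp = (5+0.5)/4 - 0.5 = 0.875, so ixp = 0,
ixpp = 1, vx0 = 0.875 and vx1 = 0.125; the gradient magnitude v is then
split 12.5% / 87.5% between cells ixp_b and ixpp_b, and likewise in y
between iyp and iypp. */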
}
|
f616f2f2b120774523ee9d277551d0bb22047031.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void bitonic_sort_step(float *dev_values, int j, int k)
{
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i^j;
/* The threads with the lowest ids sort the array. */
if ((ixj)>i) {
if ((i&k)==0) {
/* Sort ascending */
if (dev_values[i]>dev_values[ixj]) {
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i&k)!=0) {
/* Sort descending */
if (dev_values[i]<dev_values[ixj]) {
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
} | f616f2f2b120774523ee9d277551d0bb22047031.cu | #include "includes.h"
__global__ void bitonic_sort_step(float *dev_values, int j, int k)
{
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i^j;
/* The threads with the lowest ids sort the array. */
if ((ixj)>i) {
if ((i&k)==0) {
/* Sort ascending */
if (dev_values[i]>dev_values[ixj]) {
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i&k)!=0) {
/* Sort descending */
if (dev_values[i]<dev_values[ixj]) {
/* exchange(i,ixj); */
float temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
} |
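/* illustrative host-side driver (sketch, not part of the original file):
bitonic_sort_step is normally launched once per (k, j) pair, where
NUM_VALS, THREADS and BLOCKS stand in for the (not shown) power-of-two
problem size and launch configuration with THREADS * BLOCKS == NUM_VALS:
for (int k = 2; k <= NUM_VALS; k <<= 1)
    for (int j = k >> 1; j > 0; j >>= 1)
        bitonic_sort_step<<<BLOCKS, THREADS>>>(dev_values, j, k);
*/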
7572d1a769cd38ca8a344add89c5a62aa8b4a3e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "build-chains.h"
#include "mem-search.h"
#include "options.h"
#include "pipeline.h"
#include "util.h"
#include <nvbio/basic/numbers.h>
#include <nvbio/basic/algorithms.h>
#include <nvbio/basic/priority_queue.h>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/transform_iterator.h>
#include <nvbio/basic/vector_view.h>
#include <nvbio/basic/primitives.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/sort.h>
using namespace nvbio;
// a functor to extract the read id from a mem
struct mem_read_id_functor
{
typedef mem_state::mem_type argument_type;
typedef uint32 result_type;
NVBIO_HOST_DEVICE
uint32 operator() (const argument_type mem) const { return mem.string_id(); }
};
// a class to keep track of a chain
struct chain
{
// construct an empty chain
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
chain() : id(uint32(-1)) {}
// construct a new chain from a single seed
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
chain(const uint32 _id, const mem_state::mem_type seed) :
id( _id ),
ref( seed.index_pos() ),
span_beg( seed.span().x ),
last_ref( seed.index_pos() ),
last_span( seed.span() )
{}
// test whether we can merge the given mem into this chain
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool merge(const mem_state::mem_type seed, const uint32 w, const uint32 max_chain_gap)
{
const uint32 seed_len = seed.span().y - seed.span().x;
const uint32 last_len = last_span.y - last_span.x;
const uint32 rbeg = ref;
const uint32 rend = last_ref + last_len;
// check whether seed is contained in the chain
if (seed.span().x >= span_beg && seed.span().y <= last_span.y && seed.index_pos() >= rbeg && seed.index_pos() + seed_len <= rend)
return true; // contained seed; do nothing
const int32 x = seed.span().x - last_span.x; // always non-negative
const int32 y = seed.index_pos() - last_ref;
if ((y >= 0) && (x - y <= w) && (x - last_len < max_chain_gap) && (y - last_len < max_chain_gap))
{
// grow the chain
last_span = seed.span();
last_ref = seed.index_pos();
return true;
}
return false;
}
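// note on the test above: x is the distance between seed starts on the read
// and y the distance on the reference; y >= 0 rejects seeds that step
// backwards on the reference, x - y <= w bounds the diagonal drift to the
// band width, and both x - last_len and y - last_len must stay below
// max_chain_gap so the gap after the previous seed is bounded on both
// sequences.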
uint32 id; // chain id
uint32 ref; // reference coordinate of the first seed in the chain
uint32 span_beg; // read span begin
uint32 last_ref; // the reference coordinate of the last seed in the chain
uint2 last_span; // the read span of the last seed in the chain
};
struct chain_compare
{
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool operator() (const chain& chain1, const chain& chain2) const
{
// compare by the reference coordinate of the first seed of each chain
return chain1.ref < chain2.ref;
}
};
// assign a chain id to all MEMs for the current pipeline::chunk of reads
__global__
void build_chains_kernel(
const read_chunk chunk, // the current sub-batch
const uint32 pass_number, // the pass number - we process up to N seeds per pass
const uint32 n_active, // the number of active reads in this pass
const uint32* active_reads, // the set of active reads
uint8* active_flags, // the output set of active read flags
const uint32 w, // w parameter
const uint32 max_chain_gap, // max chain gap parameter
const uint32 n_mems, // the total number of MEMs for this chunk of reads
const mem_state::mem_type* mems, // the MEMs for this chunk of reads
const uint32* mems_index, // a sorting index into the MEMs specifying the processing order
uint64* mems_chains) // the output chain IDs corresponding to the sorted MEMs
{
const uint32 thread_id = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_id >= n_active)
return;
const uint32 read_id = active_reads[ thread_id ];
// find the first seed belonging to this read
const uint32 mem_begin = uint32( nvbio::lower_bound(
read_id,
nvbio::make_transform_iterator( mems, mem_read_id_functor() ),
n_mems ) - nvbio::make_transform_iterator( mems, mem_read_id_functor() ) );
// find the first seed belonging to the next read
const uint32 mem_end = uint32( nvbio::lower_bound(
read_id+1u,
nvbio::make_transform_iterator( mems, mem_read_id_functor() ),
n_mems ) - nvbio::make_transform_iterator( mems, mem_read_id_functor() ) );
// the maximum amount of chains we can output in one pass
const uint32 MAX_CHAINS = 128;
// keep a priority queue of the chains organized by the reference coordinate of their leftmost seed
typedef nvbio::vector_view<chain*> chain_vector_type;
typedef nvbio::priority_queue<chain, chain_vector_type, chain_compare> chain_queue_type;
chain chain_queue_storage[MAX_CHAINS+1];
chain_queue_type chain_queue( chain_vector_type( 0u, chain_queue_storage ) );
// keep a counter tracking the number of chains that get created
//
// NOTE: here we conservatively assume that in the previous passes we have
// created the maximum number of chains, so as to avoid assigning an already
// taken ID to a new chain (which would result in merging potentially unrelated
// chains)
uint64 n_chains = pass_number * MAX_CHAINS;
// compute the first and ending MEM to process in this pass
const uint32 mem_batch_begin = mem_begin + pass_number * MAX_CHAINS;
const uint32 mem_batch_end = nvbio::min( mem_batch_begin + MAX_CHAINS, mem_end );
// process the seeds in order
for (uint32 i = mem_batch_begin; i < mem_batch_end; ++i)
{
const uint32 seed_idx = mems_index[i];
const mem_state::mem_type seed = mems[ seed_idx ];
// the chain id for this seed, to be determined
uint32 chain_id;
// insert seed
if (chain_queue.empty())
{
// get a new chain id
chain_id = n_chains++;
// build a new chain
chain_queue.push( chain( chain_id, seed ) );
}
else
{
// find the closest chain...
chain_queue_type::iterator chain_it = chain_queue.upper_bound( chain( 0u, seed ) );
// and test whether we can merge this seed into it
if (chain_it != chain_queue.end() &&
chain_it->merge( seed, w, max_chain_gap ) == false)
{
// get a new chain id
chain_id = n_chains++;
// build a new chain
chain_queue.push( chain( chain_id, seed ) );
}
else
{
// merge with the existing chain
chain_id = chain_it->id;
}
}
// write out the chain id (OR'd with the read id)
mems_chains[i] = chain_id | (uint64( read_id ) << 32);
}
// write out whether we need more passes
active_flags[ thread_id ] = (mem_batch_begin < mem_end) ? 1u : 0u;
}
// build chains for the current pipeline::chunk of reads
void build_chains(pipeline_state *pipeline, const io::SequenceDataDevice *reads)
{
const ScopedTimer<float> timer( &pipeline->stats.chain_time ); // keep track of the time spent here
struct chains_state<device_tag> *chn = &pipeline->chn;
const uint32 n_reads = pipeline->chunk.read_end - pipeline->chunk.read_begin;
const uint32 n_mems = pipeline->chunk.mem_end - pipeline->chunk.mem_begin;
// skip pathological cases
if (n_mems == 0u)
return;
//
// Here we are going to run multiple passes of the same kernel, as we cannot fit
// all chains in local memory at once...
//
// prepare some ping-pong queues for tracking active reads that need more passes
nvbio::vector<device_tag,uint32> active_reads( n_reads );
nvbio::vector<device_tag,uint8> active_flags( n_reads );
nvbio::vector<device_tag,uint32> out_reads( n_reads );
nvbio::vector<device_tag,uint8> temp_storage;
// initialize the active reads queue
thrust::copy(
thrust::make_counting_iterator<uint32>(0u) + pipeline->chunk.read_begin,
thrust::make_counting_iterator<uint32>(0u) + pipeline->chunk.read_end,
active_reads.begin() );
uint32 n_active = n_reads;
for (uint32 pass_number = 0u; n_active; ++pass_number)
{
const uint32 block_dim = 128;
const uint32 n_blocks = util::divide_ri( n_active, block_dim );
// assign a chain id to each mem
hipLaunchKernelGGL(( build_chains_kernel), dim3(n_blocks), dim3(block_dim), 0, 0,
pipeline->chunk,
pass_number,
n_active,
nvbio::plain_view( active_reads ),
nvbio::plain_view( active_flags ),
command_line_options.w,
command_line_options.max_chain_gap,
n_mems,
nvbio::plain_view( chn->mems ),
nvbio::plain_view( chn->mems_index ),
nvbio::plain_view( chn->mems_chain ) );
optional_device_synchronize();
cuda::check_error("build-chains kernel");
// shrink the set of active reads
n_active = copy_flagged(
n_active, // the number of input elements
active_reads.begin(), // the input sequence of elements to copy
active_flags.begin(), // the input sequence of copy flags
out_reads.begin(), // the output sequence of copied elements
temp_storage ); // some temporary storage
active_reads.swap( out_reads );
}
// sort mems by chain id
// NOTE: it's important here to use a stable-sort, so as to guarantee preserving
// the ordering by left-coordinate of the MEMs
thrust::sort_by_key( // TODO: this is slow, switch to nvbio::cuda::SortEnactor
chn->mems_chain.begin(),
chn->mems_chain.begin() + n_mems,
chn->mems_index.begin() );
optional_device_synchronize();
nvbio::cuda::check_error("build-chains kernel");
}
| 7572d1a769cd38ca8a344add89c5a62aa8b4a3e6.cu | /*
* nvbio
* Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "build-chains.h"
#include "mem-search.h"
#include "options.h"
#include "pipeline.h"
#include "util.h"
#include <nvbio/basic/numbers.h>
#include <nvbio/basic/algorithms.h>
#include <nvbio/basic/priority_queue.h>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/transform_iterator.h>
#include <nvbio/basic/vector_view.h>
#include <nvbio/basic/primitives.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/sort.h>
using namespace nvbio;
// a functor to extract the read id from a mem
struct mem_read_id_functor
{
typedef mem_state::mem_type argument_type;
typedef uint32 result_type;
NVBIO_HOST_DEVICE
uint32 operator() (const argument_type mem) const { return mem.string_id(); }
};
// a class to keep track of a chain
struct chain
{
// construct an empty chain
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
chain() : id(uint32(-1)) {}
// construct a new chain from a single seed
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
chain(const uint32 _id, const mem_state::mem_type seed) :
id( _id ),
ref( seed.index_pos() ),
span_beg( seed.span().x ),
last_ref( seed.index_pos() ),
last_span( seed.span() )
{}
// test whether we can merge the given mem into this chain
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool merge(const mem_state::mem_type seed, const uint32 w, const uint32 max_chain_gap)
{
const uint32 seed_len = seed.span().y - seed.span().x;
const uint32 last_len = last_span.y - last_span.x;
const uint32 rbeg = ref;
const uint32 rend = last_ref + last_len;
// check whether seed is contained in the chain
if (seed.span().x >= span_beg && seed.span().y <= last_span.y && seed.index_pos() >= rbeg && seed.index_pos() + seed_len <= rend)
return true; // contained seed; do nothing
const int32 x = seed.span().x - last_span.x; // always non-negative
const int32 y = seed.index_pos() - last_ref;
if ((y >= 0) && (x - y <= w) && (x - last_len < max_chain_gap) && (y - last_len < max_chain_gap))
{
// grow the chain
last_span = seed.span();
last_ref = seed.index_pos();
return true;
}
return false;
}
uint32 id; // chain id
uint32 ref; // reference coordinate of the first seed in the chain
uint32 span_beg; // read span begin
uint32 last_ref; // the reference coordinate of the last seed in the chain
uint2 last_span; // the read span of the last seed in the chain
};
struct chain_compare
{
NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
bool operator() (const chain& chain1, const chain& chain2) const
{
// compare by the reference coordinate of the first seed of each chain
return chain1.ref < chain2.ref;
}
};
// assign a chain id to all MEMs for the current pipeline::chunk of reads
__global__
void build_chains_kernel(
const read_chunk chunk, // the current sub-batch
const uint32 pass_number, // the pass number - we process up to N seeds per pass
const uint32 n_active, // the number of active reads in this pass
const uint32* active_reads, // the set of active reads
uint8* active_flags, // the output set of active read flags
const uint32 w, // w parameter
const uint32 max_chain_gap, // max chain gap parameter
const uint32 n_mems, // the total number of MEMs for this chunk of reads
const mem_state::mem_type* mems, // the MEMs for this chunk of reads
const uint32* mems_index, // a sorting index into the MEMs specifying the processing order
uint64* mems_chains) // the output chain IDs corresponding to the sorted MEMs
{
const uint32 thread_id = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_id >= n_active)
return;
const uint32 read_id = active_reads[ thread_id ];
// find the first seed belonging to this read
const uint32 mem_begin = uint32( nvbio::lower_bound(
read_id,
nvbio::make_transform_iterator( mems, mem_read_id_functor() ),
n_mems ) - nvbio::make_transform_iterator( mems, mem_read_id_functor() ) );
// find the first seed belonging to the next read
const uint32 mem_end = uint32( nvbio::lower_bound(
read_id+1u,
nvbio::make_transform_iterator( mems, mem_read_id_functor() ),
n_mems ) - nvbio::make_transform_iterator( mems, mem_read_id_functor() ) );
// the maximum amount of chains we can output in one pass
const uint32 MAX_CHAINS = 128;
// keep a priority queue of the chains organized by the reference coordinate of their leftmost seed
typedef nvbio::vector_view<chain*> chain_vector_type;
typedef nvbio::priority_queue<chain, chain_vector_type, chain_compare> chain_queue_type;
chain chain_queue_storage[MAX_CHAINS+1];
chain_queue_type chain_queue( chain_vector_type( 0u, chain_queue_storage ) );
// keep a counter tracking the number of chains that get created
//
// NOTE: here we conservatively assume that in the previous passes we have
// created the maximum number of chains, so as to avoid assigning an already
// taken ID to a new chain (which would result in merging potentially unrelated
// chains)
uint64 n_chains = pass_number * MAX_CHAINS;
// compute the first and ending MEM to process in this pass
const uint32 mem_batch_begin = mem_begin + pass_number * MAX_CHAINS;
const uint32 mem_batch_end = nvbio::min( mem_batch_begin + MAX_CHAINS, mem_end );
// process the seeds in order
for (uint32 i = mem_batch_begin; i < mem_batch_end; ++i)
{
const uint32 seed_idx = mems_index[i];
const mem_state::mem_type seed = mems[ seed_idx ];
// the chain id for this seed, to be determined
uint32 chain_id;
// insert seed
if (chain_queue.empty())
{
// get a new chain id
chain_id = n_chains++;
// build a new chain
chain_queue.push( chain( chain_id, seed ) );
}
else
{
// find the closest chain...
chain_queue_type::iterator chain_it = chain_queue.upper_bound( chain( 0u, seed ) );
// and test whether we can merge this seed into it
if (chain_it != chain_queue.end() &&
chain_it->merge( seed, w, max_chain_gap ) == false)
{
// get a new chain id
chain_id = n_chains++;
// build a new chain
chain_queue.push( chain( chain_id, seed ) );
}
else
{
// merge with the existing chain
chain_id = chain_it->id;
}
}
// write out the chain id (OR'd with the read id)
mems_chains[i] = chain_id | (uint64( read_id ) << 32);
}
// write out whether we need more passes
active_flags[ thread_id ] = (mem_batch_begin < mem_end) ? 1u : 0u;
}
// build chains for the current pipeline::chunk of reads
void build_chains(pipeline_state *pipeline, const io::SequenceDataDevice *reads)
{
const ScopedTimer<float> timer( &pipeline->stats.chain_time ); // keep track of the time spent here
struct chains_state<device_tag> *chn = &pipeline->chn;
const uint32 n_reads = pipeline->chunk.read_end - pipeline->chunk.read_begin;
const uint32 n_mems = pipeline->chunk.mem_end - pipeline->chunk.mem_begin;
// skip pathological cases
if (n_mems == 0u)
return;
//
// Here we are going to run multiple passes of the same kernel, as we cannot fit
// all chains in local memory at once...
//
// prepare some ping-pong queues for tracking active reads that need more passes
nvbio::vector<device_tag,uint32> active_reads( n_reads );
nvbio::vector<device_tag,uint8> active_flags( n_reads );
nvbio::vector<device_tag,uint32> out_reads( n_reads );
nvbio::vector<device_tag,uint8> temp_storage;
// initialize the active reads queue
thrust::copy(
thrust::make_counting_iterator<uint32>(0u) + pipeline->chunk.read_begin,
thrust::make_counting_iterator<uint32>(0u) + pipeline->chunk.read_end,
active_reads.begin() );
uint32 n_active = n_reads;
for (uint32 pass_number = 0u; n_active; ++pass_number)
{
const uint32 block_dim = 128;
const uint32 n_blocks = util::divide_ri( n_active, block_dim );
// assign a chain id to each mem
build_chains_kernel<<<n_blocks, block_dim>>>(
pipeline->chunk,
pass_number,
n_active,
nvbio::plain_view( active_reads ),
nvbio::plain_view( active_flags ),
command_line_options.w,
command_line_options.max_chain_gap,
n_mems,
nvbio::plain_view( chn->mems ),
nvbio::plain_view( chn->mems_index ),
nvbio::plain_view( chn->mems_chain ) );
optional_device_synchronize();
cuda::check_error("build-chains kernel");
// shrink the set of active reads
n_active = copy_flagged(
n_active, // the number of input elements
active_reads.begin(), // the input sequence of elements to copy
active_flags.begin(), // the input sequence of copy flags
out_reads.begin(), // the output sequence of copied elements
temp_storage ); // some temporary storage
active_reads.swap( out_reads );
}
// sort mems by chain id
// NOTE: it's important here to use a stable-sort, so as to guarantee preserving
// the ordering by left-coordinate of the MEMs
thrust::sort_by_key( // TODO: this is slow, switch to nvbio::cuda::SortEnactor
chn->mems_chain.begin(),
chn->mems_chain.begin() + n_mems,
chn->mems_index.begin() );
optional_device_synchronize();
nvbio::cuda::check_error("build-chains kernel");
}
|
268a858567bc999cd47e7ac72b9d2d5d2aeadf96.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief Wrapper functions for Nvgraph
*
* @file nvgraph_gdf.cu
* ---------------------------------------------------------------------------**/
#include <nvgraph_gdf.h>
#include <thrust/device_vector.h>
#include <ctime>
#include "utilities/error_utils.h"
//RMM:
//
#include <rmm_utils.h>
template<typename T>
using Vector = thrust::device_vector<T, rmm_allocator<T>>;
gdf_error nvgraph2gdf_error(nvgraphStatus_t nvg_stat)
{
switch (nvg_stat) {
case NVGRAPH_STATUS_SUCCESS:
return GDF_SUCCESS;
case NVGRAPH_STATUS_NOT_INITIALIZED:
return GDF_INVALID_API_CALL;
case NVGRAPH_STATUS_INVALID_VALUE:
return GDF_INVALID_API_CALL;
case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED:
return GDF_UNSUPPORTED_DTYPE;
case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED:
return GDF_INVALID_API_CALL;
default:
return GDF_CUDA_ERROR;
}
}
gdf_error nvgraph2gdf_error_verbose(nvgraphStatus_t nvg_stat)
{
switch (nvg_stat) {
case NVGRAPH_STATUS_NOT_INITIALIZED:
std::cerr << "nvGRAPH not initialized";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_ALLOC_FAILED:
std::cerr << "nvGRAPH alloc failed";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_INVALID_VALUE:
std::cerr << "nvGRAPH invalid value";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_ARCH_MISMATCH:
std::cerr << "nvGRAPH arch mismatch";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_MAPPING_ERROR:
std::cerr << "nvGRAPH mapping error";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_EXECUTION_FAILED:
std::cerr << "nvGRAPH execution failed";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_INTERNAL_ERROR:
std::cerr << "nvGRAPH internal error";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED:
std::cerr << "nvGRAPH type not supported";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_NOT_CONVERGED:
std::cerr << "nvGRAPH algorithm failed to converge";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED:
std::cerr << "nvGRAPH graph type not supported";
return GDF_CUDA_ERROR;
default:
std::cerr << "Unknown nvGRAPH Status";
return GDF_CUDA_ERROR;
}
}
#ifdef VERBOSE
#define NVG_TRY(call) \
{ \
if ((call)!=NVGRAPH_STATUS_SUCCESS) \
return nvgraph2gdf_error_verbose((call)); \
}
#else
#define NVG_TRY(call) \
{ \
nvgraphStatus_t err_code = (call); \
if (err_code != NVGRAPH_STATUS_SUCCESS) \
return nvgraph2gdf_error(err_code); \
}
#endif
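// note: NVG_TRY converts an nvgraphStatus_t into an early gdf_error return,
// mirroring how GDF_TRY is used below; the non-verbose branch evaluates the
// call exactly once, while the VERBOSE branch re-evaluates the call when
// building the diagnostic message.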
gdf_error gdf_createGraph_nvgraph(nvgraphHandle_t nvg_handle,
gdf_graph* gdf_G,
nvgraphGraphDescr_t* nvgraph_G,
bool use_transposed) {
// check input
GDF_REQUIRE(!((gdf_G->edgeList == nullptr) &&
(gdf_G->adjList == nullptr) &&
(gdf_G->transposedAdjList == nullptr)),
GDF_INVALID_API_CALL);
nvgraphTopologyType_t TT;
hipDataType settype;
// create an nvgraph graph handle
NVG_TRY(nvgraphCreateGraphDescr(nvg_handle, nvgraph_G));
// setup nvgraph variables
if (use_transposed) {
// convert edgeList to transposedAdjList
if (gdf_G->transposedAdjList == nullptr) {
GDF_TRY(gdf_add_transpose(gdf_G));
}
// using existing transposedAdjList if it exists and if adjList is missing
TT = NVGRAPH_CSC_32;
nvgraphCSCTopology32I_st topoData;
topoData.nvertices = gdf_G->transposedAdjList->offsets->size - 1;
topoData.nedges = gdf_G->transposedAdjList->indices->size;
topoData.destination_offsets = (int *) gdf_G->transposedAdjList->offsets->data;
topoData.source_indices = (int *) gdf_G->transposedAdjList->indices->data;
// attach the transposed adj list
NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvgraph_G, (void * )&topoData, TT));
//attach edge values
if (gdf_G->transposedAdjList->edge_data) {
switch (gdf_G->transposedAdjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = HIP_R_32F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
*nvgraph_G,
0,
settype,
(float * ) gdf_G->transposedAdjList->edge_data->data))
break;
case GDF_FLOAT64:
settype = HIP_R_64F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
*nvgraph_G,
0,
settype,
(double * ) gdf_G->transposedAdjList->edge_data->data))
break;
default:
return GDF_UNSUPPORTED_DTYPE;
}
}
}
else {
// convert edgeList to adjList
if (gdf_G->adjList == nullptr) {
GDF_TRY(gdf_add_adj_list(gdf_G));
}
TT = NVGRAPH_CSR_32;
nvgraphCSRTopology32I_st topoData;
topoData.nvertices = gdf_G->adjList->offsets->size - 1;
topoData.nedges = gdf_G->adjList->indices->size;
topoData.source_offsets = (int *) gdf_G->adjList->offsets->data;
topoData.destination_indices = (int *) gdf_G->adjList->indices->data;
// attach adj list
NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvgraph_G, (void * )&topoData, TT));
//attach edge values
if (gdf_G->adjList->edge_data) {
switch (gdf_G->adjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = HIP_R_32F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
*nvgraph_G,
0,
settype,
(float * ) gdf_G->adjList->edge_data->data))
break;
case GDF_FLOAT64:
settype = HIP_R_64F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
*nvgraph_G,
0,
settype,
(double * ) gdf_G->adjList->edge_data->data))
break;
default:
return GDF_UNSUPPORTED_DTYPE;
}
}
}
return GDF_SUCCESS;
}
gdf_error gdf_sssp_nvgraph(gdf_graph *gdf_G,
const int *source_vert,
gdf_column *sssp_distances) {
std::clock_t start;
GDF_REQUIRE(gdf_G != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(*source_vert >= 0, GDF_INVALID_API_CALL);
GDF_REQUIRE(*source_vert < sssp_distances->size, GDF_INVALID_API_CALL);
GDF_REQUIRE(sssp_distances != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(sssp_distances->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(!sssp_distances->valid, GDF_VALIDITY_UNSUPPORTED);
GDF_REQUIRE(sssp_distances->size > 0, GDF_INVALID_API_CALL);
// init nvgraph
// TODO : time this call
nvgraphHandle_t nvg_handle = 0;
nvgraphGraphDescr_t nvgraph_G = 0;
hipDataType settype;
start = std::clock();
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, true));
std::cout << (std::clock() - start) / (double) (CLOCKS_PER_SEC / 1000) << ","; // in ms
int sssp_index = 0;
int weight_index = 0;
Vector<float> d_val;
//RMM:
//
hipStream_t stream { nullptr };
rmm_temp_allocator allocator(stream);
start = std::clock();
if (gdf_G->transposedAdjList->edge_data == nullptr) {
// use a fp32 vector [1,...,1]
settype = HIP_R_32F;
d_val.resize(gdf_G->transposedAdjList->indices->size);
thrust::fill(thrust::hip::par(allocator).on(stream), d_val.begin(), d_val.end(), 1.0);
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
nvgraph_G,
weight_index,
settype,
(void * ) thrust::raw_pointer_cast(d_val.data())));
}
else {
switch (gdf_G->transposedAdjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = HIP_R_32F;
break;
case GDF_FLOAT64:
settype = HIP_R_64F;
break;
default:
return GDF_UNSUPPORTED_DTYPE;
}
}
NVG_TRY(nvgraphAttachVertexData(nvg_handle, nvgraph_G, 0, settype, sssp_distances->data));
std::cout << (std::clock() - start) / (double) (CLOCKS_PER_SEC / 1000) << ","; // in ms
start = std::clock();
NVG_TRY(nvgraphSssp(nvg_handle, nvgraph_G, weight_index, source_vert, sssp_index));
std::cout << (std::clock() - start) / (double) (CLOCKS_PER_SEC / 1000) << ","; // in ms
start = std::clock();
NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G));
NVG_TRY(nvgraphDestroy(nvg_handle));
std::cout << (std::clock() - start) / (double) (CLOCKS_PER_SEC / 1000) << std::endl; // in ms
return GDF_SUCCESS;
}
gdf_error gdf_balancedCutClustering_nvgraph(gdf_graph* gdf_G,
const int num_clusters,
const int num_eigen_vects,
const float evs_tolerance,
const int evs_max_iter,
const float kmean_tolerance,
const int kmean_max_iter,
gdf_column* clustering) {
GDF_REQUIRE(gdf_G != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE((gdf_G->adjList != nullptr) || (gdf_G->edgeList != nullptr), GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(!clustering->valid, GDF_VALIDITY_UNSUPPORTED);
// Ensure that the input graph has values
GDF_TRY(gdf_add_adj_list(gdf_G));
GDF_REQUIRE(gdf_G->adjList->edge_data != nullptr, GDF_INVALID_API_CALL);
// Initialize Nvgraph and wrap the graph
nvgraphHandle_t nvg_handle = nullptr;
nvgraphGraphDescr_t nvgraph_G = nullptr;
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, false));
int weight_index = 0;
// Pack parameters for call to Nvgraph
SpectralClusteringParameter param;
param.n_clusters = num_clusters;
param.n_eig_vects = num_eigen_vects;
param.algorithm = NVGRAPH_BALANCED_CUT_LANCZOS;
param.evs_tolerance = evs_tolerance;
param.evs_max_iter = evs_max_iter;
param.kmean_tolerance = kmean_tolerance;
param.kmean_max_iter = kmean_max_iter;
// Make call to Nvgraph balancedCutClustering
void* eig_vals = malloc(num_eigen_vects * sizeof(double));
void* eig_vects = malloc(num_eigen_vects * clustering->size * sizeof(double));
nvgraphStatus_t err = nvgraphSpectralClustering(nvg_handle,
nvgraph_G,
weight_index,
¶m,
(int*) clustering->data,
eig_vals,
eig_vects);
free(eig_vals);
free(eig_vects);
NVG_TRY(err);
NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G));
NVG_TRY(nvgraphDestroy(nvg_handle));
return GDF_SUCCESS;
}
gdf_error gdf_spectralModularityMaximization_nvgraph(gdf_graph* gdf_G,
const int n_clusters,
const int n_eig_vects,
const float evs_tolerance,
const int evs_max_iter,
const float kmean_tolerance,
const int kmean_max_iter,
gdf_column* clustering) {
GDF_REQUIRE(gdf_G != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE((gdf_G->adjList != nullptr) || (gdf_G->edgeList != nullptr), GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(!clustering->valid, GDF_VALIDITY_UNSUPPORTED);
// Ensure that the input graph has values
GDF_TRY(gdf_add_adj_list(gdf_G));
GDF_REQUIRE(gdf_G->adjList->edge_data != nullptr, GDF_INVALID_API_CALL);
// Initialize Nvgraph and wrap the graph
nvgraphHandle_t nvg_handle = nullptr;
nvgraphGraphDescr_t nvgraph_G = nullptr;
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, false));
int weight_index = 0;
// Pack parameters for call to Nvgraph
SpectralClusteringParameter param;
param.n_clusters = n_clusters;
param.n_eig_vects = n_eig_vects;
param.algorithm = NVGRAPH_MODULARITY_MAXIMIZATION;
param.evs_tolerance = evs_tolerance;
param.evs_max_iter = evs_max_iter;
param.kmean_tolerance = kmean_tolerance;
param.kmean_max_iter = kmean_max_iter;
// Make call to Nvgraph spectral clustering (modularity maximization)
void* eig_vals = malloc(n_eig_vects * sizeof(double));
void* eig_vects = malloc(n_eig_vects * clustering->size * sizeof(double));
nvgraphStatus_t err = nvgraphSpectralClustering(nvg_handle,
nvgraph_G,
weight_index,
¶m,
(int*) clustering->data,
eig_vals,
eig_vects);
free(eig_vals);
free(eig_vects);
NVG_TRY(err);
NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G));
NVG_TRY(nvgraphDestroy(nvg_handle));
return GDF_SUCCESS;
}
gdf_error gdf_AnalyzeClustering_modularity_nvgraph(gdf_graph* gdf_G,
const int n_clusters,
gdf_column* clustering,
float* score) {
GDF_REQUIRE(gdf_G != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE((gdf_G->adjList != nullptr) || (gdf_G->edgeList != nullptr), GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(!clustering->valid, GDF_VALIDITY_UNSUPPORTED);
// Initialize Nvgraph and wrap the graph
nvgraphHandle_t nvg_handle = nullptr;
nvgraphGraphDescr_t nvgraph_G = nullptr;
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, false));
int weight_index = 0;
// Make Nvgraph call
NVG_TRY(nvgraphAnalyzeClustering(nvg_handle,
nvgraph_G,
weight_index,
n_clusters,
(const int* )clustering->data,
NVGRAPH_MODULARITY,
score));
return GDF_SUCCESS;
}
gdf_error gdf_AnalyzeClustering_edge_cut_nvgraph(gdf_graph* gdf_G,
const int n_clusters,
gdf_column* clustering,
float* score) {
GDF_REQUIRE(gdf_G != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE((gdf_G->adjList != nullptr) || (gdf_G->edgeList != nullptr), GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(!clustering->valid, GDF_VALIDITY_UNSUPPORTED);
// Initialize Nvgraph and wrap the graph
nvgraphHandle_t nvg_handle = nullptr;
nvgraphGraphDescr_t nvgraph_G = nullptr;
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, false));
int weight_index = 0;
// Make Nvgraph call
NVG_TRY(nvgraphAnalyzeClustering(nvg_handle,
nvgraph_G,
weight_index,
n_clusters,
(const int* )clustering->data,
NVGRAPH_EDGE_CUT,
score));
return GDF_SUCCESS;
}
gdf_error gdf_AnalyzeClustering_ratio_cut_nvgraph(gdf_graph* gdf_G,
const int n_clusters,
gdf_column* clustering,
float* score) {
GDF_REQUIRE(gdf_G != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE((gdf_G->adjList != nullptr) || (gdf_G->edgeList != nullptr), GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(!clustering->valid, GDF_VALIDITY_UNSUPPORTED);
// Initialize Nvgraph and wrap the graph
nvgraphHandle_t nvg_handle = nullptr;
nvgraphGraphDescr_t nvgraph_G = nullptr;
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, false));
int weight_index = 0;
// Make Nvgraph call
NVG_TRY(nvgraphAnalyzeClustering(nvg_handle,
nvgraph_G,
weight_index,
n_clusters,
(const int* )clustering->data,
NVGRAPH_RATIO_CUT,
score));
return GDF_SUCCESS;
}
| 268a858567bc999cd47e7ac72b9d2d5d2aeadf96.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief Wrapper functions for Nvgraph
*
* @file nvgraph_gdf.cu
* ---------------------------------------------------------------------------**/
#include <nvgraph_gdf.h>
#include <thrust/device_vector.h>
#include <ctime>
#include "utilities/error_utils.h"
//RMM:
//
#include <rmm_utils.h>
template<typename T>
using Vector = thrust::device_vector<T, rmm_allocator<T>>;
gdf_error nvgraph2gdf_error(nvgraphStatus_t nvg_stat)
{
switch (nvg_stat) {
case NVGRAPH_STATUS_SUCCESS:
return GDF_SUCCESS;
case NVGRAPH_STATUS_NOT_INITIALIZED:
return GDF_INVALID_API_CALL;
case NVGRAPH_STATUS_INVALID_VALUE:
return GDF_INVALID_API_CALL;
case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED:
return GDF_UNSUPPORTED_DTYPE;
case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED:
return GDF_INVALID_API_CALL;
default:
return GDF_CUDA_ERROR;
}
}
gdf_error nvgraph2gdf_error_verbose(nvgraphStatus_t nvg_stat)
{
switch (nvg_stat) {
case NVGRAPH_STATUS_NOT_INITIALIZED:
std::cerr << "nvGRAPH not initialized";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_ALLOC_FAILED:
std::cerr << "nvGRAPH alloc failed";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_INVALID_VALUE:
std::cerr << "nvGRAPH invalid value";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_ARCH_MISMATCH:
std::cerr << "nvGRAPH arch mismatch";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_MAPPING_ERROR:
std::cerr << "nvGRAPH mapping error";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_EXECUTION_FAILED:
std::cerr << "nvGRAPH execution failed";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_INTERNAL_ERROR:
std::cerr << "nvGRAPH internal error";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED:
std::cerr << "nvGRAPH type not supported";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_NOT_CONVERGED:
std::cerr << "nvGRAPH algorithm failed to converge";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED:
std::cerr << "nvGRAPH graph type not supported";
return GDF_CUDA_ERROR;
default:
std::cerr << "Unknown nvGRAPH Status";
return GDF_CUDA_ERROR;
}
}
#ifdef VERBOSE
#define NVG_TRY(call) \
{ \
if ((call)!=NVGRAPH_STATUS_SUCCESS) \
return nvgraph2gdf_error_verbose((call)); \
}
#else
#define NVG_TRY(call) \
{ \
nvgraphStatus_t err_code = (call); \
if (err_code != NVGRAPH_STATUS_SUCCESS) \
return nvgraph2gdf_error(err_code); \
}
#endif
gdf_error gdf_createGraph_nvgraph(nvgraphHandle_t nvg_handle,
gdf_graph* gdf_G,
nvgraphGraphDescr_t* nvgraph_G,
bool use_transposed) {
// check input
GDF_REQUIRE(!((gdf_G->edgeList == nullptr) &&
(gdf_G->adjList == nullptr) &&
(gdf_G->transposedAdjList == nullptr)),
GDF_INVALID_API_CALL);
nvgraphTopologyType_t TT;
cudaDataType_t settype;
// create an nvgraph graph handle
NVG_TRY(nvgraphCreateGraphDescr(nvg_handle, nvgraph_G));
// setup nvgraph variables
if (use_transposed) {
// convert edgeList to transposedAdjList
if (gdf_G->transposedAdjList == nullptr) {
GDF_TRY(gdf_add_transpose(gdf_G));
}
// using existing transposedAdjList if it exists and if adjList is missing
TT = NVGRAPH_CSC_32;
nvgraphCSCTopology32I_st topoData;
topoData.nvertices = gdf_G->transposedAdjList->offsets->size - 1;
topoData.nedges = gdf_G->transposedAdjList->indices->size;
topoData.destination_offsets = (int *) gdf_G->transposedAdjList->offsets->data;
topoData.source_indices = (int *) gdf_G->transposedAdjList->indices->data;
// attach the transposed adj list
NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvgraph_G, (void * )&topoData, TT));
//attach edge values
if (gdf_G->transposedAdjList->edge_data) {
switch (gdf_G->transposedAdjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = CUDA_R_32F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
*nvgraph_G,
0,
settype,
(float * ) gdf_G->transposedAdjList->edge_data->data))
break;
case GDF_FLOAT64:
settype = CUDA_R_64F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
*nvgraph_G,
0,
settype,
(double * ) gdf_G->transposedAdjList->edge_data->data))
break;
default:
return GDF_UNSUPPORTED_DTYPE;
}
}
}
else {
// convert edgeList to adjList
if (gdf_G->adjList == nullptr) {
GDF_TRY(gdf_add_adj_list(gdf_G));
}
TT = NVGRAPH_CSR_32;
nvgraphCSRTopology32I_st topoData;
topoData.nvertices = gdf_G->adjList->offsets->size - 1;
topoData.nedges = gdf_G->adjList->indices->size;
topoData.source_offsets = (int *) gdf_G->adjList->offsets->data;
topoData.destination_indices = (int *) gdf_G->adjList->indices->data;
// attach adj list
NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvgraph_G, (void * )&topoData, TT));
//attach edge values
if (gdf_G->adjList->edge_data) {
switch (gdf_G->adjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = CUDA_R_32F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
*nvgraph_G,
0,
settype,
(float * ) gdf_G->adjList->edge_data->data))
break;
case GDF_FLOAT64:
settype = CUDA_R_64F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
*nvgraph_G,
0,
settype,
(double * ) gdf_G->adjList->edge_data->data))
break;
default:
return GDF_UNSUPPORTED_DTYPE;
}
}
}
return GDF_SUCCESS;
}
gdf_error gdf_sssp_nvgraph(gdf_graph *gdf_G,
const int *source_vert,
gdf_column *sssp_distances) {
std::clock_t start;
GDF_REQUIRE(gdf_G != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(*source_vert >= 0, GDF_INVALID_API_CALL);
GDF_REQUIRE(*source_vert < sssp_distances->size, GDF_INVALID_API_CALL);
GDF_REQUIRE(sssp_distances != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(sssp_distances->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(!sssp_distances->valid, GDF_VALIDITY_UNSUPPORTED);
GDF_REQUIRE(sssp_distances->size > 0, GDF_INVALID_API_CALL);
// init nvgraph
// TODO : time this call
nvgraphHandle_t nvg_handle = 0;
nvgraphGraphDescr_t nvgraph_G = 0;
cudaDataType_t settype;
start = std::clock();
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, true));
std::cout << (std::clock() - start) / (double) (CLOCKS_PER_SEC / 1000) << ","; // in ms
int sssp_index = 0;
int weight_index = 0;
Vector<float> d_val;
//RMM:
//
cudaStream_t stream { nullptr };
rmm_temp_allocator allocator(stream);
start = std::clock();
if (gdf_G->transposedAdjList->edge_data == nullptr) {
// use a fp32 vector [1,...,1]
settype = CUDA_R_32F;
d_val.resize(gdf_G->transposedAdjList->indices->size);
thrust::fill(thrust::cuda::par(allocator).on(stream), d_val.begin(), d_val.end(), 1.0);
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
nvgraph_G,
weight_index,
settype,
(void * ) thrust::raw_pointer_cast(d_val.data())));
}
else {
switch (gdf_G->transposedAdjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = CUDA_R_32F;
break;
case GDF_FLOAT64:
settype = CUDA_R_64F;
break;
default:
return GDF_UNSUPPORTED_DTYPE;
}
}
NVG_TRY(nvgraphAttachVertexData(nvg_handle, nvgraph_G, 0, settype, sssp_distances->data));
std::cout << (std::clock() - start) / (double) (CLOCKS_PER_SEC / 1000) << ","; // in ms
start = std::clock();
NVG_TRY(nvgraphSssp(nvg_handle, nvgraph_G, weight_index, source_vert, sssp_index));
std::cout << (std::clock() - start) / (double) (CLOCKS_PER_SEC / 1000) << ","; // in ms
start = std::clock();
NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G));
NVG_TRY(nvgraphDestroy(nvg_handle));
std::cout << (std::clock() - start) / (double) (CLOCKS_PER_SEC / 1000) << std::endl; // in ms
return GDF_SUCCESS;
}
gdf_error gdf_balancedCutClustering_nvgraph(gdf_graph* gdf_G,
const int num_clusters,
const int num_eigen_vects,
const float evs_tolerance,
const int evs_max_iter,
const float kmean_tolerance,
const int kmean_max_iter,
gdf_column* clustering) {
GDF_REQUIRE(gdf_G != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE((gdf_G->adjList != nullptr) || (gdf_G->edgeList != nullptr), GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(!clustering->valid, GDF_VALIDITY_UNSUPPORTED);
// Ensure that the input graph has values
GDF_TRY(gdf_add_adj_list(gdf_G));
GDF_REQUIRE(gdf_G->adjList->edge_data != nullptr, GDF_INVALID_API_CALL);
// Initialize Nvgraph and wrap the graph
nvgraphHandle_t nvg_handle = nullptr;
nvgraphGraphDescr_t nvgraph_G = nullptr;
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, false));
int weight_index = 0;
// Pack parameters for call to Nvgraph
SpectralClusteringParameter param;
param.n_clusters = num_clusters;
param.n_eig_vects = num_eigen_vects;
param.algorithm = NVGRAPH_BALANCED_CUT_LANCZOS;
param.evs_tolerance = evs_tolerance;
param.evs_max_iter = evs_max_iter;
param.kmean_tolerance = kmean_tolerance;
param.kmean_max_iter = kmean_max_iter;
// Make call to Nvgraph balancedCutClustering
void* eig_vals = malloc(num_eigen_vects * sizeof(double));
void* eig_vects = malloc(num_eigen_vects * clustering->size * sizeof(double));
nvgraphStatus_t err = nvgraphSpectralClustering(nvg_handle,
nvgraph_G,
weight_index,
                                                   &param,
(int*) clustering->data,
eig_vals,
eig_vects);
free(eig_vals);
free(eig_vects);
NVG_TRY(err);
NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G));
NVG_TRY(nvgraphDestroy(nvg_handle));
return GDF_SUCCESS;
}
gdf_error gdf_spectralModularityMaximization_nvgraph(gdf_graph* gdf_G,
const int n_clusters,
const int n_eig_vects,
const float evs_tolerance,
const int evs_max_iter,
const float kmean_tolerance,
const int kmean_max_iter,
gdf_column* clustering) {
GDF_REQUIRE(gdf_G != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE((gdf_G->adjList != nullptr) || (gdf_G->edgeList != nullptr), GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(!clustering->valid, GDF_VALIDITY_UNSUPPORTED);
// Ensure that the input graph has values
GDF_TRY(gdf_add_adj_list(gdf_G));
GDF_REQUIRE(gdf_G->adjList->edge_data != nullptr, GDF_INVALID_API_CALL);
// Initialize Nvgraph and wrap the graph
nvgraphHandle_t nvg_handle = nullptr;
nvgraphGraphDescr_t nvgraph_G = nullptr;
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, false));
int weight_index = 0;
// Pack parameters for call to Nvgraph
SpectralClusteringParameter param;
param.n_clusters = n_clusters;
param.n_eig_vects = n_eig_vects;
param.algorithm = NVGRAPH_MODULARITY_MAXIMIZATION;
param.evs_tolerance = evs_tolerance;
param.evs_max_iter = evs_max_iter;
param.kmean_tolerance = kmean_tolerance;
param.kmean_max_iter = kmean_max_iter;
// Make call to Nvgraph balancedCutClustering
void* eig_vals = malloc(n_eig_vects * sizeof(double));
void* eig_vects = malloc(n_eig_vects * clustering->size * sizeof(double));
nvgraphStatus_t err = nvgraphSpectralClustering(nvg_handle,
nvgraph_G,
weight_index,
                                                   &param,
(int*) clustering->data,
eig_vals,
eig_vects);
free(eig_vals);
free(eig_vects);
NVG_TRY(err);
NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G));
NVG_TRY(nvgraphDestroy(nvg_handle));
return GDF_SUCCESS;
}
gdf_error gdf_AnalyzeClustering_modularity_nvgraph(gdf_graph* gdf_G,
const int n_clusters,
gdf_column* clustering,
float* score) {
GDF_REQUIRE(gdf_G != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE((gdf_G->adjList != nullptr) || (gdf_G->edgeList != nullptr), GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(!clustering->valid, GDF_VALIDITY_UNSUPPORTED);
// Initialize Nvgraph and wrap the graph
nvgraphHandle_t nvg_handle = nullptr;
nvgraphGraphDescr_t nvgraph_G = nullptr;
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, false));
int weight_index = 0;
// Make Nvgraph call
NVG_TRY(nvgraphAnalyzeClustering(nvg_handle,
nvgraph_G,
weight_index,
n_clusters,
(const int* )clustering->data,
NVGRAPH_MODULARITY,
score));
  NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G));
  NVG_TRY(nvgraphDestroy(nvg_handle));
  return GDF_SUCCESS;
}
gdf_error gdf_AnalyzeClustering_edge_cut_nvgraph(gdf_graph* gdf_G,
const int n_clusters,
gdf_column* clustering,
float* score) {
GDF_REQUIRE(gdf_G != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE((gdf_G->adjList != nullptr) || (gdf_G->edgeList != nullptr), GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(!clustering->valid, GDF_VALIDITY_UNSUPPORTED);
// Initialize Nvgraph and wrap the graph
nvgraphHandle_t nvg_handle = nullptr;
nvgraphGraphDescr_t nvgraph_G = nullptr;
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, false));
int weight_index = 0;
// Make Nvgraph call
NVG_TRY(nvgraphAnalyzeClustering(nvg_handle,
nvgraph_G,
weight_index,
n_clusters,
(const int* )clustering->data,
NVGRAPH_EDGE_CUT,
score));
  NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G));
  NVG_TRY(nvgraphDestroy(nvg_handle));
  return GDF_SUCCESS;
}
gdf_error gdf_AnalyzeClustering_ratio_cut_nvgraph(gdf_graph* gdf_G,
const int n_clusters,
gdf_column* clustering,
float* score) {
GDF_REQUIRE(gdf_G != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE((gdf_G->adjList != nullptr) || (gdf_G->edgeList != nullptr), GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(clustering->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(!clustering->valid, GDF_VALIDITY_UNSUPPORTED);
// Initialize Nvgraph and wrap the graph
nvgraphHandle_t nvg_handle = nullptr;
nvgraphGraphDescr_t nvgraph_G = nullptr;
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, false));
int weight_index = 0;
// Make Nvgraph call
NVG_TRY(nvgraphAnalyzeClustering(nvg_handle,
nvgraph_G,
weight_index,
n_clusters,
(const int* )clustering->data,
NVGRAPH_RATIO_CUT,
score));
  NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G));
  NVG_TRY(nvgraphDestroy(nvg_handle));
  return GDF_SUCCESS;
}
|
c322ebcbe615d183742238ce8b206bd30231553a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
__global__ void Disassemble_gpu(double Xinv[],double Zs[],double oldAF[], double newAF[], int numBlocks, int lesslen);
void printm(double A[6][6]);
void printa(double A[], int x);
void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[], float times[])
{
double *d_oldAF, *d_newAF,*d_Xinv;
double *d_zs;
float time1;
float time2;
hipEvent_t beginEvent1;
hipEvent_t endEvent1;
hipEvent_t beginEvent2;
hipEvent_t endEvent2;
int newlen = (int) morelen*4;
int numBlocks = (int) morelen-lesslen;
hipEventCreate( &beginEvent1 );
hipEventCreate( &endEvent1 );
hipEventRecord( beginEvent1, 0 );
hipMalloc(&d_zs,sizeof(double)*(morelen)*6*26);
hipMalloc(&d_newAF,sizeof(double)*(newlen)*6);
hipMalloc(&d_oldAF,sizeof(double)*(lesslen)*4*6);
hipMalloc(&d_Xinv,sizeof(double)*(lesslen)*5*5);
hipMemcpy(d_zs, Zs, sizeof(double)*(morelen)*6*26, hipMemcpyHostToDevice);
hipMemcpy(d_Xinv, nXs, sizeof(double)*(lesslen)*5*5, hipMemcpyHostToDevice);
hipMemcpy(d_oldAF, OldAF, sizeof(double)*(lesslen)*4*6, hipMemcpyHostToDevice);
hipEventRecord( endEvent1, 0 );
hipEventSynchronize( endEvent1 );
hipEventElapsedTime( &time1, beginEvent1, endEvent1 );
dim3 dimBlock(6, 6,1);
dim3 dimGrid(numBlocks,1,1);
hipLaunchKernelGGL(( Disassemble_gpu), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Xinv, d_zs, d_oldAF, d_newAF, morelen,lesslen);
hipEventCreate( &beginEvent2 );
hipEventCreate( &endEvent2 );
hipEventRecord( beginEvent2, 0 );
hipMemcpy(AF, d_newAF,sizeof(double)*(newlen)*6, hipMemcpyDeviceToHost);
hipEventRecord( endEvent2, 0 );
hipEventSynchronize( endEvent2 );
hipEventElapsedTime( &time2, beginEvent2, endEvent2 );
if(odd ==1)
{
for (int r = 0; r<6;r++)
{
AF[r*morelen*4+morelen*4-4]=OldAF[r*lesslen*4+lesslen*4-4];
AF[r*morelen*4+morelen*4-3]=OldAF[r*lesslen*4+lesslen*4-3];
AF[r*morelen*4+morelen*4-2]=OldAF[r*lesslen*4+lesslen*4-2];
AF[r*morelen*4+morelen*4-1]=OldAF[r*lesslen*4+lesslen*4-1];
}
}
//std::cin.get();
times[0] += time1+time2;
hipFree(d_zs);
hipFree(d_newAF);
hipFree(d_oldAF);
hipFree(d_Xinv);
}
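// Illustrative sketch, not part of the original file: the hipMalloc/hipMemcpy
// calls above discard their return codes. A minimal checking macro (which would
// normally live near the top of the file and wrap each runtime call) could look
// like the following; HIP_CHECK is an assumed, hypothetical name.
#define HIP_CHECK(call) \
  do { \
    hipError_t hip_check_err_ = (call); \
    if (hip_check_err_ != hipSuccess) { \
      std::cerr << "HIP error: " << hipGetErrorString(hip_check_err_) \
                << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
    } \
  } while (0)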
| c322ebcbe615d183742238ce8b206bd30231553a.cu | #include <iostream>
__global__ void Disassemble_gpu(double Xinv[],double Zs[],double oldAF[], double newAF[], int numBlocks, int lesslen);
void printm(double A[6][6]);
void printa(double A[], int x);
void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[], float times[])
{
double *d_oldAF, *d_newAF,*d_Xinv;
double *d_zs;
float time1;
float time2;
cudaEvent_t beginEvent1;
cudaEvent_t endEvent1;
cudaEvent_t beginEvent2;
cudaEvent_t endEvent2;
int newlen = (int) morelen*4;
int numBlocks = (int) morelen-lesslen;
cudaEventCreate( &beginEvent1 );
cudaEventCreate( &endEvent1 );
cudaEventRecord( beginEvent1, 0 );
cudaMalloc(&d_zs,sizeof(double)*(morelen)*6*26);
cudaMalloc(&d_newAF,sizeof(double)*(newlen)*6);
cudaMalloc(&d_oldAF,sizeof(double)*(lesslen)*4*6);
cudaMalloc(&d_Xinv,sizeof(double)*(lesslen)*5*5);
cudaMemcpy(d_zs, Zs, sizeof(double)*(morelen)*6*26, cudaMemcpyHostToDevice);
cudaMemcpy(d_Xinv, nXs, sizeof(double)*(lesslen)*5*5, cudaMemcpyHostToDevice);
cudaMemcpy(d_oldAF, OldAF, sizeof(double)*(lesslen)*4*6, cudaMemcpyHostToDevice);
cudaEventRecord( endEvent1, 0 );
cudaEventSynchronize( endEvent1 );
cudaEventElapsedTime( &time1, beginEvent1, endEvent1 );
dim3 dimBlock(6, 6,1);
dim3 dimGrid(numBlocks,1,1);
Disassemble_gpu<<<dimGrid, dimBlock>>>(d_Xinv, d_zs, d_oldAF, d_newAF, morelen,lesslen);
cudaEventCreate( &beginEvent2 );
cudaEventCreate( &endEvent2 );
cudaEventRecord( beginEvent2, 0 );
cudaMemcpy(AF, d_newAF,sizeof(double)*(newlen)*6, cudaMemcpyDeviceToHost);
cudaEventRecord( endEvent2, 0 );
cudaEventSynchronize( endEvent2 );
cudaEventElapsedTime( &time2, beginEvent2, endEvent2 );
if(odd ==1)
{
for (int r = 0; r<6;r++)
{
AF[r*morelen*4+morelen*4-4]=OldAF[r*lesslen*4+lesslen*4-4];
AF[r*morelen*4+morelen*4-3]=OldAF[r*lesslen*4+lesslen*4-3];
AF[r*morelen*4+morelen*4-2]=OldAF[r*lesslen*4+lesslen*4-2];
AF[r*morelen*4+morelen*4-1]=OldAF[r*lesslen*4+lesslen*4-1];
}
}
//std::cin.get();
times[0] += time1+time2;
cudaFree(d_zs);
cudaFree(d_newAF);
cudaFree(d_oldAF);
cudaFree(d_Xinv);
}
|
4bfab23d3ee9a52186846358a3120e2d00ff453d.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <c10/util/Exception.h>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <TH/THGeneral.h>
#include <hipsparse.h>
namespace at { namespace native { namespace sparse { namespace cuda {
std::string hipsparseGetErrorString(hipsparseStatus_t status) {
switch(status)
{
case HIPSPARSE_STATUS_SUCCESS:
return "success";
case HIPSPARSE_STATUS_NOT_INITIALIZED:
return "library not initialized";
case HIPSPARSE_STATUS_ALLOC_FAILED:
return "resource allocation failed";
case HIPSPARSE_STATUS_INVALID_VALUE:
return "an invalid numeric value was used as an argument";
case HIPSPARSE_STATUS_ARCH_MISMATCH:
return "an absent device architectural feature is required";
case HIPSPARSE_STATUS_MAPPING_ERROR:
return "an access to GPU memory space failed";
case HIPSPARSE_STATUS_EXECUTION_FAILED:
return "the GPU program failed to execute";
case HIPSPARSE_STATUS_INTERNAL_ERROR:
return "an internal operation failed";
case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "the matrix type is not supported by this function";
case HIPSPARSE_STATUS_ZERO_PIVOT:
return "an entry of the matrix is either structural zero or numerical zero (singular block)";
default:
{
std::ostringstream oss;
oss << "unknown error " << static_cast<int64_t>(status);
return oss.str();
}
}
}
inline void CUSPARSE_CHECK(hipsparseStatus_t status)
{
if (status != HIPSPARSE_STATUS_SUCCESS) {
AT_ERROR("cusparse runtime error: ", hipsparseGetErrorString(status));
}
}
inline hipsparseHandle_t setCUDASparseStream() {
hipsparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseSetStream(handle, at::hip::getCurrentHIPStreamMasqueradingAsCUDA());
return handle;
}
void Xcoo2csr(const int *coorowind, int64_t nnz, int64_t m, int *csrrowptr) {
AT_CHECK((m <= INT_MAX) && (nnz <= INT_MAX),
"hipsparseXcoo2csr only supports m, nnz with the bound [val] <= ",
INT_MAX);
auto handle = setCUDASparseStream();
CUSPARSE_CHECK(hipsparseXcoo2csr(handle, coorowind, nnz, m, csrrowptr,
TH_INDEX_BASE ? HIPSPARSE_INDEX_BASE_ONE : HIPSPARSE_INDEX_BASE_ZERO
));
}
hipsparseOperation_t convertTransToCusparseOperation(char trans) {
if (trans == 't') return HIPSPARSE_OPERATION_TRANSPOSE;
else if (trans == 'n') return HIPSPARSE_OPERATION_NON_TRANSPOSE;
else if (trans == 'c') return HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE;
else {
AT_ERROR("trans must be one of: t, n, c");
}
}
void adjustLd(char transb, int64_t m, int64_t n, int64_t k, int64_t *ldb, int64_t *ldc)
{
int transb_ = ((transb == 't') || (transb == 'T'));
if(n == 1)
*ldc = m;
if(transb_)
{
if(k == 1)
*ldb = n;
}
else
{
if(n == 1)
*ldb = k;
}
}
/* Level 3 */
void Scsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, float alpha, float *csrvala, int *csrrowptra, int *csrcolinda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
hipsparseOperation_t opa = convertTransToCusparseOperation(transa);
hipsparseOperation_t opb = convertTransToCusparseOperation(transb);
AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"hipsparseScsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = setCUDASparseStream();
hipsparseMatDescr_t desc;
hipsparseCreateMatDescr(&desc);
#if TH_INDEX_BASE == 1
  hipsparseSetMatIndexBase(desc, HIPSPARSE_INDEX_BASE_ONE);
#endif
CUSPARSE_CHECK(hipsparseScsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
}
void Dcsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, double alpha, double *csrvala, int *csrrowptra, int *csrcolinda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
hipsparseOperation_t opa = convertTransToCusparseOperation(transa);
hipsparseOperation_t opb = convertTransToCusparseOperation(transb);
AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"hipsparseDcsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = setCUDASparseStream();
hipsparseMatDescr_t desc;
hipsparseCreateMatDescr(&desc);
#if TH_INDEX_BASE == 1
  hipsparseSetMatIndexBase(desc, HIPSPARSE_INDEX_BASE_ONE);
#endif
CUSPARSE_CHECK(hipsparseDcsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
// TODO: I think this leaks the matrix descriptor. Proper fix is to create
// real descriptor classes
}
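// Illustrative sketch, not part of the original file: one way to address the
// descriptor leak noted in the TODO above is a small RAII guard. The name
// MatDescrGuard is an assumption; only hipsparseCreateMatDescr and
// hipsparseDestroyMatDescr from the hipSPARSE API are relied upon.
struct MatDescrGuard {
  hipsparseMatDescr_t desc{nullptr};
  MatDescrGuard() { hipsparseCreateMatDescr(&desc); }
  ~MatDescrGuard() { if (desc) hipsparseDestroyMatDescr(desc); }
  MatDescrGuard(const MatDescrGuard&) = delete;
  MatDescrGuard& operator=(const MatDescrGuard&) = delete;
};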
/* format conversion */
void CreateIdentityPermutation(int64_t nnz, int *P) {
AT_CHECK((nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
auto handle = setCUDASparseStream();
hipsparseCreateIdentityPermutation(handle, i_nnz, P);
}
void Xcsrsort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, const int *csrColInd, size_t *pBufferSizeInBytes)
{
AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <=",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = setCUDASparseStream();
CUSPARSE_CHECK(hipsparseXcsrsort_bufferSizeExt(handle, i_m, i_n, i_nnz, csrRowPtr, csrColInd, pBufferSizeInBytes));
}
void Xcsrsort(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, int *csrColInd, int *P, void *pBuffer)
{
AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = setCUDASparseStream();
hipsparseMatDescr_t desc;
hipsparseCreateMatDescr(&desc);
#if TH_INDEX_BASE == 1
  hipsparseSetMatIndexBase(desc, HIPSPARSE_INDEX_BASE_ONE);
#endif
CUSPARSE_CHECK(hipsparseXcsrsort(handle, i_m, i_n, i_nnz, desc, csrRowPtr, csrColInd, P, pBuffer));
// TODO: I think this leaks the matrix descriptor.
}
void Xcoosort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes)
{
AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcoosort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = setCUDASparseStream();
CUSPARSE_CHECK(hipsparseXcoosort_bufferSizeExt(handle, i_m, i_n, i_nnz, cooRows, cooCols, pBufferSizeInBytes));
}
void XcoosortByRow(int64_t m, int64_t n, int64_t nnz, int *cooRows, int *cooCols, int *P, void *pBuffer)
{
AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"XcoosortByRow only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = setCUDASparseStream();
CUSPARSE_CHECK(hipsparseXcoosortByRow(handle, i_m, i_n, i_nnz, cooRows, cooCols, P, pBuffer));
}
}}}} // namespace at::native::sparse::cuda
| 4bfab23d3ee9a52186846358a3120e2d00ff453d.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Exception.h>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <TH/THGeneral.h>
#include <cusparse.h>
namespace at { namespace native { namespace sparse { namespace cuda {
std::string cusparseGetErrorString(cusparseStatus_t status) {
switch(status)
{
case CUSPARSE_STATUS_SUCCESS:
return "success";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "library not initialized";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "resource allocation failed";
case CUSPARSE_STATUS_INVALID_VALUE:
return "an invalid numeric value was used as an argument";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "an absent device architectural feature is required";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "an access to GPU memory space failed";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "the GPU program failed to execute";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "an internal operation failed";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "the matrix type is not supported by this function";
case CUSPARSE_STATUS_ZERO_PIVOT:
return "an entry of the matrix is either structural zero or numerical zero (singular block)";
default:
{
std::ostringstream oss;
oss << "unknown error " << static_cast<int64_t>(status);
return oss.str();
}
}
}
inline void CUSPARSE_CHECK(cusparseStatus_t status)
{
if (status != CUSPARSE_STATUS_SUCCESS) {
AT_ERROR("cusparse runtime error: ", cusparseGetErrorString(status));
}
}
inline cusparseHandle_t setCUDASparseStream() {
cusparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle();
cusparseSetStream(handle, at::cuda::getCurrentCUDAStream());
return handle;
}
void Xcoo2csr(const int *coorowind, int64_t nnz, int64_t m, int *csrrowptr) {
AT_CHECK((m <= INT_MAX) && (nnz <= INT_MAX),
"cusparseXcoo2csr only supports m, nnz with the bound [val] <= ",
INT_MAX);
auto handle = setCUDASparseStream();
CUSPARSE_CHECK(cusparseXcoo2csr(handle, coorowind, nnz, m, csrrowptr,
TH_INDEX_BASE ? CUSPARSE_INDEX_BASE_ONE : CUSPARSE_INDEX_BASE_ZERO
));
}
cusparseOperation_t convertTransToCusparseOperation(char trans) {
if (trans == 't') return CUSPARSE_OPERATION_TRANSPOSE;
else if (trans == 'n') return CUSPARSE_OPERATION_NON_TRANSPOSE;
else if (trans == 'c') return CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE;
else {
AT_ERROR("trans must be one of: t, n, c");
}
}
void adjustLd(char transb, int64_t m, int64_t n, int64_t k, int64_t *ldb, int64_t *ldc)
{
int transb_ = ((transb == 't') || (transb == 'T'));
if(n == 1)
*ldc = m;
if(transb_)
{
if(k == 1)
*ldb = n;
}
else
{
if(n == 1)
*ldb = k;
}
}
/* Level 3 */
void Scsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, float alpha, float *csrvala, int *csrrowptra, int *csrcolinda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
cusparseOperation_t opa = convertTransToCusparseOperation(transa);
cusparseOperation_t opb = convertTransToCusparseOperation(transb);
AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"cusparseScsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = setCUDASparseStream();
cusparseMatDescr_t desc;
cusparseCreateMatDescr(&desc);
#if TH_INDEX_BASE == 1
  cusparseSetMatIndexBase(desc, CUSPARSE_INDEX_BASE_ONE);
#endif
CUSPARSE_CHECK(cusparseScsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
}
void Dcsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, double alpha, double *csrvala, int *csrrowptra, int *csrcolinda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
cusparseOperation_t opa = convertTransToCusparseOperation(transa);
cusparseOperation_t opb = convertTransToCusparseOperation(transb);
AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"cusparseDcsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = setCUDASparseStream();
cusparseMatDescr_t desc;
cusparseCreateMatDescr(&desc);
#if TH_INDEX_BASE == 1
  cusparseSetMatIndexBase(desc, CUSPARSE_INDEX_BASE_ONE);
#endif
CUSPARSE_CHECK(cusparseDcsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
// TODO: I think this leaks the matrix descriptor. Proper fix is to create
// real descriptor classes
}
/* format conversion */
void CreateIdentityPermutation(int64_t nnz, int *P) {
AT_CHECK((nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
auto handle = setCUDASparseStream();
cusparseCreateIdentityPermutation(handle, i_nnz, P);
}
void Xcsrsort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, const int *csrColInd, size_t *pBufferSizeInBytes)
{
AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <=",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = setCUDASparseStream();
CUSPARSE_CHECK(cusparseXcsrsort_bufferSizeExt(handle, i_m, i_n, i_nnz, csrRowPtr, csrColInd, pBufferSizeInBytes));
}
void Xcsrsort(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, int *csrColInd, int *P, void *pBuffer)
{
AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = setCUDASparseStream();
cusparseMatDescr_t desc;
cusparseCreateMatDescr(&desc);
#if TH_INDEX_BASE == 1
cusparseSetMatIndexBase(&desc, CUSPARSE_INDEX_BASE_ONE);
#endif
CUSPARSE_CHECK(cusparseXcsrsort(handle, i_m, i_n, i_nnz, desc, csrRowPtr, csrColInd, P, pBuffer));
// TODO: I think this leaks the matrix descriptor.
}
void Xcoosort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes)
{
AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcoosort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = setCUDASparseStream();
CUSPARSE_CHECK(cusparseXcoosort_bufferSizeExt(handle, i_m, i_n, i_nnz, cooRows, cooCols, pBufferSizeInBytes));
}
void XcoosortByRow(int64_t m, int64_t n, int64_t nnz, int *cooRows, int *cooCols, int *P, void *pBuffer)
{
AT_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"XcoosortByRow only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = setCUDASparseStream();
CUSPARSE_CHECK(cusparseXcoosortByRow(handle, i_m, i_n, i_nnz, cooRows, cooCols, P, pBuffer));
}
}}}} // namespace at::native::sparse::cuda
|
43aa95da78603fe5db52bfa2dc2050512c7a65fb.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <algorithm>
#include <cstdio>
#include "paddle/fluid/inference/tensorrt/plugin/deformable_conv_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaximumNumBlocks);
}
static inline int ConvOutputSize(int input_size, int filter_size, int dilation,
int padding, int stride) {
const int dkernel = dilation * (filter_size - 1) + 1;
int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
return output_size;
}
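// Worked example (illustrative, not part of the original file): with
// input_size = 32, filter_size = 3, dilation = 1, padding = 1 and stride = 1,
// dkernel = 1 * (3 - 1) + 1 = 3 and output_size = (32 + 2 * 1 - 3) / 1 + 1 = 32,
// i.e. this padding choice preserves the spatial size.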
nvinfer1::Weights DeformableConvPlugin::copyToDevice(const void* hostData,
size_t count) {
int num_bytes = (data_type_ == nvinfer1::DataType::kFLOAT ? 4 : 2);
void* deviceData;
PADDLE_ENFORCE_GPU_SUCCESS(hipMalloc(&deviceData, count * num_bytes));
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpy(deviceData, hostData, count * num_bytes,
hipMemcpyHostToDevice));
return nvinfer1::Weights{data_type_, deviceData, int64_t(count)};
}
void DeformableConvPlugin::serializeFromDevice(
void** hostBuffer, const nvinfer1::Weights& deviceWeights) const {
int num_bytes = (data_type_ == nvinfer1::DataType::kFLOAT ? 4 : 2);
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemcpy(static_cast<char*>(*hostBuffer), deviceWeights.values,
deviceWeights.count * num_bytes, hipMemcpyDeviceToHost));
*hostBuffer =
reinterpret_cast<char*>(*hostBuffer) + deviceWeights.count * num_bytes;
}
nvinfer1::Weights DeformableConvPlugin::deserializeToDevice(
const void** hostBuffer, size_t count) {
int num_bytes = (data_type_ == nvinfer1::DataType::kFLOAT ? 4 : 2);
nvinfer1::Weights w =
copyToDevice(static_cast<const char*>(*hostBuffer), count);
*hostBuffer = reinterpret_cast<const char*>(*hostBuffer) + count * num_bytes;
return w;
}
DeformableConvPlugin::DeformableConvPlugin(
const nvinfer1::DataType data_type, const nvinfer1::Weights& weights,
const std::vector<int>& kernel_dims, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& dilations,
const int groups, const int deformable_groups, const int im2col_step,
const bool with_fp16)
: data_type_(data_type),
groups_(groups),
deformable_groups_(deformable_groups),
im2col_step_(im2col_step),
with_fp16_(with_fp16) {
weights_ = copyToDevice(weights.values, weights.count);
kernel_dims_.insert(kernel_dims_.end(), kernel_dims.cbegin(),
kernel_dims.cend());
strides_.insert(strides_.end(), strides.cbegin(), strides.cend());
paddings_.insert(paddings_.end(), paddings.cbegin(), paddings.cend());
dilations_.insert(dilations_.end(), dilations.cbegin(), dilations.cend());
PADDLE_ENFORCE_EQ(data_type_ == nvinfer1::DataType::kFLOAT ||
data_type_ == nvinfer1::DataType::kHALF,
true,
platform::errors::InvalidArgument(
"The DeformableConv TRT Plugin's input type "
"should be float or half."));
PADDLE_ENFORCE_EQ(
paddings_.size(), strides_.size(),
platform::errors::InvalidArgument(
"The size of paddings (%d) is not equal to the size of strides (%d).",
paddings_.size(), strides_.size()));
}
DeformableConvPlugin::DeformableConvPlugin(
const nvinfer1::DataType data_type, const nvinfer1::Weights& weights,
const std::vector<int>& kernel_dims, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& dilations,
const int groups, const int deformable_groups, const int im2col_step,
const std::vector<int>& input_dim, const std::vector<int>& offset_dim,
const std::vector<int>& mask_dim, const std::vector<int>& output_dim,
const bool with_fp16)
: data_type_(data_type),
groups_(groups),
deformable_groups_(deformable_groups),
im2col_step_(im2col_step),
with_fp16_(with_fp16) {
weights_ = copyToDevice(weights.values, weights.count);
kernel_dims_.insert(kernel_dims_.end(), kernel_dims.cbegin(),
kernel_dims.cend());
strides_.insert(strides_.end(), strides.cbegin(), strides.cend());
paddings_.insert(paddings_.end(), paddings.cbegin(), paddings.cend());
dilations_.insert(dilations_.end(), dilations.cbegin(), dilations.cend());
input_dim_.insert(input_dim_.end(), input_dim.cbegin(), input_dim.cend());
offset_dim_.insert(offset_dim_.end(), offset_dim.cbegin(), offset_dim.cend());
mask_dim_.insert(mask_dim_.end(), mask_dim.cbegin(), mask_dim.cend());
output_dim_.insert(output_dim_.end(), output_dim.cbegin(), output_dim.cend());
PADDLE_ENFORCE_EQ(data_type_ == nvinfer1::DataType::kFLOAT ||
data_type_ == nvinfer1::DataType::kHALF,
true,
platform::errors::InvalidArgument(
"The DeformableConv TRT Plugin's input type "
"should be float or half."));
PADDLE_ENFORCE_EQ(
paddings_.size(), strides_.size(),
platform::errors::InvalidArgument(
"The size of paddings (%d) is not equal to the size of strides (%d).",
paddings_.size(), strides_.size()));
}
DeformableConvPlugin::DeformableConvPlugin(const void* data, size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &strides_);
DeserializeValue(&data, &length, &paddings_);
DeserializeValue(&data, &length, &dilations_);
DeserializeValue(&data, &length, &groups_);
DeserializeValue(&data, &length, &deformable_groups_);
DeserializeValue(&data, &length, &im2col_step_);
DeserializeValue(&data, &length, &kernel_dims_);
int64_t count;
DeserializeValue(&data, &length, &count);
weights_ = deserializeToDevice(&data, count);
DeserializeValue(&data, &length, &input_dim_);
DeserializeValue(&data, &length, &offset_dim_);
DeserializeValue(&data, &length, &mask_dim_);
DeserializeValue(&data, &length, &output_dim_);
DeserializeValue(&data, &length, &with_fp16_);
}
DeformableConvPlugin::~DeformableConvPlugin() {
if (weights_.values) {
hipFree(const_cast<void*>(weights_.values));
weights_.values = nullptr;
}
}
const char* DeformableConvPlugin::getPluginType() const TRT_NOEXCEPT {
return "deformable_conv_plugin";
}
const char* DeformableConvPlugin::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
int DeformableConvPlugin::getNbOutputs() const TRT_NOEXCEPT { return 1; }
nvinfer1::Dims DeformableConvPlugin::getOutputDimensions(
int index, const nvinfer1::Dims* inputs, int nb_input_dims) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(nb_input_dims, 3,
platform::errors::InvalidArgument(
"The number of inputs should be equal to 3, but got %d",
nb_input_dims));
nvinfer1::Dims ret;
ret.nbDims = inputs[0].nbDims;
ret.d[0] = kernel_dims_[0];
ret.d[1] = ConvOutputSize(inputs[0].d[1], kernel_dims_[2], dilations_[0],
paddings_[0], strides_[0]);
ret.d[2] = ConvOutputSize(inputs[0].d[2], kernel_dims_[3], dilations_[1],
paddings_[1], strides_[1]);
return ret;
}
bool DeformableConvPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::TensorFormat format) const TRT_NOEXCEPT {
if (with_fp16_) {
#ifdef TRT_PLUGIN_FP16_AVALIABLE
return (type == nvinfer1::DataType::kHALF) &&
(format == nvinfer1::TensorFormat::kLINEAR);
#else
return (type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::TensorFormat::kLINEAR);
#endif
} else {
return (type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::TensorFormat::kLINEAR);
}
}
size_t DeformableConvPlugin::getWorkspaceSize(int max_batch_size) const
TRT_NOEXCEPT {
int c_i = input_dim_[0], h_i = input_dim_[1], w_i = input_dim_[2];
int k_h = kernel_dims_[2], k_w = kernel_dims_[3];
int c_o = output_dim_[0], h_o = output_dim_[1], w_o = output_dim_[2];
int num_bytes = (data_type_ == nvinfer1::DataType::kFLOAT ? 4 : 2);
size_t data_col_size = static_cast<size_t>(c_i * k_h * k_w * im2col_step_ *
h_o * w_o * num_bytes);
return data_col_size;
}
int DeformableConvPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs, void* workspace,
#else
void* const* outputs, void* workspace,
#endif
hipStream_t stream) TRT_NOEXCEPT {
if (data_type_ == nvinfer1::DataType::kFLOAT) {
enqueue_impl<float>(batch_size, inputs, outputs, workspace, stream);
} else if (data_type_ == nvinfer1::DataType::kHALF) {
#if TRT_PLUGIN_FP16_AVALIABLE
enqueue_impl<half>(batch_size, inputs, outputs, workspace, stream);
#else
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Current CUDA arch does not support fp16. Please use fp32 instead."));
#endif
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The DeformableConv TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
template <typename T>
__device__ T kFloor(T x);
template <>
__device__ half kFloor<half>(half x) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
return hfloor(x);
#endif
}
template <>
__device__ float kFloor<float>(float x) {
return floor(x);
}
template <typename T>
__device__ T DmcnIm2colBilinear(const T* bottom_data, const int data_width,
const int height, const int width, T h, T w);
template <>
__device__ float DmcnIm2colBilinear<float>(const float* bottom_data,
const int data_width,
const int height, const int width,
float h, float w) {
int h_low = kFloor<float>(h);
int w_low = kFloor<float>(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
float h_low_t = h_low, w_low_t = w_low, one = 1.0f;
float lh = h - h_low_t;
float lw = w - w_low_t;
float hh = one - lh, hw = one - lw;
float v1 = 0;
if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low];
float v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
float v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
float v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
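// Worked example (illustrative, not part of the original file): for h = 1.25 and
// w = 2.5 the four neighbouring pixels are (1, 2), (1, 3), (2, 2) and (2, 3);
// lh = 0.25 and lw = 0.5, so the weights are w1 = 0.375, w2 = 0.375, w3 = 0.125
// and w4 = 0.125, and the result is the area-weighted average of those values.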
template <>
__device__ half DmcnIm2colBilinear<half>(const half* bottom_data,
const int data_width, const int height,
const int width, half h, half w) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
int h_low = kFloor<half>(h);
int w_low = kFloor<half>(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
half h_low_t = h_low, w_low_t = w_low, one = 1.0f;
half lh = h - h_low_t;
half lw = w - w_low_t;
half hh = one - lh, hw = one - lw;
half v1 = 0;
if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low];
half v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
half v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
half v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
half w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
half val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
#endif
}
template <typename T>
__global__ void ModulatedDeformableIm2colGpuKernel(
const int nthreads, const T* data_im, const T* data_offset,
const T* data_mask, const int height, const int width, const int kernel_h,
const int kernel_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int channel_per_deformable_group, const int batch_size,
const int num_channels, const int deformable_group, const int height_col,
const int width_col, T* data_col);
template <>
__global__ void ModulatedDeformableIm2colGpuKernel<float>(
const int nthreads, const float* data_im, const float* data_offset,
const float* data_mask, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col, float* data_col) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
float minus_one = -1.0f, height_t = height, width_t = width;
for (size_t i = index; i < nthreads; i += offset) {
const int w_col = i % width_col;
const int h_col = (i / width_col) % height_col;
const int b_col = (i / width_col) / height_col % batch_size;
const int c_im = (i / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
float* data_col_ptr =
data_col +
((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
const float* data_im_ptr =
data_im + (b_col * num_channels + c_im) * height * width;
const float* data_offset_ptr =
data_offset + (b_col * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const float* data_mask_ptr =
data_mask + (b_col * deformable_group + deformable_group_index) *
kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
const int data_mask_hw_ptr =
((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
float val = 0;
float h_im_t = h_in + i * dilation_h, w_im_t = w_in + j * dilation_w;
const float h_im = h_im_t + offset_h;
const float w_im = w_im_t + offset_w;
if (h_im > minus_one && w_im > minus_one && h_im < height_t &&
w_im < width_t) {
val = DmcnIm2colBilinear<float>(data_im_ptr, width, height, width,
h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
template <>
__global__ void ModulatedDeformableIm2colGpuKernel<half>(
const int nthreads, const half* data_im, const half* data_offset,
const half* data_mask, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col, half* data_col) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
half minus_one = -1.0f, height_t = height, width_t = width;
for (size_t i = index; i < nthreads; i += offset) {
const int w_col = i % width_col;
const int h_col = (i / width_col) % height_col;
const int b_col = (i / width_col) / height_col % batch_size;
const int c_im = (i / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
half* data_col_ptr =
data_col +
((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
const half* data_im_ptr =
data_im + (b_col * num_channels + c_im) * height * width;
const half* data_offset_ptr =
data_offset + (b_col * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const half* data_mask_ptr =
data_mask + (b_col * deformable_group + deformable_group_index) *
kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
const int data_mask_hw_ptr =
((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const half offset_h = data_offset_ptr[data_offset_h_ptr];
const half offset_w = data_offset_ptr[data_offset_w_ptr];
const half mask = data_mask_ptr[data_mask_hw_ptr];
half val = 0;
half h_im_t = h_in + i * dilation_h, w_im_t = w_in + j * dilation_w;
const half h_im = h_im_t + offset_h;
const half w_im = w_im_t + offset_w;
if (h_im > minus_one && w_im > minus_one && h_im < height_t &&
w_im < width_t) {
val = DmcnIm2colBilinear<half>(data_im_ptr, width, height, width,
h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
#endif
}
template <typename T>
void gemm_impl(hipblasHandle_t handle, hipblasOperation_t transa,
hipblasOperation_t transb, int m, int n, int k, const T* alpha,
const T* A, int lda, const T* B, int ldb, const T* beta, T* C,
int ldc);
template <>
void gemm_impl<float>(hipblasHandle_t handle, hipblasOperation_t transa,
hipblasOperation_t transb, int m, int n, int k,
const float* alpha, const float* A, int lda,
const float* B, int ldb, const float* beta, float* C,
int ldc) {
platform::dynload::hipblasSgemm(handle, transa, transb, m, n, k, alpha, A, lda,
B, ldb, beta, C, ldc);
}
template <>
void gemm_impl<half>(hipblasHandle_t handle, hipblasOperation_t transa,
hipblasOperation_t transb, int m, int n, int k,
const half* alpha, const half* A, int lda, const half* B,
int ldb, const half* beta, half* C, int ldc) {
#if TRT_PLUGIN_FP16_AVALIABLE
platform::dynload::hipblasHgemm(handle, transa, transb, m, n, k, alpha, A, lda,
B, ldb, beta, C, ldc);
#else
  PADDLE_THROW(platform::errors::InvalidArgument(
      "Current CUDA arch does not support fp16. Please use fp32 instead."));
#endif
}
template <typename T>
int DeformableConvPlugin::enqueue_impl(int batch_size,
const void* const* inputs,
void* const* outputs, void* workspace,
hipStream_t stream) {
const T* input = reinterpret_cast<const T*>(inputs[0]);
const T* offset = reinterpret_cast<const T*>(inputs[1]);
const T* mask = reinterpret_cast<const T*>(inputs[2]);
const T* filter = reinterpret_cast<const T*>(weights_.values);
T* output = reinterpret_cast<T*>(outputs[0]);
int c_i = input_dim_[0], h_i = input_dim_[1], w_i = input_dim_[2];
int k_h = kernel_dims_[2], k_w = kernel_dims_[3];
int c_o = output_dim_[0], h_o = output_dim_[1], w_o = output_dim_[2];
int input_stride = c_i * h_i * w_i;
int offset_stride = offset_dim_[0] * offset_dim_[1] * offset_dim_[2];
int mask_stride = mask_dim_[0] * mask_dim_[1] * mask_dim_[2];
int output_stride = c_o * h_o * w_o;
int M = c_o / groups_;
int N = im2col_step_ * h_o * w_o;
int K = c_i * k_h * k_w / groups_;
// c_i / deformable_groups
int channel_per_deformable_group = c_i / deformable_groups_;
// c_i * im2col_step * h_o * w_o
int num_kernels = c_i * im2col_step_ * h_o * w_o;
int blocks = NumBlocks(num_kernels);
int threads = kNumCUDAThreads;
T alpha = static_cast<T>(1.0f);
T beta = static_cast<T>(0.0f);
for (int i = 0; i < batch_size / im2col_step_; ++i) {
const T* data_im = input + i * im2col_step_ * input_stride;
const T* data_offset = offset + i * im2col_step_ * offset_stride;
const T* data_mask = mask + i * im2col_step_ * mask_stride;
T* data_col = reinterpret_cast<T*>(workspace);
hipLaunchKernelGGL(( ModulatedDeformableIm2colGpuKernel<T>), dim3(blocks), dim3(threads), 0, stream,
num_kernels, data_im, data_offset, data_mask, h_i, w_i, k_h, k_w,
paddings_[0], paddings_[1], strides_[0], strides_[1], dilations_[0],
dilations_[1], channel_per_deformable_group, im2col_step_, c_i,
deformable_groups_, h_o, w_o, data_col);
for (int g = 0; g < groups_; ++g) {
const T* weight = filter + g * M * K;
const T* col = data_col + g * K * N;
T* out = output + i * im2col_step_ * output_stride + g * M * N;
gemm_impl<T>(cublasHandle_, HIPBLAS_OP_N, HIPBLAS_OP_N, N, M, K, &alpha,
col, N, weight, K, &beta, out, N);
}
}
return 0;
}
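// Worked example (illustrative, not part of the original file): with c_i = 64,
// k_h = k_w = 3, groups_ = 1, im2col_step_ = 1 and h_o = w_o = 32, the per-group
// GEMM shapes above are K = 64 * 3 * 3 = 576, N = 1 * 32 * 32 = 1024 and
// M = c_o, i.e. the filter block (M x K) multiplies the im2col buffer (K x N)
// to produce an (M x N) output slice for each group.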
int DeformableConvPlugin::initialize() TRT_NOEXCEPT { return 0; }
void DeformableConvPlugin::terminate() TRT_NOEXCEPT {}
size_t DeformableConvPlugin::getSerializationSize() const TRT_NOEXCEPT {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(strides_);
serialize_size += SerializedSize(paddings_);
serialize_size += SerializedSize(dilations_);
serialize_size += SerializedSize(groups_);
serialize_size += SerializedSize(deformable_groups_);
serialize_size += SerializedSize(im2col_step_);
serialize_size += SerializedSize(kernel_dims_);
serialize_size += SerializedSize(weights_.count);
int num_bytes = (data_type_ == nvinfer1::DataType::kFLOAT ? 4 : 2);
serialize_size += weights_.count * num_bytes;
serialize_size += SerializedSize(input_dim_);
serialize_size += SerializedSize(offset_dim_);
serialize_size += SerializedSize(mask_dim_);
serialize_size += SerializedSize(output_dim_);
serialize_size += SerializedSize(with_fp16_);
return serialize_size;
}
void DeformableConvPlugin::serialize(void* buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, strides_);
SerializeValue(&buffer, paddings_);
SerializeValue(&buffer, dilations_);
SerializeValue(&buffer, groups_);
SerializeValue(&buffer, deformable_groups_);
SerializeValue(&buffer, im2col_step_);
SerializeValue(&buffer, kernel_dims_);
SerializeValue(&buffer, weights_.count);
serializeFromDevice(&buffer, weights_);
SerializeValue(&buffer, input_dim_);
SerializeValue(&buffer, offset_dim_);
SerializeValue(&buffer, mask_dim_);
SerializeValue(&buffer, output_dim_);
SerializeValue(&buffer, with_fp16_);
}
void DeformableConvPlugin::destroy() TRT_NOEXCEPT {}
void DeformableConvPlugin::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
namespace_ = std::string(lib_namespace);
}
const char* DeformableConvPlugin::getPluginNamespace() const TRT_NOEXCEPT {
return namespace_.c_str();
}
nvinfer1::DataType DeformableConvPlugin::getOutputDataType(
int index, const nvinfer1::DataType* input_type,
int nb_inputs) const TRT_NOEXCEPT {
return input_type[0];
}
bool DeformableConvPlugin::isOutputBroadcastAcrossBatch(
int output_index, const bool* input_is_broadcast,
int nb_inputs) const TRT_NOEXCEPT {
return false;
}
bool DeformableConvPlugin::canBroadcastInputAcrossBatch(int input_index) const
TRT_NOEXCEPT {
return false;
}
void DeformableConvPlugin::attachToContext(
cudnnContext* cudnnContext, cublasContext* cublasContext,
nvinfer1::IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {
cublasHandle_ = cublasContext;
}
void DeformableConvPlugin::configurePlugin(
const nvinfer1::Dims* input_dims, int nb_inputs,
const nvinfer1::Dims* output_dims, int nb_outputs,
const nvinfer1::DataType* input_types,
const nvinfer1::DataType* output_types, const bool* input_is_broadcast,
const bool* output_is_broadcast, nvinfer1::PluginFormat float_format,
int max_batct_size) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(
nb_inputs, 3,
platform::errors::InvalidArgument(
"The number of inputs should be equal to 3, but got %d", nb_inputs));
PADDLE_ENFORCE_EQ(
nb_outputs, 1,
      platform::errors::InvalidArgument(
          "The number of outputs should be equal to 1, but got %d", nb_outputs));
for (int i = 0; i < input_dims[0].nbDims; i++) {
input_dim_.push_back(input_dims[0].d[i]);
}
for (int i = 0; i < input_dims[1].nbDims; i++) {
offset_dim_.push_back(input_dims[1].d[i]);
}
for (int i = 0; i < input_dims[2].nbDims; i++) {
mask_dim_.push_back(input_dims[2].d[i]);
}
for (int i = 0; i < output_dims[0].nbDims; i++) {
output_dim_.push_back(output_dims[0].d[i]);
}
}
nvinfer1::IPluginV2Ext* DeformableConvPlugin::clone() const TRT_NOEXCEPT {
return new DeformableConvPlugin(
data_type_, weights_, kernel_dims_, strides_, paddings_, dilations_,
groups_, deformable_groups_, im2col_step_, input_dim_, offset_dim_,
mask_dim_, output_dim_, with_fp16_);
}
void DeformableConvPluginCreator::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
namespace_ = std::string(lib_namespace);
}
const char* DeformableConvPluginCreator::getPluginNamespace() const
TRT_NOEXCEPT {
return namespace_.c_str();
}
const char* DeformableConvPluginCreator::getPluginName() const TRT_NOEXCEPT {
return "deformable_conv_plugin";
}
const char* DeformableConvPluginCreator::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
const nvinfer1::PluginFieldCollection*
DeformableConvPluginCreator::getFieldNames() TRT_NOEXCEPT {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* DeformableConvPluginCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) TRT_NOEXCEPT {
const nvinfer1::PluginField* fields = fc->fields;
nvinfer1::DataType data_type;
std::vector<int> strides, paddings, dilations, kernel_dims;
nvinfer1::Weights weights;
int groups = -1;
int deformable_groups = -1;
int im2col_step = -1;
bool with_fp16 = false;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string field_name(fc->fields[i].name);
if (field_name.compare("data_type") == 0) {
data_type = *static_cast<const nvinfer1::DataType*>(fc->fields[i].data);
    } else if (field_name.compare("strides") == 0) {
      const int length = fc->fields[i].length;
      const int* data = static_cast<const int*>(fc->fields[i].data);
      strides.insert(strides.end(), data, data + length);
    } else if (field_name.compare("paddings") == 0) {
      const int length = fc->fields[i].length;
      const int* data = static_cast<const int*>(fc->fields[i].data);
      paddings.insert(paddings.end(), data, data + length);
    } else if (field_name.compare("dilations") == 0) {
      const int length = fc->fields[i].length;
      const int* data = static_cast<const int*>(fc->fields[i].data);
      dilations.insert(dilations.end(), data, data + length);
    } else if (field_name.compare("groups") == 0) {
      groups = *static_cast<const int*>(fc->fields[i].data);
    } else if (field_name.compare("deformable_groups") == 0) {
      deformable_groups = *static_cast<const int*>(fc->fields[i].data);
    } else if (field_name.compare("im2col_step") == 0) {
      im2col_step = *static_cast<const int*>(fc->fields[i].data);
    } else if (field_name.compare("kernel_dims") == 0) {
      const int length = fc->fields[i].length;
      const int* data = static_cast<const int*>(fc->fields[i].data);
      kernel_dims.insert(kernel_dims.end(), data, data + length);
    } else if (field_name.compare("weights") == 0) {
      weights.count = fc->fields[i].length;
      weights.values = fc->fields[i].data;
    } else if (field_name.compare("with_fp16") == 0) {
with_fp16 = *static_cast<const bool*>(fc->fields[i].data);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Unknown plugin field name [%s] in the DeformableConv TRT Plugin.",
field_name));
}
}
weights.type = data_type;
return new DeformableConvPlugin(data_type, weights, kernel_dims, strides,
paddings, dilations, groups,
deformable_groups, im2col_step, with_fp16);
}
nvinfer1::IPluginV2Ext* DeformableConvPluginCreator::deserializePlugin(
const char* name, const void* serial_data,
size_t serial_length) TRT_NOEXCEPT {
auto plugin = new DeformableConvPlugin(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 43aa95da78603fe5db52bfa2dc2050512c7a65fb.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <cstdio>
#include "paddle/fluid/inference/tensorrt/plugin/deformable_conv_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaximumNumBlocks);
}
static inline int ConvOutputSize(int input_size, int filter_size, int dilation,
int padding, int stride) {
const int dkernel = dilation * (filter_size - 1) + 1;
int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
return output_size;
}
nvinfer1::Weights DeformableConvPlugin::copyToDevice(const void* hostData,
size_t count) {
int num_bytes = (data_type_ == nvinfer1::DataType::kFLOAT ? 4 : 2);
void* deviceData;
PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc(&deviceData, count * num_bytes));
PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpy(deviceData, hostData, count * num_bytes,
cudaMemcpyHostToDevice));
return nvinfer1::Weights{data_type_, deviceData, int64_t(count)};
}
void DeformableConvPlugin::serializeFromDevice(
void** hostBuffer, const nvinfer1::Weights& deviceWeights) const {
int num_bytes = (data_type_ == nvinfer1::DataType::kFLOAT ? 4 : 2);
PADDLE_ENFORCE_GPU_SUCCESS(
cudaMemcpy(static_cast<char*>(*hostBuffer), deviceWeights.values,
deviceWeights.count * num_bytes, cudaMemcpyDeviceToHost));
*hostBuffer =
reinterpret_cast<char*>(*hostBuffer) + deviceWeights.count * num_bytes;
}
nvinfer1::Weights DeformableConvPlugin::deserializeToDevice(
const void** hostBuffer, size_t count) {
int num_bytes = (data_type_ == nvinfer1::DataType::kFLOAT ? 4 : 2);
nvinfer1::Weights w =
copyToDevice(static_cast<const char*>(*hostBuffer), count);
*hostBuffer = reinterpret_cast<const char*>(*hostBuffer) + count * num_bytes;
return w;
}
DeformableConvPlugin::DeformableConvPlugin(
const nvinfer1::DataType data_type, const nvinfer1::Weights& weights,
const std::vector<int>& kernel_dims, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& dilations,
const int groups, const int deformable_groups, const int im2col_step,
const bool with_fp16)
: data_type_(data_type),
groups_(groups),
deformable_groups_(deformable_groups),
im2col_step_(im2col_step),
with_fp16_(with_fp16) {
weights_ = copyToDevice(weights.values, weights.count);
kernel_dims_.insert(kernel_dims_.end(), kernel_dims.cbegin(),
kernel_dims.cend());
strides_.insert(strides_.end(), strides.cbegin(), strides.cend());
paddings_.insert(paddings_.end(), paddings.cbegin(), paddings.cend());
dilations_.insert(dilations_.end(), dilations.cbegin(), dilations.cend());
PADDLE_ENFORCE_EQ(data_type_ == nvinfer1::DataType::kFLOAT ||
data_type_ == nvinfer1::DataType::kHALF,
true,
platform::errors::InvalidArgument(
"The DeformableConv TRT Plugin's input type "
"should be float or half."));
PADDLE_ENFORCE_EQ(
paddings_.size(), strides_.size(),
platform::errors::InvalidArgument(
"The size of paddings (%d) is not equal to the size of strides (%d).",
paddings_.size(), strides_.size()));
}
DeformableConvPlugin::DeformableConvPlugin(
const nvinfer1::DataType data_type, const nvinfer1::Weights& weights,
const std::vector<int>& kernel_dims, const std::vector<int>& strides,
const std::vector<int>& paddings, const std::vector<int>& dilations,
const int groups, const int deformable_groups, const int im2col_step,
const std::vector<int>& input_dim, const std::vector<int>& offset_dim,
const std::vector<int>& mask_dim, const std::vector<int>& output_dim,
const bool with_fp16)
: data_type_(data_type),
groups_(groups),
deformable_groups_(deformable_groups),
im2col_step_(im2col_step),
with_fp16_(with_fp16) {
weights_ = copyToDevice(weights.values, weights.count);
kernel_dims_.insert(kernel_dims_.end(), kernel_dims.cbegin(),
kernel_dims.cend());
strides_.insert(strides_.end(), strides.cbegin(), strides.cend());
paddings_.insert(paddings_.end(), paddings.cbegin(), paddings.cend());
dilations_.insert(dilations_.end(), dilations.cbegin(), dilations.cend());
input_dim_.insert(input_dim_.end(), input_dim.cbegin(), input_dim.cend());
offset_dim_.insert(offset_dim_.end(), offset_dim.cbegin(), offset_dim.cend());
mask_dim_.insert(mask_dim_.end(), mask_dim.cbegin(), mask_dim.cend());
output_dim_.insert(output_dim_.end(), output_dim.cbegin(), output_dim.cend());
PADDLE_ENFORCE_EQ(data_type_ == nvinfer1::DataType::kFLOAT ||
data_type_ == nvinfer1::DataType::kHALF,
true,
platform::errors::InvalidArgument(
"The DeformableConv TRT Plugin's input type "
"should be float or half."));
PADDLE_ENFORCE_EQ(
paddings_.size(), strides_.size(),
platform::errors::InvalidArgument(
"The size of paddings (%d) is not equal to the size of strides (%d).",
paddings_.size(), strides_.size()));
}
DeformableConvPlugin::DeformableConvPlugin(const void* data, size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &strides_);
DeserializeValue(&data, &length, &paddings_);
DeserializeValue(&data, &length, &dilations_);
DeserializeValue(&data, &length, &groups_);
DeserializeValue(&data, &length, &deformable_groups_);
DeserializeValue(&data, &length, &im2col_step_);
DeserializeValue(&data, &length, &kernel_dims_);
int64_t count;
DeserializeValue(&data, &length, &count);
weights_ = deserializeToDevice(&data, count);
DeserializeValue(&data, &length, &input_dim_);
DeserializeValue(&data, &length, &offset_dim_);
DeserializeValue(&data, &length, &mask_dim_);
DeserializeValue(&data, &length, &output_dim_);
DeserializeValue(&data, &length, &with_fp16_);
}
DeformableConvPlugin::~DeformableConvPlugin() {
if (weights_.values) {
cudaFree(const_cast<void*>(weights_.values));
weights_.values = nullptr;
}
}
const char* DeformableConvPlugin::getPluginType() const TRT_NOEXCEPT {
return "deformable_conv_plugin";
}
const char* DeformableConvPlugin::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
int DeformableConvPlugin::getNbOutputs() const TRT_NOEXCEPT { return 1; }
nvinfer1::Dims DeformableConvPlugin::getOutputDimensions(
int index, const nvinfer1::Dims* inputs, int nb_input_dims) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(nb_input_dims, 3,
platform::errors::InvalidArgument(
"The number of inputs should be equal to 3, but got %d",
nb_input_dims));
nvinfer1::Dims ret;
ret.nbDims = inputs[0].nbDims;
ret.d[0] = kernel_dims_[0];
ret.d[1] = ConvOutputSize(inputs[0].d[1], kernel_dims_[2], dilations_[0],
paddings_[0], strides_[0]);
ret.d[2] = ConvOutputSize(inputs[0].d[2], kernel_dims_[3], dilations_[1],
paddings_[1], strides_[1]);
return ret;
}
bool DeformableConvPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::TensorFormat format) const TRT_NOEXCEPT {
if (with_fp16_) {
#ifdef TRT_PLUGIN_FP16_AVALIABLE
return (type == nvinfer1::DataType::kHALF) &&
(format == nvinfer1::TensorFormat::kLINEAR);
#else
return (type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::TensorFormat::kLINEAR);
#endif
} else {
return (type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::TensorFormat::kLINEAR);
}
}
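// Workspace is the im2col column buffer: c_in * k_h * k_w * im2col_step * h_out * w_out elements.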
size_t DeformableConvPlugin::getWorkspaceSize(int max_batch_size) const
TRT_NOEXCEPT {
int c_i = input_dim_[0], h_i = input_dim_[1], w_i = input_dim_[2];
int k_h = kernel_dims_[2], k_w = kernel_dims_[3];
int c_o = output_dim_[0], h_o = output_dim_[1], w_o = output_dim_[2];
int num_bytes = (data_type_ == nvinfer1::DataType::kFLOAT ? 4 : 2);
size_t data_col_size = static_cast<size_t>(c_i * k_h * k_w * im2col_step_ *
h_o * w_o * num_bytes);
return data_col_size;
}
int DeformableConvPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs, void* workspace,
#else
void* const* outputs, void* workspace,
#endif
cudaStream_t stream) TRT_NOEXCEPT {
if (data_type_ == nvinfer1::DataType::kFLOAT) {
enqueue_impl<float>(batch_size, inputs, outputs, workspace, stream);
} else if (data_type_ == nvinfer1::DataType::kHALF) {
#if TRT_PLUGIN_FP16_AVALIABLE
enqueue_impl<half>(batch_size, inputs, outputs, workspace, stream);
#else
PADDLE_THROW(platform::errors::InvalidArgument(
"Current CUDA arch dose not support fp16. Please use fp32 instead."));
#endif
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The DeformableConv TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
template <typename T>
__device__ T kFloor(T x);
template <>
__device__ half kFloor<half>(half x) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
return hfloor(x);
#endif
}
template <>
__device__ float kFloor<float>(float x) {
return floor(x);
}
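// Bilinear interpolation of the input feature map at a fractional (h, w); neighbors that fall outside the map contribute zero.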
template <typename T>
__device__ T DmcnIm2colBilinear(const T* bottom_data, const int data_width,
const int height, const int width, T h, T w);
template <>
__device__ float DmcnIm2colBilinear<float>(const float* bottom_data,
const int data_width,
const int height, const int width,
float h, float w) {
int h_low = kFloor<float>(h);
int w_low = kFloor<float>(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
float h_low_t = h_low, w_low_t = w_low, one = 1.0f;
float lh = h - h_low_t;
float lw = w - w_low_t;
float hh = one - lh, hw = one - lw;
float v1 = 0;
if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low];
float v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
float v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
float v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <>
__device__ half DmcnIm2colBilinear<half>(const half* bottom_data,
const int data_width, const int height,
const int width, half h, half w) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
int h_low = kFloor<half>(h);
int w_low = kFloor<half>(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
half h_low_t = h_low, w_low_t = w_low, one = 1.0f;
half lh = h - h_low_t;
half lw = w - w_low_t;
half hh = one - lh, hw = one - lw;
half v1 = 0;
if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low];
half v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
half v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
half v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
half w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
half val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
#endif
}
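// Modulated deformable im2col: each thread handles one (channel, output position) pair and writes its k_h * k_w
// column entries, sampling the input at the offset-shifted kernel positions and scaling by the modulation mask.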
template <typename T>
__global__ void ModulatedDeformableIm2colGpuKernel(
const int nthreads, const T* data_im, const T* data_offset,
const T* data_mask, const int height, const int width, const int kernel_h,
const int kernel_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int channel_per_deformable_group, const int batch_size,
const int num_channels, const int deformable_group, const int height_col,
const int width_col, T* data_col);
template <>
__global__ void ModulatedDeformableIm2colGpuKernel<float>(
const int nthreads, const float* data_im, const float* data_offset,
const float* data_mask, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col, float* data_col) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
float minus_one = -1.0f, height_t = height, width_t = width;
for (size_t i = index; i < nthreads; i += offset) {
const int w_col = i % width_col;
const int h_col = (i / width_col) % height_col;
const int b_col = (i / width_col) / height_col % batch_size;
const int c_im = (i / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
float* data_col_ptr =
data_col +
((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
const float* data_im_ptr =
data_im + (b_col * num_channels + c_im) * height * width;
const float* data_offset_ptr =
data_offset + (b_col * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const float* data_mask_ptr =
data_mask + (b_col * deformable_group + deformable_group_index) *
kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
const int data_mask_hw_ptr =
((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
float val = 0;
float h_im_t = h_in + i * dilation_h, w_im_t = w_in + j * dilation_w;
const float h_im = h_im_t + offset_h;
const float w_im = w_im_t + offset_w;
if (h_im > minus_one && w_im > minus_one && h_im < height_t &&
w_im < width_t) {
val = DmcnIm2colBilinear<float>(data_im_ptr, width, height, width,
h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
template <>
__global__ void ModulatedDeformableIm2colGpuKernel<half>(
const int nthreads, const half* data_im, const half* data_offset,
const half* data_mask, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col, half* data_col) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
half minus_one = -1.0f, height_t = height, width_t = width;
for (size_t i = index; i < nthreads; i += offset) {
const int w_col = i % width_col;
const int h_col = (i / width_col) % height_col;
const int b_col = (i / width_col) / height_col % batch_size;
const int c_im = (i / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
half* data_col_ptr =
data_col +
((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
const half* data_im_ptr =
data_im + (b_col * num_channels + c_im) * height * width;
const half* data_offset_ptr =
data_offset + (b_col * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const half* data_mask_ptr =
data_mask + (b_col * deformable_group + deformable_group_index) *
kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
const int data_mask_hw_ptr =
((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const half offset_h = data_offset_ptr[data_offset_h_ptr];
const half offset_w = data_offset_ptr[data_offset_w_ptr];
const half mask = data_mask_ptr[data_mask_hw_ptr];
half val = 0;
half h_im_t = h_in + i * dilation_h, w_im_t = w_in + j * dilation_w;
const half h_im = h_im_t + offset_h;
const half w_im = w_im_t + offset_w;
if (h_im > minus_one && w_im > minus_one && h_im < height_t &&
w_im < width_t) {
val = DmcnIm2colBilinear<half>(data_im_ptr, width, height, width,
h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
#endif
}
template <typename T>
void gemm_impl(cublasHandle_t handle, cublasOperation_t transa,
cublasOperation_t transb, int m, int n, int k, const T* alpha,
const T* A, int lda, const T* B, int ldb, const T* beta, T* C,
int ldc);
template <>
void gemm_impl<float>(cublasHandle_t handle, cublasOperation_t transa,
cublasOperation_t transb, int m, int n, int k,
const float* alpha, const float* A, int lda,
const float* B, int ldb, const float* beta, float* C,
int ldc) {
platform::dynload::cublasSgemm(handle, transa, transb, m, n, k, alpha, A, lda,
B, ldb, beta, C, ldc);
}
template <>
void gemm_impl<half>(cublasHandle_t handle, cublasOperation_t transa,
cublasOperation_t transb, int m, int n, int k,
const half* alpha, const half* A, int lda, const half* B,
int ldb, const half* beta, half* C, int ldc) {
#if TRT_PLUGIN_FP16_AVALIABLE
platform::dynload::cublasHgemm(handle, transa, transb, m, n, k, alpha, A, lda,
B, ldb, beta, C, ldc);
#else
PADDLE_THROW(platform::errors::InvalidArgument(
"Current CUDA arch dose not support fp16. Please use fp32 instead."));
#endif
}
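// For each im2col_step slice of the batch: expand the inputs into the column buffer (workspace), then run one GEMM per group.
// The cuBLAS call passes col first with dimensions (N, M, K) so the row-major output comes out correctly from column-major cuBLAS.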
template <typename T>
int DeformableConvPlugin::enqueue_impl(int batch_size,
const void* const* inputs,
void* const* outputs, void* workspace,
cudaStream_t stream) {
const T* input = reinterpret_cast<const T*>(inputs[0]);
const T* offset = reinterpret_cast<const T*>(inputs[1]);
const T* mask = reinterpret_cast<const T*>(inputs[2]);
const T* filter = reinterpret_cast<const T*>(weights_.values);
T* output = reinterpret_cast<T*>(outputs[0]);
int c_i = input_dim_[0], h_i = input_dim_[1], w_i = input_dim_[2];
int k_h = kernel_dims_[2], k_w = kernel_dims_[3];
int c_o = output_dim_[0], h_o = output_dim_[1], w_o = output_dim_[2];
int input_stride = c_i * h_i * w_i;
int offset_stride = offset_dim_[0] * offset_dim_[1] * offset_dim_[2];
int mask_stride = mask_dim_[0] * mask_dim_[1] * mask_dim_[2];
int output_stride = c_o * h_o * w_o;
int M = c_o / groups_;
int N = im2col_step_ * h_o * w_o;
int K = c_i * k_h * k_w / groups_;
// c_i / deformable_groups
int channel_per_deformable_group = c_i / deformable_groups_;
// c_i * im2col_step * h_o * w_o
int num_kernels = c_i * im2col_step_ * h_o * w_o;
int blocks = NumBlocks(num_kernels);
int threads = kNumCUDAThreads;
T alpha = static_cast<T>(1.0f);
T beta = static_cast<T>(0.0f);
for (int i = 0; i < batch_size / im2col_step_; ++i) {
const T* data_im = input + i * im2col_step_ * input_stride;
const T* data_offset = offset + i * im2col_step_ * offset_stride;
const T* data_mask = mask + i * im2col_step_ * mask_stride;
T* data_col = reinterpret_cast<T*>(workspace);
ModulatedDeformableIm2colGpuKernel<T><<<blocks, threads, 0, stream>>>(
num_kernels, data_im, data_offset, data_mask, h_i, w_i, k_h, k_w,
paddings_[0], paddings_[1], strides_[0], strides_[1], dilations_[0],
dilations_[1], channel_per_deformable_group, im2col_step_, c_i,
deformable_groups_, h_o, w_o, data_col);
for (int g = 0; g < groups_; ++g) {
const T* weight = filter + g * M * K;
const T* col = data_col + g * K * N;
T* out = output + i * im2col_step_ * output_stride + g * M * N;
gemm_impl<T>(cublasHandle_, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K, &alpha,
col, N, weight, K, &beta, out, N);
}
}
return 0;
}
int DeformableConvPlugin::initialize() TRT_NOEXCEPT { return 0; }
void DeformableConvPlugin::terminate() TRT_NOEXCEPT {}
size_t DeformableConvPlugin::getSerializationSize() const TRT_NOEXCEPT {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(strides_);
serialize_size += SerializedSize(paddings_);
serialize_size += SerializedSize(dilations_);
serialize_size += SerializedSize(groups_);
serialize_size += SerializedSize(deformable_groups_);
serialize_size += SerializedSize(im2col_step_);
serialize_size += SerializedSize(kernel_dims_);
serialize_size += SerializedSize(weights_.count);
int num_bytes = (data_type_ == nvinfer1::DataType::kFLOAT ? 4 : 2);
serialize_size += weights_.count * num_bytes;
serialize_size += SerializedSize(input_dim_);
serialize_size += SerializedSize(offset_dim_);
serialize_size += SerializedSize(mask_dim_);
serialize_size += SerializedSize(output_dim_);
serialize_size += SerializedSize(with_fp16_);
return serialize_size;
}
void DeformableConvPlugin::serialize(void* buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, strides_);
SerializeValue(&buffer, paddings_);
SerializeValue(&buffer, dilations_);
SerializeValue(&buffer, groups_);
SerializeValue(&buffer, deformable_groups_);
SerializeValue(&buffer, im2col_step_);
SerializeValue(&buffer, kernel_dims_);
SerializeValue(&buffer, weights_.count);
serializeFromDevice(&buffer, weights_);
SerializeValue(&buffer, input_dim_);
SerializeValue(&buffer, offset_dim_);
SerializeValue(&buffer, mask_dim_);
SerializeValue(&buffer, output_dim_);
SerializeValue(&buffer, with_fp16_);
}
void DeformableConvPlugin::destroy() TRT_NOEXCEPT {}
void DeformableConvPlugin::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
namespace_ = std::string(lib_namespace);
}
const char* DeformableConvPlugin::getPluginNamespace() const TRT_NOEXCEPT {
return namespace_.c_str();
}
nvinfer1::DataType DeformableConvPlugin::getOutputDataType(
int index, const nvinfer1::DataType* input_type,
int nb_inputs) const TRT_NOEXCEPT {
return input_type[0];
}
bool DeformableConvPlugin::isOutputBroadcastAcrossBatch(
int output_index, const bool* input_is_broadcast,
int nb_inputs) const TRT_NOEXCEPT {
return false;
}
bool DeformableConvPlugin::canBroadcastInputAcrossBatch(int input_index) const
TRT_NOEXCEPT {
return false;
}
void DeformableConvPlugin::attachToContext(
cudnnContext* cudnnContext, cublasContext* cublasContext,
nvinfer1::IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {
cublasHandle_ = cublasContext;
}
void DeformableConvPlugin::configurePlugin(
const nvinfer1::Dims* input_dims, int nb_inputs,
const nvinfer1::Dims* output_dims, int nb_outputs,
const nvinfer1::DataType* input_types,
const nvinfer1::DataType* output_types, const bool* input_is_broadcast,
const bool* output_is_broadcast, nvinfer1::PluginFormat float_format,
int max_batct_size) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(
nb_inputs, 3,
platform::errors::InvalidArgument(
"The number of inputs should be equal to 3, but got %d", nb_inputs));
PADDLE_ENFORCE_EQ(
nb_outputs, 1,
platform::errors::InvalidArgument(
"The number of inputs should be equal to 1, but got %d", nb_outputs));
for (int i = 0; i < input_dims[0].nbDims; i++) {
input_dim_.push_back(input_dims[0].d[i]);
}
for (int i = 0; i < input_dims[1].nbDims; i++) {
offset_dim_.push_back(input_dims[1].d[i]);
}
for (int i = 0; i < input_dims[2].nbDims; i++) {
mask_dim_.push_back(input_dims[2].d[i]);
}
for (int i = 0; i < output_dims[0].nbDims; i++) {
output_dim_.push_back(output_dims[0].d[i]);
}
}
nvinfer1::IPluginV2Ext* DeformableConvPlugin::clone() const TRT_NOEXCEPT {
return new DeformableConvPlugin(
data_type_, weights_, kernel_dims_, strides_, paddings_, dilations_,
groups_, deformable_groups_, im2col_step_, input_dim_, offset_dim_,
mask_dim_, output_dim_, with_fp16_);
}
void DeformableConvPluginCreator::setPluginNamespace(const char* lib_namespace)
TRT_NOEXCEPT {
namespace_ = std::string(lib_namespace);
}
const char* DeformableConvPluginCreator::getPluginNamespace() const
TRT_NOEXCEPT {
return namespace_.c_str();
}
const char* DeformableConvPluginCreator::getPluginName() const TRT_NOEXCEPT {
return "deformable_conv_plugin";
}
const char* DeformableConvPluginCreator::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
const nvinfer1::PluginFieldCollection*
DeformableConvPluginCreator::getFieldNames() TRT_NOEXCEPT {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* DeformableConvPluginCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) TRT_NOEXCEPT {
const nvinfer1::PluginField* fields = fc->fields;
nvinfer1::DataType data_type;
std::vector<int> strides, paddings, dilations, kernel_dims;
nvinfer1::Weights weights;
int groups = -1;
int deformable_groups = -1;
int im2col_step = -1;
bool with_fp16 = false;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string field_name(fc->fields[i].name);
if (field_name.compare("data_type") == 0) {
data_type = *static_cast<const nvinfer1::DataType*>(fc->fields[i].data);
} else if (field_name.compare("strides")) {
const int length = fc->fields[i].length;
const int* data = static_cast<const int*>(fc->fields[i].data);
strides.insert(strides.end(), data, data + length);
} else if (field_name.compare("paddings")) {
const int length = fc->fields[i].length;
const int* data = static_cast<const int*>(fc->fields[i].data);
paddings.insert(paddings.end(), data, data + length);
} else if (field_name.compare("dilations")) {
const int length = fc->fields[i].length;
const int* data = static_cast<const int*>(fc->fields[i].data);
dilations.insert(dilations.end(), data, data + length);
} else if (field_name.compare("groups")) {
groups = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("deformable_groups")) {
deformable_groups = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("im2col_step")) {
im2col_step = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("kernel_dims")) {
const int length = fc->fields[i].length;
const int* data = static_cast<const int*>(fc->fields[i].data);
kernel_dims.insert(kernel_dims.end(), data, data + length);
} else if (field_name.compare("weights")) {
weights.count = fc->fields[i].length;
weights.values = fc->fields[i].data;
} else if (field_name.compare("with_fp16")) {
with_fp16 = *static_cast<const bool*>(fc->fields[i].data);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Unknown plugin field name [%s] in the DeformableConv TRT Plugin.",
field_name));
}
}
weights.type = data_type;
return new DeformableConvPlugin(data_type, weights, kernel_dims, strides,
paddings, dilations, groups,
deformable_groups, im2col_step, with_fp16);
}
nvinfer1::IPluginV2Ext* DeformableConvPluginCreator::deserializePlugin(
const char* name, const void* serial_data,
size_t serial_length) TRT_NOEXCEPT {
auto plugin = new DeformableConvPlugin(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
0aa35e191e9a911ad426400c8217f74dbc3f9e75.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <mpi.h>
#include <stdio.h>
#include <conf.h>
#include "inc/conf.h"
#include "utils/msg.h"
#include "mpi/wrapper.h" /* mini-MPI and -device */
#include "mpi/glb.h"
#include "d/api.h"
#include "utils/error.h"
#include "utils/cc.h"
#include "utils/mc.h"
#include "utils/kl.h"
#include "conf/imp.h"
#include "inc/type.h"
#include "inc/dev.h"
#include "dbg/imp.h"
#include "coords/ini.h"
#include "coords/imp.h"
const int n = 10;
Particle *pp;
Force *ff;
void alloc() {
Dalloc(&pp, n);
Dalloc(&ff, n);
}
void free() {
Dfree(pp);
Dfree(ff);
}
namespace dev {
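// Intentionally plant bad values (an out-of-domain position, a NaN velocity, an infinite force) so the dbg_check_* calls below have something to flag.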
__global__ void fill_bugs(int3 L, Particle *pp, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
Particle p;
p.r[0] = p.r[1] = p.r[2] = 0;
p.v[0] = p.v[1] = p.v[2] = 0;
if (i >= n) return;
if (i == 1) p.r[0] = 1.5 * L.x; // invalid position
if (i < 1) p.v[0] = 0.f / 0.f; // nan
pp[i] = p;
}
__global__ void fill_bugs(Force *ff, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
Force f;
f.f[0] = f.f[1] = f.f[2] = 0;
if (i >= n) return;
if (i < 1) f.f[0] = 1.f / 0.f; // inf
ff[i] = f;
}
} // dev
void fill_bugs(int3 L) {
KL(dev::fill_bugs, (k_cnf(n)), (L, pp, n));
KL(dev::fill_bugs, (k_cnf(n)), (ff, n));
}
void check(float dt, const Coords *c, Dbg *dbg) {
UC(dbg_check_pos (c, "flu", dbg, n, pp));
UC(dbg_check_vel (dt, c, "flu", dbg, n, pp));
UC(dbg_check_forces (dt, c, "flu.ff", dbg, n, pp, ff));
}
int main(int argc, char **argv) {
Dbg *dbg;
Config *cfg;
Coords *coords;
int3 L;
float dt;
int dims[3];
MPI_Comm cart;
m::ini(&argc, &argv);
m::get_dims(&argc, &argv, dims);
m::get_cart(MPI_COMM_WORLD, dims, &cart);
UC(conf_ini(&cfg));
UC(dbg_ini(&dbg));
UC(conf_read(argc, argv, cfg));
UC(conf_lookup_float(cfg, "time.dt", &dt));
UC(dbg_set_conf(cfg, dbg));
UC(coords_ini_conf(cart, cfg, &coords));
L = subdomain(coords);
alloc();
fill_bugs(L);
check(dt, coords, dbg);
free();
UC(dbg_fin(dbg));
UC(conf_fin(cfg));
UC(coords_fin(coords));
MC(m::Barrier(cart));
m::fin();
}
| 0aa35e191e9a911ad426400c8217f74dbc3f9e75.cu | #include <mpi.h>
#include <stdio.h>
#include <conf.h>
#include "inc/conf.h"
#include "utils/msg.h"
#include "mpi/wrapper.h" /* mini-MPI and -device */
#include "mpi/glb.h"
#include "d/api.h"
#include "utils/error.h"
#include "utils/cc.h"
#include "utils/mc.h"
#include "utils/kl.h"
#include "conf/imp.h"
#include "inc/type.h"
#include "inc/dev.h"
#include "dbg/imp.h"
#include "coords/ini.h"
#include "coords/imp.h"
const int n = 10;
Particle *pp;
Force *ff;
void alloc() {
Dalloc(&pp, n);
Dalloc(&ff, n);
}
void free() {
Dfree(pp);
Dfree(ff);
}
namespace dev {
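// Intentionally plant bad values (an out-of-domain position, a NaN velocity, an infinite force) so the dbg_check_* calls below have something to flag.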
__global__ void fill_bugs(int3 L, Particle *pp, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
Particle p;
p.r[0] = p.r[1] = p.r[2] = 0;
p.v[0] = p.v[1] = p.v[2] = 0;
if (i >= n) return;
if (i == 1) p.r[0] = 1.5 * L.x; // invalid position
if (i < 1) p.v[0] = 0.f / 0.f; // nan
pp[i] = p;
}
__global__ void fill_bugs(Force *ff, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
Force f;
f.f[0] = f.f[1] = f.f[2] = 0;
if (i >= n) return;
if (i < 1) f.f[0] = 1.f / 0.f; // inf
ff[i] = f;
}
} // dev
void fill_bugs(int3 L) {
KL(dev::fill_bugs, (k_cnf(n)), (L, pp, n));
KL(dev::fill_bugs, (k_cnf(n)), (ff, n));
}
void check(float dt, const Coords *c, Dbg *dbg) {
UC(dbg_check_pos (c, "flu", dbg, n, pp));
UC(dbg_check_vel (dt, c, "flu", dbg, n, pp));
UC(dbg_check_forces (dt, c, "flu.ff", dbg, n, pp, ff));
}
int main(int argc, char **argv) {
Dbg *dbg;
Config *cfg;
Coords *coords;
int3 L;
float dt;
int dims[3];
MPI_Comm cart;
m::ini(&argc, &argv);
m::get_dims(&argc, &argv, dims);
m::get_cart(MPI_COMM_WORLD, dims, &cart);
UC(conf_ini(&cfg));
UC(dbg_ini(&dbg));
UC(conf_read(argc, argv, cfg));
UC(conf_lookup_float(cfg, "time.dt", &dt));
UC(dbg_set_conf(cfg, dbg));
UC(coords_ini_conf(cart, cfg, &coords));
L = subdomain(coords);
alloc();
fill_bugs(L);
check(dt, coords, dbg);
free();
UC(dbg_fin(dbg));
UC(conf_fin(cfg));
UC(coords_fin(coords));
MC(m::Barrier(cart));
m::fin();
}
|
dcda3e7630e770c8015982663e64ae715fcb2b7c.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
// includes from project
// includes from CUDA
#include <hip/hip_runtime.h>
//#include <helper_math.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
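// Only lanes 0..7 of each warp run the integer add/sub loop; the remaining lanes idle, so the measurement isolates partial-warp INT-ALU activity.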
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive INT addition access
if((i%32)<=7){
for(unsigned k=0; k<iterations;k++) {
Value2= I1+I2;
Value3=I1-I2;
Value1-=Value2;
Value3+=Value1;
Value2-=Value3;
Value1+=Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random unsigned entries.
void RandomInit(unsigned* data, int n)
{
for (int i = 0; i < n; ++i){
srand((unsigned)time(0));
data[i] = rand() / RAND_MAX;
}
} | dcda3e7630e770c8015982663e64ae715fcb2b7c.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
// includes from project
// includes from CUDA
#include <cuda_runtime.h>
//#include <helper_math.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
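// Only lanes 0..7 of each warp run the integer add/sub loop; the remaining lanes idle, so the measurement isolates partial-warp INT-ALU activity.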
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive INT addition access
if((i%32)<=7){
for(unsigned k=0; k<iterations;k++) {
Value2= I1+I2;
Value3=I1-I2;
Value1-=Value2;
Value3+=Value1;
Value2-=Value3;
Value1+=Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random unsigned entries.
void RandomInit(unsigned* data, int n)
{
for (int i = 0; i < n; ++i){
srand((unsigned)time(0));
data[i] = rand() / RAND_MAX;
}
} |
30bef8abe112811a7aee016d64e5343151e5c2b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ppm.h"
#include <math.h>
#include <iostream>
#include <stdio.h>
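// One thread per pixel: dex points at the R byte of an interleaved RGB triplet, and the weighted luma is written back to all three channels.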
__global__
void colorToGreyScaleConversion(int* imdata,int* outimdata,int size){
int dex= 3*(threadIdx.x+blockIdx.x*blockDim.x);
if (dex>= size) return;
int r=imdata[dex];
int g=imdata[dex+1];
int b=imdata[dex+2];
int grey= round(255*( 0.21*(r/255.0)+0.71*(g/255.0)+0.07*(b/255.0)));
// printf("Grey value is : ")
outimdata[dex]=grey;
outimdata[dex+1]=grey;
outimdata[dex+2]=grey;
}
int main(){
ppm football("football.ppm");
int size=3*football.height*football.width;
int arsize=sizeof(int)*size;
std::cout <<"Size is: "<< size;
int* d_football_data;
int* d_gfootball_data;
hipMalloc((void**)&d_football_data,arsize);
hipMalloc((void**)&d_gfootball_data,arsize );
hipMemcpy(d_football_data,football.data,arsize,hipMemcpyHostToDevice);
hipMemcpy(d_gfootball_data,football.data,arsize,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( colorToGreyScaleConversion), dim3(1<<20) ,dim3(256), 0, 0, d_football_data,d_gfootball_data,size);
ppm gfootball(football);
hipMemcpy(gfootball.data,d_gfootball_data,arsize,hipMemcpyDeviceToHost);
gfootball.write("gfootball.ppm");
hipFree(d_gfootball_data);
hipFree(d_football_data);
}
| 30bef8abe112811a7aee016d64e5343151e5c2b8.cu | #include "ppm.h"
#include <math.h>
#include <iostream>
#include <stdio.h>
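// One thread per pixel: dex points at the R byte of an interleaved RGB triplet, and the weighted luma is written back to all three channels.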
__global__
void colorToGreyScaleConversion(int* imdata,int* outimdata,int size){
int dex= 3*(threadIdx.x+blockIdx.x*blockDim.x);
if (dex>= size) return;
int r=imdata[dex];
int g=imdata[dex+1];
int b=imdata[dex+2];
int grey= round(255*( 0.21*(r/255.0)+0.71*(g/255.0)+0.07*(b/255.0)));
// printf("Grey value is : ")
outimdata[dex]=grey;
outimdata[dex+1]=grey;
outimdata[dex+2]=grey;
}
int main(){
ppm football("football.ppm");
int size=3*football.height*football.width;
int arsize=sizeof(int)*size;
std::cout <<"Size is: "<< size;
int* d_football_data;
int* d_gfootball_data;
cudaMalloc((void**)&d_football_data,arsize);
cudaMalloc((void**)&d_gfootball_data,arsize );
cudaMemcpy(d_football_data,football.data,arsize,cudaMemcpyHostToDevice);
cudaMemcpy(d_gfootball_data,football.data,arsize,cudaMemcpyHostToDevice);
colorToGreyScaleConversion<<< 1<<20 ,256>>>(d_football_data,d_gfootball_data,size);
ppm gfootball(football);
cudaMemcpy(gfootball.data,d_gfootball_data,arsize,cudaMemcpyDeviceToHost);
gfootball.write("gfootball.ppm");
cudaFree(d_gfootball_data);
cudaFree(d_football_data);
}
|
35a3c3f1c580c76f4b0ae887a14c4fbcfd59bb34.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <hip/hip_cooperative_groups.h>
//#include <helper_cuda.h>
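// Each thread owns one row of A: the first n_inputs entries are inputs, and the next n_arith entries are computed in order
// as the sum or product (chosen by op_sel) of two earlier entries in the same row, indexed by B and C.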
__global__ void
ac(float *A, const int *B, const int *C, const int *op_sel, int n_inputs, int n_arith, int iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
int a_off= i*(n_inputs + n_arith) + n_inputs;
int idx_off= i*(n_inputs + n_arith);
for (int k=0; k<iter; k++) {
for (int j=0; j <n_arith; j++ ) {
if (op_sel[j] == 0)
A[a_off + j] = A[idx_off + B[j]] + A[idx_off + C[j]];
else
A[a_off + j] = A[idx_off + B[j]] * A[idx_off + C[j]];
}
}
}
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
int n_inputs= 32;
int n_arith= 256;
int batch_size= 2048;
int iter= 4096;
int n_tot= n_inputs + n_arith;
size_t size= batch_size * (n_tot) * sizeof(float);
size_t size_idx= n_arith * sizeof(int);
float *h_A= (float *)malloc(size);
int *h_B= (int *)malloc(size_idx);
int *h_C= (int *)malloc(size_idx);
int *h_op_sel= (int *) malloc(size_idx);
// Initialize the host input vectors
for (int i = 0; i < n_arith; ++i)
{
h_B[i] = rand() % (n_inputs + i);
h_C[i] = rand() % (n_inputs + i);
h_op_sel[i]= rand() % 2;
}
for (int i= 0; i < n_inputs; ++i) {
for (int b =0; b< batch_size; ++b) {
h_A[b* n_tot + i]= float(rand());
}
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_B = NULL;
err = hipMalloc((void **)&d_B, size_idx);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_C = NULL;
err = hipMalloc((void **)&d_C, size_idx);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_op_sel = NULL;
err = hipMalloc((void **)&d_op_sel, size_idx);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size_idx, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_C, h_C, size_idx, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_op_sel, h_op_sel, size_idx, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 64;
int blocksPerGrid= (batch_size + threadsPerBlock -1)/ threadsPerBlock;
struct timeval t1, t2;
// Perform Warmup
hipLaunchKernelGGL(( ac), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, iter);
// Finish execution of kernel
hipDeviceSynchronize();
gettimeofday(&t1, 0);
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( ac), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, iter);
// Finish execution of kernel
hipDeviceSynchronize();
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("Time of kernel: %3.4f ms \n", time);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Throughput: %.3f Gops/sec\n", (((1.0*batch_size*iter*n_arith))/time)/10E6);
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_A, d_A, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//for (int i=0; i<numElements; i++) {
for (int i=0; i<8; i++) {
printf("%d , %f\n", i, h_A[i]);
}
err = hipFree(d_A);
err = hipFree(d_B);
err = hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
printf("Done!\n");
return 0;
}
| 35a3c3f1c580c76f4b0ae887a14c4fbcfd59bb34.cu |
#include <stdio.h>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <sys/time.h>
#include <cooperative_groups.h>
//#include <helper_cuda.h>
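// Each thread owns one row of A: the first n_inputs entries are inputs, and the next n_arith entries are computed in order
// as the sum or product (chosen by op_sel) of two earlier entries in the same row, indexed by B and C.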
__global__ void
ac(float *A, const int *B, const int *C, const int *op_sel, int n_inputs, int n_arith, int iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
int a_off= i*(n_inputs + n_arith) + n_inputs;
int idx_off= i*(n_inputs + n_arith);
for (int k=0; k<iter; k++) {
for (int j=0; j <n_arith; j++ ) {
if (op_sel[j] == 0)
A[a_off + j] = A[idx_off + B[j]] + A[idx_off + C[j]];
else
A[a_off + j] = A[idx_off + B[j]] * A[idx_off + C[j]];
}
}
}
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
int n_inputs= 32;
int n_arith= 256;
int batch_size= 2048;
int iter= 4096;
int n_tot= n_inputs + n_arith;
size_t size= batch_size * (n_tot) * sizeof(float);
size_t size_idx= n_arith * sizeof(int);
float *h_A= (float *)malloc(size);
int *h_B= (int *)malloc(size_idx);
int *h_C= (int *)malloc(size_idx);
int *h_op_sel= (int *) malloc(size_idx);
// Initialize the host input vectors
for (int i = 0; i < n_arith; ++i)
{
h_B[i] = rand() % (n_inputs + i);
h_C[i] = rand() % (n_inputs + i);
h_op_sel[i]= rand() % 2;
}
for (int i= 0; i < n_inputs; ++i) {
for (int b =0; b< batch_size; ++b) {
h_A[b* n_tot + i]= float(rand());
}
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_B = NULL;
err = cudaMalloc((void **)&d_B, size_idx);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_C = NULL;
err = cudaMalloc((void **)&d_C, size_idx);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_op_sel = NULL;
err = cudaMalloc((void **)&d_op_sel, size_idx);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size_idx, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_C, h_C, size_idx, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_op_sel, h_op_sel, size_idx, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 64;
int blocksPerGrid= (batch_size + threadsPerBlock -1)/ threadsPerBlock;
struct timeval t1, t2;
// Perform Warmup
ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, iter);
// Finish execution of kernel
cudaDeviceSynchronize();
gettimeofday(&t1, 0);
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, iter);
// Finish execution of kernel
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("Time of kernel: %3.4f ms \n", time);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Throughput: %.3f Gops/sec\n", (((1.0*batch_size*iter*n_arith))/time)/10E6);
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//for (int i=0; i<numElements; i++) {
for (int i=0; i<8; i++) {
printf("%d , %f\n", i, h_A[i]);
}
err = cudaFree(d_A);
err = cudaFree(d_B);
err = cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
printf("Done!\n");
return 0;
}
|
a2986946d49523baa97c308d62b56fed6cc16a6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"{
#include <stdio.h>
#include <omp.h>
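// Naive triple-loop matrix multiply; the whole computation runs on a single thread since the kernel is launched with one block of one thread.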
__global__
void
matmultgpu1(int m, int n, int k, double *A, double *B, double *C) {
int i1,i2,i3;
for(i1 = 0; i1< m; i1++){
for(i2 = 0; i2 < n; i2++){
C[i1*n+i2]=0;
for(i3 = 0; i3 < k; i3++){
C[i1*n+i2]+=A[i1*k+i3]*B[i3*n+i2];
}
}
}
}
void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C){
double *d_A, *d_B, *d_C;
int sizeA = m * k *sizeof(double);
int sizeB = k * n *sizeof(double);
int sizeC = m * n *sizeof(double);
double time1, time2, elapsed;
//Alloc memory on the device
hipMalloc((void**)&d_A,sizeA);
hipMalloc((void**)&d_B,sizeB);
hipMalloc((void**)&d_C,sizeC);
time1 = omp_get_wtime();
hipMemcpy(d_A,A,sizeA,hipMemcpyHostToDevice);
hipMemcpy(d_B,B,sizeB,hipMemcpyHostToDevice);
time2 = omp_get_wtime();
hipLaunchKernelGGL(( matmultgpu1), dim3(1),dim3(1), 0, 0, m,n,k,d_A,d_B,d_C);
hipDeviceSynchronize();
elapsed = omp_get_wtime() - time2;
printf("Kernel time: %f\n", elapsed);
hipMemcpy(C,d_C,sizeC,hipMemcpyDeviceToHost);
elapsed = omp_get_wtime() - time1;
printf("Kernel+copy time: %f\n", elapsed);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
}
| a2986946d49523baa97c308d62b56fed6cc16a6b.cu | extern "C"{
#include <stdio.h>
#include <omp.h>
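// Naive triple-loop matrix multiply; the whole computation runs on a single thread since the kernel is launched with one block of one thread.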
__global__
void
matmultgpu1(int m, int n, int k, double *A, double *B, double *C) {
int i1,i2,i3;
for(i1 = 0; i1< m; i1++){
for(i2 = 0; i2 < n; i2++){
C[i1*n+i2]=0;
for(i3 = 0; i3 < k; i3++){
C[i1*n+i2]+=A[i1*k+i3]*B[i3*n+i2];
}
}
}
}
void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C){
double *d_A, *d_B, *d_C;
int sizeA = m * k *sizeof(double);
int sizeB = k * n *sizeof(double);
int sizeC = m * n *sizeof(double);
double time1, time2, elapsed;
//Alloc memory on the device
cudaMalloc((void**)&d_A,sizeA);
cudaMalloc((void**)&d_B,sizeB);
cudaMalloc((void**)&d_C,sizeC);
time1 = omp_get_wtime();
cudaMemcpy(d_A,A,sizeA,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,B,sizeB,cudaMemcpyHostToDevice);
time2 = omp_get_wtime();
matmultgpu1<<<1,1>>>(m,n,k,d_A,d_B,d_C);
cudaDeviceSynchronize();
elapsed = omp_get_wtime() - time2;
printf("Kernel time: %f\n", elapsed);
cudaMemcpy(C,d_C,sizeC,cudaMemcpyDeviceToHost);
elapsed = omp_get_wtime() - time1;
printf("Kernel+copy time: %f\n", elapsed);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
}
|
25e132d9b339a3fce7f42114a718a637972e2db9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "copyPrimes.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
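// Benchmark harness: for every (block shape, matrix size) pair it rounds the grid up to whole blocks, does a few warm-up launches, then times 1000 launches of copyPrimes and prints the elapsed microseconds.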
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *dev_chunk = NULL;
hipMalloc(&dev_chunk, XSIZE*YSIZE);
number_type *base_index_arr = NULL;
hipMalloc(&base_index_arr, XSIZE*YSIZE);
number_type *primes_arr = NULL;
hipMalloc(&primes_arr, XSIZE*YSIZE);
const number_type startValue = 1;
const number_type endValue = 1;
const int thread_size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
copyPrimes), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_chunk,base_index_arr,primes_arr,startValue,endValue,thread_size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
copyPrimes), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_chunk,base_index_arr,primes_arr,startValue,endValue,thread_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
copyPrimes), dim3(gridBlock),dim3(threadBlock), 0, 0, dev_chunk,base_index_arr,primes_arr,startValue,endValue,thread_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 25e132d9b339a3fce7f42114a718a637972e2db9.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "copyPrimes.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
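// Benchmark harness: for every (block shape, matrix size) pair it rounds the grid up to whole blocks, does a few warm-up launches, then times 1000 launches of copyPrimes and prints the elapsed microseconds.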
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *dev_chunk = NULL;
cudaMalloc(&dev_chunk, XSIZE*YSIZE);
number_type *base_index_arr = NULL;
cudaMalloc(&base_index_arr, XSIZE*YSIZE);
number_type *primes_arr = NULL;
cudaMalloc(&primes_arr, XSIZE*YSIZE);
const number_type startValue = 1;
const number_type endValue = 1;
const int thread_size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
copyPrimes<<<gridBlock,threadBlock>>>(dev_chunk,base_index_arr,primes_arr,startValue,endValue,thread_size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
copyPrimes<<<gridBlock,threadBlock>>>(dev_chunk,base_index_arr,primes_arr,startValue,endValue,thread_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
copyPrimes<<<gridBlock,threadBlock>>>(dev_chunk,base_index_arr,primes_arr,startValue,endValue,thread_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c385742e2bd2d7ddd9a01e6b920581be9099454a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Groute: An Asynchronous Multi-GPU Programming Framework
// http://www.github.com/groute/groute
// Copyright (c) 2017, A. Barak
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the names of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <gflags/gflags.h>
#include <groute/event_pool.h>
#include <groute/fused_distributed_worklist.h>
#include <groute/fused_worker.h>
#include <groute/cta_work.h>
#include <groute/graphs/csr_graph.h>
#include <groute/graphs/traversal_algo.h>
#include <groute/graphs/fused_solver.h>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include <utils/markers.h>
#include "bfs_common.h"
DECLARE_int32(source_node);
DEFINE_bool(exitonerror, false, "exit on error");
namespace bfs {
namespace opt {
const level_t INF = UINT_MAX;
struct LevelData
{
index_t node;
level_t level;
__device__ __host__ __forceinline__ LevelData(index_t node, level_t level) : node(node), level(level) { }
__device__ __host__ __forceinline__ LevelData() : node(INF), level(INF) { }
};
typedef index_t local_work_t;
typedef LevelData remote_work_t;
__global__ void BFSInit(level_t* levels, int nnodes)
{
int tid = GTID;
if (tid < nnodes)
{
levels[tid] = INF;
}
}
template<
typename TGraph,
typename TGraphDatum>
struct BFSWorkNP
{
template<typename WorkSource>
__device__ static void work(
const WorkSource& work_source,
groute::dev::CircularWorklist<local_work_t>& rwl_in,
groute::dev::CircularWorklist<remote_work_t>& rwl_out,
const TGraph& graph, TGraphDatum& levels_datum
)
{
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads)
{
groute::dev::np_local<level_t> np_local = { 0, 0, 0 };
if (i < work_size)
{
index_t node = work_source.get_work(i);
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
np_local.meta_data = levels_datum.get_item(node) + 1;
}
groute::dev::CTAWorkScheduler<level_t>::template schedule(
np_local,
[&graph, &levels_datum, &rwl_in, &rwl_out](index_t edge, level_t next_level)
{
index_t dest = graph.edge_dest(edge);
if (next_level < atomicMin(levels_datum.get_item_ptr(dest), next_level))
{
int is_owned = graph.owns(dest);
// TODO: move ballot logic to a device structure
int owned_mask = __ballot_sync(__activemask(), is_owned ? 1 : 0);
int remote_mask = __ballot_sync(__activemask(), is_owned ? 0 : 1);
if (is_owned)
{
int high_leader = __ffs(owned_mask) - 1;
int thread_offset = __popc(owned_mask & ((1 << lane_id()) - 1));
rwl_in.prepend_warp(dest, high_leader, __popc(owned_mask), thread_offset);
}
else
{
int low_leader = __ffs(remote_mask) - 1;
int thread_offset = __popc(remote_mask & ((1 << lane_id()) - 1));
rwl_out.append_warp(LevelData(dest, next_level), low_leader, __popc(remote_mask), thread_offset);
}
}
}
);
}
}
};
template<
typename TGraph,
typename TGraphDatum>
struct BFSWork
{
template<typename WorkSource>
__device__ static void work(
const WorkSource& work_source,
groute::dev::CircularWorklist<local_work_t>& rwl_in,
groute::dev::CircularWorklist<remote_work_t>& rwl_out,
const TGraph& graph, TGraphDatum& levels_datum
)
{
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads)
{
index_t node = work_source.get_work(i);
level_t next_level = levels_datum.get_item(node) + 1;
for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge)
{
index_t dest = graph.edge_dest(edge);
if (next_level < atomicMin(levels_datum.get_item_ptr(dest), next_level))
{
int is_owned = graph.owns(dest);
// TODO: move ballot logic to a device structure
int owned_mask = __ballot_sync(__activemask(), is_owned ? 1 : 0);
int remote_mask = __ballot_sync(__activemask(), is_owned ? 0 : 1);
if (is_owned)
{
int high_leader = __ffs(owned_mask) - 1;
int thread_offset = __popc(owned_mask & ((1 << lane_id()) - 1));
rwl_in.prepend_warp(dest, high_leader, __popc(owned_mask), thread_offset);
}
else
{
int low_leader = __ffs(remote_mask) - 1;
int thread_offset = __popc(remote_mask & ((1 << lane_id()) - 1));
rwl_out.append_warp(LevelData(dest, next_level), low_leader, __popc(remote_mask), thread_offset);
}
}
}
}
}
};
struct SplitOps
{
private:
groute::graphs::dev::CSRGraphSeg m_graph_seg;
groute::graphs::dev::GraphDatum<level_t> m_levels_datum;
public:
template<typename...UnusedData>
SplitOps(const groute::graphs::dev::CSRGraphSeg& graph_seg, const groute::graphs::dev::GraphDatum<level_t>& levels_datum, UnusedData&... data)
: m_graph_seg(graph_seg), m_levels_datum(levels_datum)
{
}
__device__ __forceinline__ groute::opt::SplitFlags on_receive(const remote_work_t& work)
{
if (m_graph_seg.owns(work.node))
{
return (work.level < atomicMin(m_levels_datum.get_item_ptr(work.node), work.level))
? groute::opt::SF_Take
: groute::opt::SF_None; // filter
}
return groute::opt::SF_Pass;
}
__device__ __forceinline__ bool is_high_prio(const local_work_t& work, const level_t& global_prio)
{
return m_levels_datum[work] <= global_prio;
}
__device__ __forceinline__ groute::opt::SplitFlags on_send(local_work_t work)
{
return (m_graph_seg.owns(work))
? groute::opt::SF_Take
: groute::opt::SF_Pass;
}
__device__ __forceinline__ remote_work_t pack(local_work_t work)
{
return LevelData(work, m_levels_datum.get_item(work));
}
__device__ __forceinline__ local_work_t unpack(const remote_work_t& work)
{
return work.node;
}
};
template<typename TGraph, typename TGraphDatum>
struct FusedProblem
{
TGraph m_graph;
TGraphDatum m_levels_datum;
typedef BFSWork<TGraph, TGraphDatum> WorkType;
typedef BFSWorkNP<TGraph, TGraphDatum> WorkTypeNP;
public:
FusedProblem(const TGraph& graph, const TGraphDatum& levels_datum) :
m_graph(graph), m_levels_datum(levels_datum)
{
}
// Called before a global CPU+GPU barrier
void Init(groute::Stream& stream) const
{
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_levels_datum.size);
hipLaunchKernelGGL(( BFSInit) , dim3(grid_dims), dim3(block_dims), 0, stream.cuda_stream ,
m_levels_datum.data_ptr, m_levels_datum.size);
}
bool DoFusedInit(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low,
groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out,
int fused_chunk_size, level_t global_prio,
volatile int *high_work_counter, volatile int *low_work_counter,
uint32_t *kernel_internal_counter, volatile int *send_signal_ptr,
cub::GridBarrierLifetime& barrier_lifetime,
dim3 grid_dims, dim3 block_dims, groute::Stream& stream)
{
return false; // no work was done here
}
void DoFusedWork(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low,
groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out,
int fused_chunk_size, level_t global_prio,
volatile int *high_work_counter, volatile int *low_work_counter,
uint32_t *kernel_internal_counter, volatile int *send_signal_ptr,
cub::GridBarrierLifetime& barrier_lifetime,
dim3 grid_dims, dim3 block_dims, groute::Stream& stream)
{
if (FLAGS_iteration_fusion)
{
if (FLAGS_cta_np)
{
hipLaunchKernelGGL(( groute::FusedWork <
groute::NeverStop, local_work_t, remote_work_t, level_t, SplitOps,
WorkTypeNP,
TGraph, TGraphDatum >)
, dim3(grid_dims), dim3(block_dims), 0, stream.cuda_stream ,
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
else
{
hipLaunchKernelGGL(( groute::FusedWork <
groute::NeverStop, local_work_t, remote_work_t, level_t, SplitOps,
WorkType,
TGraph, TGraphDatum >)
, dim3(grid_dims), dim3(block_dims), 0, stream.cuda_stream ,
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
}
else
{
if (FLAGS_cta_np)
{
hipLaunchKernelGGL(( groute::FusedWork <
groute::RunNTimes<1>, local_work_t, remote_work_t, level_t, SplitOps,
WorkTypeNP,
TGraph, TGraphDatum >)
, dim3(grid_dims), dim3(block_dims), 0, stream.cuda_stream ,
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
else
{
hipLaunchKernelGGL(( groute::FusedWork <
groute::RunNTimes<1>, local_work_t, remote_work_t, level_t, SplitOps,
WorkType,
TGraph, TGraphDatum >)
, dim3(grid_dims), dim3(block_dims), 0, stream.cuda_stream ,
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
}
}
};
struct Algo
{
static const char* NameLower() { return "bfs"; }
static const char* Name() { return "BFS"; }
static void Init(
groute::graphs::traversal::Context<bfs::opt::Algo>& context,
groute::graphs::multi::CSRGraphAllocator& graph_manager,
groute::router::Router<remote_work_t>& worklist_router,
groute::opt::DistributedWorklist<local_work_t, remote_work_t, bfs::opt::SplitOps>& distributed_worklist)
{
index_t source_node = min(max(0, FLAGS_source_node), context.host_graph.nnodes - 1);
auto partitioner = graph_manager.GetGraphPartitioner();
if (partitioner->NeedsReverseLookup())
{
source_node = partitioner->GetReverseLookupFunc()(source_node);
}
// Report the initial work
distributed_worklist.ReportHighPrioWork(1, 0, "Host", groute::Device::Host, true);
std::vector<remote_work_t> initial_work;
initial_work.push_back(remote_work_t(source_node, 0));
groute::router::ISender<remote_work_t>* work_sender = worklist_router.GetSender(groute::Device::Host);
work_sender->Send(
groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event());
work_sender->Shutdown();
}
template<typename TGraphAllocator, typename TGraphDatum, typename...UnusedData>
static std::vector<level_t> Gather(TGraphAllocator& graph_allocator, TGraphDatum& levels_datum, UnusedData&... data)
{
graph_allocator.GatherDatum(levels_datum);
return levels_datum.GetHostData();
}
template<typename...UnusedData>
static std::vector<level_t> Host(groute::graphs::host::CSRGraph& graph, UnusedData&... data)
{
return BFSHost(graph, min(max(0, FLAGS_source_node), graph.nnodes - 1));
}
static int Output(const char *file, const std::vector<level_t>& levels)
{
return BFSOutput(file, levels);
}
static int CheckErrors(const std::vector<level_t>& levels, const std::vector<level_t>& regression)
{
return BFSCheckErrors(levels, regression);
}
};
}
}
bool TestBFSAsyncMultiOptimized(int ngpus)
{
typedef bfs::opt::FusedProblem<groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatum<level_t>> ProblemType;
typedef groute::graphs::traversal::FusedSolver<
bfs::opt::Algo, ProblemType,
bfs::opt::local_work_t , bfs::opt::remote_work_t, level_t,
bfs::opt::SplitOps,
groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatum<level_t>> SolverType;
groute::graphs::traversal::__MultiRunner__Opt__ <
bfs::opt::Algo,
ProblemType,
SolverType,
bfs::opt::SplitOps,
bfs::opt::local_work_t,
bfs::opt::remote_work_t,
groute::graphs::multi::NodeOutputGlobalDatum<level_t> > runner;
groute::graphs::multi::NodeOutputGlobalDatum<level_t> levels_datum;
bool retval = runner(ngpus, levels_datum);
if(FLAGS_exitonerror && !retval)
exit(100);
return retval;
}
| c385742e2bd2d7ddd9a01e6b920581be9099454a.cu | // Groute: An Asynchronous Multi-GPU Programming Framework
// http://www.github.com/groute/groute
// Copyright (c) 2017, A. Barak
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the names of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <gflags/gflags.h>
#include <groute/event_pool.h>
#include <groute/fused_distributed_worklist.h>
#include <groute/fused_worker.h>
#include <groute/cta_work.h>
#include <groute/graphs/csr_graph.h>
#include <groute/graphs/traversal_algo.h>
#include <groute/graphs/fused_solver.h>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include <utils/markers.h>
#include "bfs_common.h"
DECLARE_int32(source_node);
DEFINE_bool(exitonerror, false, "exit on error");
namespace bfs {
namespace opt {
const level_t INF = UINT_MAX;
struct LevelData
{
index_t node;
level_t level;
__device__ __host__ __forceinline__ LevelData(index_t node, level_t level) : node(node), level(level) { }
__device__ __host__ __forceinline__ LevelData() : node(INF), level(INF) { }
};
typedef index_t local_work_t;
typedef LevelData remote_work_t;
__global__ void BFSInit(level_t* levels, int nnodes)
{
int tid = GTID;
if (tid < nnodes)
{
levels[tid] = INF;
}
}
template<
typename TGraph,
typename TGraphDatum>
struct BFSWorkNP
{
template<typename WorkSource>
__device__ static void work(
const WorkSource& work_source,
groute::dev::CircularWorklist<local_work_t>& rwl_in,
groute::dev::CircularWorklist<remote_work_t>& rwl_out,
const TGraph& graph, TGraphDatum& levels_datum
)
{
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads)
{
groute::dev::np_local<level_t> np_local = { 0, 0, 0 };
if (i < work_size)
{
index_t node = work_source.get_work(i);
np_local.start = graph.begin_edge(node);
np_local.size = graph.end_edge(node) - np_local.start;
np_local.meta_data = levels_datum.get_item(node) + 1;
}
groute::dev::CTAWorkScheduler<level_t>::template schedule(
np_local,
[&graph, &levels_datum, &rwl_in, &rwl_out](index_t edge, level_t next_level)
{
index_t dest = graph.edge_dest(edge);
if (next_level < atomicMin(levels_datum.get_item_ptr(dest), next_level))
{
int is_owned = graph.owns(dest);
// TODO: move ballot logic to a device structure
int owned_mask = __ballot_sync(__activemask(), is_owned ? 1 : 0);
int remote_mask = __ballot_sync(__activemask(), is_owned ? 0 : 1);
if (is_owned)
{
int high_leader = __ffs(owned_mask) - 1;
int thread_offset = __popc(owned_mask & ((1 << lane_id()) - 1));
rwl_in.prepend_warp(dest, high_leader, __popc(owned_mask), thread_offset);
}
else
{
int low_leader = __ffs(remote_mask) - 1;
int thread_offset = __popc(remote_mask & ((1 << lane_id()) - 1));
rwl_out.append_warp(LevelData(dest, next_level), low_leader, __popc(remote_mask), thread_offset);
}
}
}
);
}
}
};
template<
typename TGraph,
typename TGraphDatum>
struct BFSWork
{
template<typename WorkSource>
__device__ static void work(
const WorkSource& work_source,
groute::dev::CircularWorklist<local_work_t>& rwl_in,
groute::dev::CircularWorklist<remote_work_t>& rwl_out,
const TGraph& graph, TGraphDatum& levels_datum
)
{
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
for (uint32_t i = 0 + tid; i < work_size; i += nthreads)
{
index_t node = work_source.get_work(i);
level_t next_level = levels_datum.get_item(node) + 1;
for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge)
{
index_t dest = graph.edge_dest(edge);
if (next_level < atomicMin(levels_datum.get_item_ptr(dest), next_level))
{
int is_owned = graph.owns(dest);
// TODO: move ballot logic to a device structure
int owned_mask = __ballot_sync(__activemask(), is_owned ? 1 : 0);
int remote_mask = __ballot_sync(__activemask(), is_owned ? 0 : 1);
if (is_owned)
{
int high_leader = __ffs(owned_mask) - 1;
int thread_offset = __popc(owned_mask & ((1 << lane_id()) - 1));
rwl_in.prepend_warp(dest, high_leader, __popc(owned_mask), thread_offset);
}
else
{
int low_leader = __ffs(remote_mask) - 1;
int thread_offset = __popc(remote_mask & ((1 << lane_id()) - 1));
rwl_out.append_warp(LevelData(dest, next_level), low_leader, __popc(remote_mask), thread_offset);
}
}
}
}
}
};
struct SplitOps
{
private:
groute::graphs::dev::CSRGraphSeg m_graph_seg;
groute::graphs::dev::GraphDatum<level_t> m_levels_datum;
public:
template<typename...UnusedData>
SplitOps(const groute::graphs::dev::CSRGraphSeg& graph_seg, const groute::graphs::dev::GraphDatum<level_t>& levels_datum, UnusedData&... data)
: m_graph_seg(graph_seg), m_levels_datum(levels_datum)
{
}
__device__ __forceinline__ groute::opt::SplitFlags on_receive(const remote_work_t& work)
{
if (m_graph_seg.owns(work.node))
{
return (work.level < atomicMin(m_levels_datum.get_item_ptr(work.node), work.level))
? groute::opt::SF_Take
: groute::opt::SF_None; // filter
}
return groute::opt::SF_Pass;
}
__device__ __forceinline__ bool is_high_prio(const local_work_t& work, const level_t& global_prio)
{
return m_levels_datum[work] <= global_prio;
}
__device__ __forceinline__ groute::opt::SplitFlags on_send(local_work_t work)
{
return (m_graph_seg.owns(work))
? groute::opt::SF_Take
: groute::opt::SF_Pass;
}
__device__ __forceinline__ remote_work_t pack(local_work_t work)
{
return LevelData(work, m_levels_datum.get_item(work));
}
__device__ __forceinline__ local_work_t unpack(const remote_work_t& work)
{
return work.node;
}
};
template<typename TGraph, typename TGraphDatum>
struct FusedProblem
{
TGraph m_graph;
TGraphDatum m_levels_datum;
typedef BFSWork<TGraph, TGraphDatum> WorkType;
typedef BFSWorkNP<TGraph, TGraphDatum> WorkTypeNP;
public:
FusedProblem(const TGraph& graph, const TGraphDatum& levels_datum) :
m_graph(graph), m_levels_datum(levels_datum)
{
}
// Called before a global CPU+GPU barrier
void Init(groute::Stream& stream) const
{
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, m_levels_datum.size);
BFSInit <<< grid_dims, block_dims, 0, stream.cuda_stream >>>(
m_levels_datum.data_ptr, m_levels_datum.size);
}
bool DoFusedInit(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low,
groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out,
int fused_chunk_size, level_t global_prio,
volatile int *high_work_counter, volatile int *low_work_counter,
uint32_t *kernel_internal_counter, volatile int *send_signal_ptr,
cub::GridBarrierLifetime& barrier_lifetime,
dim3 grid_dims, dim3 block_dims, groute::Stream& stream)
{
return false; // no work was done here
}
void DoFusedWork(groute::Worklist<local_work_t>* lwl_high, groute::Worklist<local_work_t>* lwl_low,
groute::CircularWorklist<local_work_t>* rwl_in, groute::CircularWorklist<remote_work_t>* rwl_out,
int fused_chunk_size, level_t global_prio,
volatile int *high_work_counter, volatile int *low_work_counter,
uint32_t *kernel_internal_counter, volatile int *send_signal_ptr,
cub::GridBarrierLifetime& barrier_lifetime,
dim3 grid_dims, dim3 block_dims, groute::Stream& stream)
{
if (FLAGS_iteration_fusion)
{
if (FLAGS_cta_np)
{
groute::FusedWork <
groute::NeverStop, local_work_t, remote_work_t, level_t, SplitOps,
WorkTypeNP,
TGraph, TGraphDatum >
<<< grid_dims, block_dims, 0, stream.cuda_stream >>> (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
else
{
groute::FusedWork <
groute::NeverStop, local_work_t, remote_work_t, level_t, SplitOps,
WorkType,
TGraph, TGraphDatum >
<<< grid_dims, block_dims, 0, stream.cuda_stream >>> (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
}
else
{
if (FLAGS_cta_np)
{
groute::FusedWork <
groute::RunNTimes<1>, local_work_t, remote_work_t, level_t, SplitOps,
WorkTypeNP,
TGraph, TGraphDatum >
<<< grid_dims, block_dims, 0, stream.cuda_stream >>> (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
else
{
groute::FusedWork <
groute::RunNTimes<1>, local_work_t, remote_work_t, level_t, SplitOps,
WorkType,
TGraph, TGraphDatum >
<< < grid_dims, block_dims, 0, stream.cuda_stream >> > (
lwl_high->DeviceObject(), lwl_low->DeviceObject(),
rwl_in->DeviceObject(), rwl_out->DeviceObject(),
fused_chunk_size, global_prio,
high_work_counter, low_work_counter,
kernel_internal_counter, send_signal_ptr,
barrier_lifetime,
bfs::opt::SplitOps(m_graph, m_levels_datum),
m_graph, m_levels_datum
);
}
}
}
};
struct Algo
{
static const char* NameLower() { return "bfs"; }
static const char* Name() { return "BFS"; }
static void Init(
groute::graphs::traversal::Context<bfs::opt::Algo>& context,
groute::graphs::multi::CSRGraphAllocator& graph_manager,
groute::router::Router<remote_work_t>& worklist_router,
groute::opt::DistributedWorklist<local_work_t, remote_work_t, bfs::opt::SplitOps>& distributed_worklist)
{
index_t source_node = min(max(0, FLAGS_source_node), context.host_graph.nnodes - 1);
auto partitioner = graph_manager.GetGraphPartitioner();
if (partitioner->NeedsReverseLookup())
{
source_node = partitioner->GetReverseLookupFunc()(source_node);
}
// Report the initial work
distributed_worklist.ReportHighPrioWork(1, 0, "Host", groute::Device::Host, true);
std::vector<remote_work_t> initial_work;
initial_work.push_back(remote_work_t(source_node, 0));
groute::router::ISender<remote_work_t>* work_sender = worklist_router.GetSender(groute::Device::Host);
work_sender->Send(
groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event());
work_sender->Shutdown();
}
template<typename TGraphAllocator, typename TGraphDatum, typename...UnusedData>
static std::vector<level_t> Gather(TGraphAllocator& graph_allocator, TGraphDatum& levels_datum, UnusedData&... data)
{
graph_allocator.GatherDatum(levels_datum);
return levels_datum.GetHostData();
}
template<typename...UnusedData>
static std::vector<level_t> Host(groute::graphs::host::CSRGraph& graph, UnusedData&... data)
{
return BFSHost(graph, min(max(0, FLAGS_source_node), graph.nnodes - 1));
}
static int Output(const char *file, const std::vector<level_t>& levels)
{
return BFSOutput(file, levels);
}
static int CheckErrors(const std::vector<level_t>& levels, const std::vector<level_t>& regression)
{
return BFSCheckErrors(levels, regression);
}
};
}
}
bool TestBFSAsyncMultiOptimized(int ngpus)
{
typedef bfs::opt::FusedProblem<groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatum<level_t>> ProblemType;
typedef groute::graphs::traversal::FusedSolver<
bfs::opt::Algo, ProblemType,
bfs::opt::local_work_t , bfs::opt::remote_work_t, level_t,
bfs::opt::SplitOps,
groute::graphs::dev::CSRGraphSeg, groute::graphs::dev::GraphDatum<level_t>> SolverType;
groute::graphs::traversal::__MultiRunner__Opt__ <
bfs::opt::Algo,
ProblemType,
SolverType,
bfs::opt::SplitOps,
bfs::opt::local_work_t,
bfs::opt::remote_work_t,
groute::graphs::multi::NodeOutputGlobalDatum<level_t> > runner;
groute::graphs::multi::NodeOutputGlobalDatum<level_t> levels_datum;
bool retval = runner(ngpus, levels_datum);
if(FLAGS_exitonerror && !retval)
exit(100);
return retval;
}
|
3bd7eb5eefccf8fb1d26a60a3eceffff45eb9e6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <vector>
#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/prelu_layer.hpp"
namespace caffe {
// CUDA kernel for forward
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for bottom backward
template <typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
template <typename Dtype>
__global__ void PReLUParamBackward(const int n,
const int rows, const int rowPitch, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
for ( int k = 1; k < rows; k++ ) {
out_diff[index] += in_diff[index + k*rowPitch]
* in_data[index + k*rowPitch] * (in_data[index + k*rowPitch] <= 0);
}
}
}
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// For in-place computation
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param
// Since writing bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computation), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int cdim = channels * dim;
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUParamBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(cdim)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
cdim, bottom[0]->num(), top[0]->offset(1), top_diff ,
bottom_data ,
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype dsum;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &dsum);
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
| 3bd7eb5eefccf8fb1d26a60a3eceffff45eb9e6e.cu | /*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <vector>
#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/prelu_layer.hpp"
namespace caffe {
// CUDA kernel for forward
template <typename Dtype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Dtype* in, Dtype* out, const Dtype* slope_data,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
}
}
// CUDA kernel for bottom backward
template <typename Dtype>
__global__ void PReLUBackward(const int n, const int channels, const int dim,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff,
const Dtype* slope_data, const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * slope_data[c]);
}
}
// CUDA kernel for element-wise parameter backward
template <typename Dtype>
__global__ void PReLUParamBackward(const int n,
const int rows, const int rowPitch, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * in_data[index] * (in_data[index] <= 0);
for ( int k = 1; k < rows; k++ ) {
out_diff[index] += in_diff[index + k*rowPitch]
* in_data[index + k*rowPitch] * (in_data[index + k*rowPitch] <= 0);
}
}
}
template <typename Dtype>
void PReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
const int div_factor = channel_shared_ ? channels : 1;
// For in-place computation
if (top[0] == bottom[0]) {
caffe_copy(count, bottom_data, bottom_memory_.mutable_gpu_data());
}
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, bottom_data, top_data, slope_data, div_factor);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void PReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
const int dim = bottom[0]->count(2);
const int channels = bottom[0]->channels();
// For in-place computation
if (top[0] == bottom[0]) {
bottom_data = bottom_memory_.gpu_data();
}
// Propagate to param
// Since writing bottom diff will affect top diff if top and bottom blobs
// are identical (in-place computation), we first compute param backward to
// keep top_diff unchanged.
if (this->param_propagate_down_[0]) {
Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff();
int cdim = channels * dim;
// compute element-wise diff
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUParamBackward<Dtype><<<CAFFE_GET_BLOCKS(cdim),
CAFFE_CUDA_NUM_THREADS>>>(
cdim, bottom[0]->num(), top[0]->offset(1), top_diff ,
bottom_data ,
backward_buff_.mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
if (channel_shared_) {
Dtype dsum;
caffe_gpu_dot<Dtype>(channels * dim, backward_buff_.gpu_diff(),
multiplier_.gpu_data(), &dsum);
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff);
} else {
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, dim, 1.,
backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1.,
slope_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* slope_data = this->blobs_[0]->gpu_data();
int div_factor = channel_shared_ ? channels : 1;
// NOLINT_NEXT_LINE(whitespace/operators)
PReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, channels, dim, top_diff, bottom_data, bottom_diff, slope_data,
div_factor);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PReLULayer);
} // namespace caffe
|
a84cb5fd66c82b7f4782f9306562c4e90d246b5f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
int main()
{
int count = 0;
hipGetDeviceCount(&count);
if (0 == count) {
fprintf(stderr,"found no GPU device\n");
exit (1);
}
fprintf(stdout,"found %d GPU on host\n",count);
int i = 0;
for (i=0;i<count;i++)
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp,i);
// major version 6: Pascal
// major version 5: Maxwell
// major version 3: Kepler
// major version 2: Fermi
// major version 1: Tesla
fprintf(stdout,"Device :%d has compute capability %d:%d\n",i,deviceProp.major,deviceProp.minor);
}
return 0;
}
| a84cb5fd66c82b7f4782f9306562c4e90d246b5f.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
int main()
{
int count = 0;
cudaGetDeviceCount(&count);
if (0 == count) {
fprintf(stderr,"found no GPU device\n");
exit (1);
}
fprintf(stdout,"found %d GPU on host\n",count);
int i = 0;
for (i=0;i<count;i++)
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,i);
// major version 6: Pascal
// major version 5: Maxwell
// major version 3: Kepler
// major version 2: Fermi
// major version 1: Tesla
fprintf(stdout,"Device :%d has compute capability %d:%d\n",i,deviceProp.major,deviceProp.minor);
}
return 0;
}
|
e2eb5ee89645204a701e828ec49c2a10c4e31303.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/fake_dequantize_op.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void KeDequantize(const T* in, const T* scale, T max_range, int num,
T* out) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < num) {
out[idx] = in[idx] * scale[0] / max_range;
}
}
template <typename T>
struct DequantizeFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& dev_ctx,
const framework::Tensor* in, const framework::Tensor* scale,
T max_range, framework::Tensor* out) {
const T* in_data = in->data<T>();
const T* scale_factor = scale->data<T>();
T* out_data = out->mutable_data<T>(dev_ctx.GetPlace());
int num = in->numel();
int block = 512;
int grid = (num + block - 1) / block;
hipLaunchKernelGGL(( KeDequantize<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
in_data, scale_factor, max_range, num, out_data);
}
};
template <typename T>
__global__ void DequantizeOneScaleQuantAxis0(const T* in, const T* scale,
T max_range, int num, int channel,
T* out) {
int tid = threadIdx.x;
int channel_size = num / channel;
const T* in_c = in + blockIdx.x * channel_size;
T* out_c = out + blockIdx.x * channel_size;
for (int i = tid; i < channel_size; i += blockDim.x) {
out_c[i] = in_c[i] * scale[blockIdx.x] / max_range;
}
}
template <typename T>
__global__ void DequantizeOneScaleQuantAxisN(const T* in, const T* scale,
const T max_range,
const int64_t num,
const int n_scales,
const int quant_stride, T* out) {
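// grid-stride loop; each element's per-channel scale index is (i / quant_stride) % n_scales,
// where quant_stride is the product of the dimensions after quant_axis (computed by the caller)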
int64_t idx = blockDim.x * blockIdx.x + threadIdx.x;
for (int64_t i = idx; i < num; i += blockDim.x * gridDim.x) {
T s = scale[(i / quant_stride) % n_scales];
out[i] = in[i] * s / max_range;
}
}
template <typename T>
__global__ void DequantizeTwoScale(const T* in, const T* scale_one,
const T* scale_two, T max_range, int num,
int iter_size, int channel, T* out) {
int tid = threadIdx.x;
int channel_size = num / (iter_size * channel);
int scale_index = blockIdx.x % channel;
const T* in_c = in + blockIdx.x * channel_size;
T* out_c = out + blockIdx.x * channel_size;
for (int i = tid; i < channel_size; i += blockDim.x) {
out_c[i] = in_c[i] * scale_one[scale_index] * scale_two[0] / max_range;
}
}
template <typename T>
struct ChannelDequantizeFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& dev_ctx,
const framework::Tensor* in, const framework::Tensor** scales,
const int scale_num, T max_range, const int quant_axis,
const int x_num_col_dims, framework::Tensor* out) {
auto in_dims = in->dims();
const T* in_data = in->data<T>();
T* out_data = out->mutable_data<T>(dev_ctx.GetPlace());
if (scale_num == 1) {
int64_t num = in->numel();
const T* scale_factor = scales[0]->data<T>();
if (quant_axis == 0) {
int grid = in_dims[0];
int block = 1024;
hipLaunchKernelGGL(( DequantizeOneScaleQuantAxis0<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
in_data, scale_factor, max_range, num, in_dims[0], out_data);
} else {
int quant_stride = 1;
for (int i = quant_axis + 1; i < in_dims.size(); i++) {
quant_stride *= in_dims[i];
}
int64_t block_size = ::min(
num, static_cast<int64_t>(dev_ctx.GetMaxThreadsPerBlock() / 4));
int64_t max_threads =
dev_ctx.GetMaxPhysicalThreadCount(); // SM * block_per_SM
const int64_t max_blocks = ::max(
((max_threads - 1) / block_size + 1), static_cast<int64_t>(1));
const int64_t grid_size =
::min(max_blocks, (num + block_size - 1) / block_size);
hipLaunchKernelGGL(( DequantizeOneScaleQuantAxisN<
T>), dim3(grid_size), dim3(block_size), 0, dev_ctx.stream(),
in_data, scale_factor, max_range, num, in_dims[quant_axis],
quant_stride, out_data);
}
} else if (scale_num == 2) {
// Not need to consider quant_axis
int num = in->numel();
int iter_size = 1;
for (int i = 0; i < x_num_col_dims; i++) {
iter_size *= in->dims()[i];
}
int channel = in->dims()[x_num_col_dims];
const T* scale_one = scales[0]->data<T>();
const T* scale_two = scales[1]->data<T>();
int block = 1024;
int grid = iter_size * channel;
hipLaunchKernelGGL(( DequantizeTwoScale<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
in_data, scale_one, scale_two, max_range, num, iter_size, channel,
out_data);
}
}
};
template struct DequantizeFunctor<platform::CUDADeviceContext, float>;
template struct DequantizeFunctor<platform::CUDADeviceContext, double>;
template struct ChannelDequantizeFunctor<platform::CUDADeviceContext, float>;
template struct ChannelDequantizeFunctor<platform::CUDADeviceContext, double>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using CUDA = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(fake_dequantize_max_abs,
ops::FakeDequantizeMaxAbsKernel<CUDA, float>,
ops::FakeDequantizeMaxAbsKernel<CUDA, double>);
REGISTER_OP_CUDA_KERNEL(
fake_channel_wise_dequantize_max_abs,
ops::FakeChannelWiseDequantizeMaxAbsKernel<CUDA, float>,
ops::FakeChannelWiseDequantizeMaxAbsKernel<CUDA, double>);
| e2eb5ee89645204a701e828ec49c2a10c4e31303.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/fake_dequantize_op.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void KeDequantize(const T* in, const T* scale, T max_range, int num,
T* out) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < num) {
out[idx] = in[idx] * scale[0] / max_range;
}
}
template <typename T>
struct DequantizeFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& dev_ctx,
const framework::Tensor* in, const framework::Tensor* scale,
T max_range, framework::Tensor* out) {
const T* in_data = in->data<T>();
const T* scale_factor = scale->data<T>();
T* out_data = out->mutable_data<T>(dev_ctx.GetPlace());
int num = in->numel();
int block = 512;
int grid = (num + block - 1) / block;
KeDequantize<T><<<grid, block, 0, dev_ctx.stream()>>>(
in_data, scale_factor, max_range, num, out_data);
}
};
template <typename T>
__global__ void DequantizeOneScaleQuantAxis0(const T* in, const T* scale,
T max_range, int num, int channel,
T* out) {
int tid = threadIdx.x;
int channel_size = num / channel;
const T* in_c = in + blockIdx.x * channel_size;
T* out_c = out + blockIdx.x * channel_size;
for (int i = tid; i < channel_size; i += blockDim.x) {
out_c[i] = in_c[i] * scale[blockIdx.x] / max_range;
}
}
template <typename T>
__global__ void DequantizeOneScaleQuantAxisN(const T* in, const T* scale,
const T max_range,
const int64_t num,
const int n_scales,
const int quant_stride, T* out) {
int64_t idx = blockDim.x * blockIdx.x + threadIdx.x;
for (int64_t i = idx; i < num; i += blockDim.x * gridDim.x) {
T s = scale[(i / quant_stride) % n_scales];
out[i] = in[i] * s / max_range;
}
}
template <typename T>
__global__ void DequantizeTwoScale(const T* in, const T* scale_one,
const T* scale_two, T max_range, int num,
int iter_size, int channel, T* out) {
int tid = threadIdx.x;
int channel_size = num / (iter_size * channel);
int scale_index = blockIdx.x % channel;
const T* in_c = in + blockIdx.x * channel_size;
T* out_c = out + blockIdx.x * channel_size;
for (int i = tid; i < channel_size; i += blockDim.x) {
out_c[i] = in_c[i] * scale_one[scale_index] * scale_two[0] / max_range;
}
}
template <typename T>
struct ChannelDequantizeFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& dev_ctx,
const framework::Tensor* in, const framework::Tensor** scales,
const int scale_num, T max_range, const int quant_axis,
const int x_num_col_dims, framework::Tensor* out) {
auto in_dims = in->dims();
const T* in_data = in->data<T>();
T* out_data = out->mutable_data<T>(dev_ctx.GetPlace());
if (scale_num == 1) {
int64_t num = in->numel();
const T* scale_factor = scales[0]->data<T>();
if (quant_axis == 0) {
int grid = in_dims[0];
int block = 1024;
DequantizeOneScaleQuantAxis0<T><<<grid, block, 0, dev_ctx.stream()>>>(
in_data, scale_factor, max_range, num, in_dims[0], out_data);
} else {
int quant_stride = 1;
for (int i = quant_axis + 1; i < in_dims.size(); i++) {
quant_stride *= in_dims[i];
}
int64_t block_size = std::min(
num, static_cast<int64_t>(dev_ctx.GetMaxThreadsPerBlock() / 4));
int64_t max_threads =
dev_ctx.GetMaxPhysicalThreadCount(); // SM * block_per_SM
const int64_t max_blocks = std::max(
((max_threads - 1) / block_size + 1), static_cast<int64_t>(1));
const int64_t grid_size =
std::min(max_blocks, (num + block_size - 1) / block_size);
DequantizeOneScaleQuantAxisN<
T><<<grid_size, block_size, 0, dev_ctx.stream()>>>(
in_data, scale_factor, max_range, num, in_dims[quant_axis],
quant_stride, out_data);
}
} else if (scale_num == 2) {
// Not need to consider quant_axis
int num = in->numel();
int iter_size = 1;
for (int i = 0; i < x_num_col_dims; i++) {
iter_size *= in->dims()[i];
}
int channel = in->dims()[x_num_col_dims];
const T* scale_one = scales[0]->data<T>();
const T* scale_two = scales[1]->data<T>();
int block = 1024;
int grid = iter_size * channel;
DequantizeTwoScale<T><<<grid, block, 0, dev_ctx.stream()>>>(
in_data, scale_one, scale_two, max_range, num, iter_size, channel,
out_data);
}
}
};
template struct DequantizeFunctor<platform::CUDADeviceContext, float>;
template struct DequantizeFunctor<platform::CUDADeviceContext, double>;
template struct ChannelDequantizeFunctor<platform::CUDADeviceContext, float>;
template struct ChannelDequantizeFunctor<platform::CUDADeviceContext, double>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using CUDA = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(fake_dequantize_max_abs,
ops::FakeDequantizeMaxAbsKernel<CUDA, float>,
ops::FakeDequantizeMaxAbsKernel<CUDA, double>);
REGISTER_OP_CUDA_KERNEL(
fake_channel_wise_dequantize_max_abs,
ops::FakeChannelWiseDequantizeMaxAbsKernel<CUDA, float>,
ops::FakeChannelWiseDequantizeMaxAbsKernel<CUDA, double>);
|
54f870a972694a4b1ca69f907fbac476bfd6cd14.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// PFOR and PFOR-DELTA Compression and decompression routines
#include <stdio.h>
#include <iomanip>
#include <thrust/extrema.h>
#include "cm.h"
using namespace std;
thrust::device_vector<unsigned char> scratch;
bool phase_copy = 0;
std::map<string, unsigned int> cnt_counts;
string curr_file;
struct int64_to_char
{
__host__ __device__
unsigned char operator()(const int_type x)
{
return (unsigned char)x;
}
};
struct char_to_int64
{
__host__ __device__
int_type operator()(const unsigned char x)
{
return (int_type)x;
}
};
struct int64_to_int16
{
__host__ __device__
unsigned short int operator()(const int_type x)
{
return (unsigned short int)x;
}
};
struct int16_to_int64
{
__host__ __device__
int_type operator()(const unsigned short int x)
{
return (int_type)x;
}
};
struct int64_to_int32
{
__host__ __device__
unsigned int operator()(const int_type x)
{
return (unsigned int)x;
}
};
struct int32_to_int64
{
__host__ __device__
int_type operator()(const unsigned int x)
{
return (int_type)x;
}
};
struct bool_to_int
{
__host__ __device__
unsigned int operator()(const bool x)
{
return (unsigned int)x;
}
};
struct ui_to_ll
{
__host__ __device__
long long int operator()(const unsigned int x)
{
return (long long int)x;
}
};
struct compress_functor_int
{
const int_type * source;
unsigned long long int * dest;
const long long int * start_val;
const unsigned int * vals;
compress_functor_int(const int_type * _source, unsigned long long int * _dest,
const long long int * _start_val, const unsigned int * _vals):
source(_source), dest(_dest), start_val(_start_val), vals(_vals) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
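// vals[0] = bits per value, vals[1] = values packed per 64-bit word, vals[2] = word size in bits;
// shift the offset from start_val left into this element's bit field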
long long int val = source[i] - start_val[0];
unsigned int shifted = vals[2] - vals[0] - (i%vals[1])*vals[0];
dest[i] = val << shifted;
}
};
struct compress_functor_float
{
const long long int * source;
unsigned long long int * dest;
const long long int * start_val;
const unsigned int * vals;
compress_functor_float(const long long int * _source, unsigned long long int * _dest,
const long long int * _start_val, const unsigned int * _vals):
source(_source), dest(_dest), start_val(_start_val), vals(_vals) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
long long int val;
unsigned int bits = vals[0];
unsigned int fit_count = vals[1];
unsigned int int_sz = vals[2];
val = source[i] - start_val[0];
unsigned int z = i%fit_count;
unsigned int shifted = int_sz - bits - z*bits;
dest[i] = val << shifted;
}
};
struct decompress_functor_int
{
const unsigned long long int * source;
int_type * dest;
const long long int * start_val;
const unsigned int * vals;
decompress_functor_int(const unsigned long long int * _source, int_type * _dest,
const long long int * _start_val, const unsigned int * _vals):
source(_source), dest(_dest), start_val(_start_val), vals(_vals) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
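// locate the 64-bit word holding element i (vals[1] values per word), shift its vals[0]-bit
// field down to the low bits, mask off the rest, then re-bias by start_val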
unsigned long long int tmp = source[i/vals[1]] >> (vals[2] - vals[0] - (i%vals[1])*vals[0]);
// set the rest of bits to 0
tmp = tmp << (vals[2] - vals[0]);
tmp = tmp >> (vals[2] - vals[0]);
dest[i] = tmp + start_val[0];
}
};
struct decompress_functor_str
{
const unsigned long long * source;
unsigned int * dest;
const unsigned int * vals;
decompress_functor_str(const unsigned long long int * _source, unsigned int * _dest,
const unsigned int * _vals):
source(_source), dest(_dest), vals(_vals) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
unsigned int bits = vals[0];
unsigned int fit_count = vals[1];
unsigned int int_sz = 64;
//find the source index
unsigned int src_idx = i/fit_count;
// find the exact location
unsigned int src_loc = i%fit_count;
//right shift the values
unsigned int shifted = ((fit_count-src_loc)-1)*bits;
unsigned long long int tmp = source[src_idx] >> shifted;
// set the rest of bits to 0
tmp = tmp << (int_sz - bits);
tmp = tmp >> (int_sz - bits);
dest[i] = tmp;
}
};
size_t pfor_decompress(void* destination, void* host, void* d_v, void* s_v, string colname)
{
unsigned int bit_count = 64;
auto cnt = ((unsigned int*)host)[0];
auto orig_recCount = ((unsigned int*)((char*)host + cnt))[7];
auto bits = ((unsigned int*)((char*)host + cnt))[8];
auto orig_lower_val = ((long long int*)((unsigned int*)((char*)host + cnt) + 9))[0];
auto fit_count = ((unsigned int*)((char*)host + cnt))[11];
auto start_val = ((long long int*)((unsigned int*)((char*)host + cnt) + 12))[0];
auto comp_type = ((unsigned int*)host)[5];
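// comp_type == 1 selects FOR-DELTA: bit-packed deltas are unpacked and prefix-summed from start_val;
// any other value selects plain FOR: values stored in 8/16/32/64-bit slots and offset by orig_lower_val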
//cout << "Decomp Header " << orig_recCount << " " << bits << " " << orig_lower_val << " " << cnt << " " << fit_count << " " << comp_type << endl;
if(scratch.size() < cnt)
scratch.resize(cnt);
hipMemcpy(thrust::raw_pointer_cast(scratch.data()), (void*)((unsigned int*)host + 6), cnt, hipMemcpyHostToDevice);
thrust::device_ptr<int_type> d_int((int_type*)destination);
if(comp_type == 1) {
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
thrust::device_ptr<long long int> dd_sv((long long int*)s_v);
dd_sv[0] = orig_lower_val;
dd_v[0] = bits;
dd_v[1] = fit_count;
dd_v[2] = bit_count;
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_int ff1((const unsigned long long int *)thrust::raw_pointer_cast(scratch.data()),(int_type*)destination, (long long int*)s_v, (unsigned int*)d_v);
thrust::for_each(begin, begin + orig_recCount, ff1);
d_int[0] = start_val;
thrust::inclusive_scan(d_int, d_int + orig_recCount, d_int);
}
else {
if(!phase_copy) {
if(bits == 8) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(scratch.data()));
thrust::transform(src, src+orig_recCount, d_int, char_to_int64());
}
else if(bits == 16) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(scratch.data()));
thrust::transform(src, src+orig_recCount, d_int, int16_to_int64());
}
else if(bits == 32) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(scratch.data()));
thrust::transform(src, src+orig_recCount, d_int, int32_to_int64());
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::copy(src, src+orig_recCount, d_int);
};
thrust::constant_iterator<int_type> iter(orig_lower_val);
thrust::transform(d_int, d_int+orig_recCount, iter, d_int, thrust::plus<int_type>());
}
else {
cpy_bits[colname] = bits;
cpy_init_val[colname] = orig_lower_val;
if(bits == 8) {
thrust::device_ptr<unsigned char> dest((unsigned char*)destination);
thrust::copy(scratch.begin(), scratch.begin()+orig_recCount, dest);
}
else if(bits == 16) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(scratch.data()));
thrust::device_ptr<unsigned short int> dest((unsigned short int*)destination);
thrust::copy(src, src+orig_recCount, dest);
}
else if(bits == 32) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(scratch.data()));
thrust::device_ptr<unsigned int> dest((unsigned int*)destination);
thrust::copy(src, src+orig_recCount, dest);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::copy(src, src+orig_recCount, d_int);
};
//cout << "using phase copy on " << colname << " " << bits << endl;
};
};
return orig_recCount;
}
template< typename T>
void pfor_delta_compress(void* source, size_t source_len, string file_name, thrust::host_vector<T, pinned_allocator<T> >& host, bool tp)
//void pfor_delta_compress(void* source, size_t source_len, string file_name, thrust::host_vector<T>& host, bool tp)
{
long long int orig_lower_val, orig_upper_val, start_val, real_lower, real_upper;
unsigned int bits, recCount;
unsigned int bit_count = 8*8;
unsigned int fit_count;
unsigned int comp_type = 1; // FOR-DELTA
if(tp == 0)
recCount = source_len/int_size;
else
recCount = source_len/float_size;
void* ss;
CUDA_SAFE_CALL(hipMalloc((void **) &ss, recCount*float_size));
if (tp == 0) {
thrust::device_ptr<int_type> s((int_type*)source);
thrust::device_ptr<int_type> d_ss((int_type*)ss);
thrust::adjacent_difference(s, s+recCount, d_ss);
start_val = d_ss[0];
if(recCount > 1)
d_ss[0] = d_ss[1];
orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount));
orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount));
real_lower = s[0];
real_upper = s[recCount-1];
//cout << "orig " << orig_upper_val << " " << orig_lower_val << endl;
//cout << "We need for delta " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values " << endl;
bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1)));
if (bits == 0)
bits = 1;
}
else {
thrust::device_ptr<long long int> s((long long int*)source);
thrust::device_ptr<long long int> d_ss((long long int*)ss);
thrust::adjacent_difference(s, s+recCount, d_ss);
start_val = d_ss[0];
if(recCount > 1)
d_ss[0] = d_ss[1];
orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount));
orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount));
real_lower = s[0];
real_upper = s[recCount-1];
//cout << "orig " << orig_upper_val << " " << orig_lower_val << endl;
//cout << "We need for delta " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values" << endl;
bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1)));
if (bits == 0)
bits = 1;
};
thrust::counting_iterator<unsigned int> begin(0);
fit_count = bit_count/bits;
void* d_v1;
CUDA_SAFE_CALL(hipMalloc((void **) &d_v1, 12));
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1);
void* s_v1;
CUDA_SAFE_CALL(hipMalloc((void **) &s_v1, 8));
thrust::device_ptr<long long int> dd_sv((long long int*)s_v1);
dd_sv[0] = orig_lower_val;
dd_v[0] = bits;
dd_v[1] = fit_count;
dd_v[2] = bit_count;
//void* d;
//CUDA_SAFE_CALL(hipMalloc((void **) &d, recCount*float_size));
thrust::device_ptr<char> dd((char*)source);
thrust::fill(dd, dd+source_len,0);
//cout << "FF " << orig_lower_val << " " << bits << " " << fit_count << " " << bit_count << endl;
if (tp == 0) {
compress_functor_int ff((int_type*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1);
thrust::for_each(begin, begin + recCount, ff);
}
else {
compress_functor_float ff((long long int*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1);
thrust::for_each(begin, begin + recCount, ff);
};
thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)source);
// make an addition sequence
thrust::device_ptr<unsigned long long int> add_seq((unsigned long long int*)ss);
thrust::constant_iterator<unsigned long long int> iter(fit_count);
thrust::sequence(add_seq, add_seq + recCount, 0, 1);
thrust::transform(add_seq, add_seq + recCount, iter, add_seq, thrust::divides<unsigned long long int>());
unsigned int cnt = (recCount)/fit_count;
if (recCount%fit_count > 0)
cnt++;
thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt);
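// each value was shifted into its own disjoint bit slot above, so summing every group of
// fit_count consecutive entries with reduce_by_key (default op is plus) packs them into one 64-bit word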
thrust::reduce_by_key(add_seq, add_seq+recCount,s_copy1,thrust::make_discard_iterator(),
fin_seq);
// copy fin_seq to host
unsigned long long int * raw_src = thrust::raw_pointer_cast(fin_seq);
//cout << file_name << " CNT " << cnt << " " << recCount << endl;
cnt = cnt*8;
hipMemcpy( host.data(), (void *)raw_src, cnt, hipMemcpyDeviceToHost);
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&cnt, 4);
binary_file.write((char *)&real_lower, 8);
binary_file.write((char *)&real_upper, 8);
binary_file.write((char *)&comp_type, 4);
binary_file.write((char *)host.data(),cnt);
binary_file.write((char *)&cnt, 4);
binary_file.write((char *)&recCount, 4);
binary_file.write((char *)&bits, 4);
binary_file.write((char *)&orig_lower_val, 8);
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&start_val, 8);
binary_file.close();
if(cnt_counts[curr_file] < cnt)
cnt_counts[curr_file] = cnt;
thrust::device_free(fin_seq);
hipFree(ss);
hipFree(d_v1);
hipFree(s_v1);
}
// non sorted compressed fields should have 1,2,4 or 8 byte values for direct operations on compressed values
template< typename T>
void pfor_compress(void* source, size_t source_len, string file_name, thrust::host_vector<T, pinned_allocator<T> >& host, bool tp)
//void pfor_compress(void* source, size_t source_len, string file_name, thrust::host_vector<T>& host, bool tp)
{
unsigned int recCount = source_len/int_size;
long long int orig_lower_val;
long long int orig_upper_val;
unsigned int bits;
unsigned int fit_count = 0;
unsigned int comp_type = 0; // FOR
long long int start_val = 0;
bool sorted = 0;
// check if sorted
if(delta) {
if (tp == 0) {
thrust::device_ptr<int_type> s((int_type*)source);
sorted = thrust::is_sorted(s, s+recCount);
}
else {
recCount = source_len/float_size;
thrust::device_ptr<long long int> s((long long int*)source);
sorted = thrust::is_sorted(s, s+recCount);
};
//cout << "file " << file_name << " is sorted " << sorted << endl;
if(sorted) {
pfor_delta_compress(source, source_len, file_name, host, tp);
return;
};
};
if (tp == 0) {
thrust::device_ptr<int_type> s((int_type*)source);
orig_lower_val = *(thrust::min_element(s, s + recCount));
orig_upper_val = *(thrust::max_element(s, s + recCount));
//cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl;
bits = (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1)));
}
else {
thrust::device_ptr<long long int> s((long long int*)source);
orig_lower_val = *(thrust::min_element(s, s + recCount));
orig_upper_val = *(thrust::max_element(s, s + recCount));
//cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl;
bits = (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1)));
};
if (bits != 8 && bits != 16 && bits != 32 && bits != 64) {
if(bits < 8)
bits = 8;
else if(bits < 16)
bits = 16;
else if(bits < 32)
bits = 32;
else if(bits < 64)
bits = 64;
};
//cout << "We will really need " << bits << endl;
unsigned int cnt;
thrust::device_ptr<int_type> s((int_type*)source);
thrust::constant_iterator<int_type> iter(orig_lower_val);
thrust::transform(s, s+recCount, iter, s, thrust::minus<int_type>());
thrust::device_vector<int8_type> d_columns_int8;
thrust::device_vector<int16_type> d_columns_int16;
thrust::device_vector<int32_type> d_columns_int32;
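// after subtracting the minimum, store the values at the narrowest standard width
// (1, 2, 4 or 8 bytes) instead of bit-packing them, so direct operations on the compressed column remain possible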
if(bits == 8) {
d_columns_int8.resize(recCount);
thrust::transform(s, s+recCount, d_columns_int8.begin(), int64_to_char());
hipMemcpy( host.data(), thrust::raw_pointer_cast(d_columns_int8.data()), recCount, hipMemcpyDeviceToHost);
cnt = recCount;
}
else if(bits == 16) {
d_columns_int16.resize(recCount);
thrust::transform(s, s+recCount, d_columns_int16.begin(), int64_to_int16());
hipMemcpy( host.data(), thrust::raw_pointer_cast(d_columns_int16.data()), recCount*2, hipMemcpyDeviceToHost);
cnt = recCount*2;
}
else if(bits == 32) {
d_columns_int32.resize(recCount);
thrust::transform(s, s+recCount, d_columns_int32.begin(), int64_to_int32());
hipMemcpy( host.data(), thrust::raw_pointer_cast(d_columns_int32.data()), recCount*4, hipMemcpyDeviceToHost);
cnt = recCount*4;
}
else {
hipMemcpy( host.data(), (void*)source, recCount*8, hipMemcpyDeviceToHost);
cnt = recCount*8;
};
fit_count = 64/bits;
/*thrust::counting_iterator<unsigned int> begin(0);
fit_count = bit_count/bits;
void* d_v1;
CUDA_SAFE_CALL(hipMalloc((void **) &d_v1, 12));
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1);
void* s_v1;
CUDA_SAFE_CALL(hipMalloc((void **) &s_v1, 8));
thrust::device_ptr<long long int> dd_sv((long long int*)s_v1);
dd_sv[0] = orig_lower_val;
dd_v[0] = bits;
dd_v[1] = fit_count;
dd_v[2] = bit_count;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, recCount*float_size));
thrust::device_ptr<char> dd((char*)d);
thrust::fill(dd, dd+source_len,0);
if (tp == 0) {
compress_functor_int ff((int_type*)source,(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1);
thrust::for_each(begin, begin + recCount, ff);
}
else {
compress_functor_float ff((long long int*)source,(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1);
thrust::for_each(begin, begin + recCount, ff);
};
thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)d);
// make an addition sequence
thrust::device_ptr<unsigned int> add_seq = thrust::device_malloc<unsigned int>(recCount);
thrust::constant_iterator<unsigned int> iter(fit_count);
thrust::sequence(add_seq, add_seq + recCount, 0, 1);
thrust::transform(add_seq, add_seq + recCount, iter, add_seq, thrust::divides<unsigned int>());
unsigned int cnt = (recCount)/fit_count;
if(cnt == 0)
cnt = 1; // need at least 1
if (recCount%fit_count > 0)
cnt++;
//thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt);
thrust::device_ptr<unsigned long long int> fin_seq((unsigned long long int*)source);
thrust::reduce_by_key(add_seq, add_seq+recCount,s_copy1,thrust::make_discard_iterator(),
fin_seq);
// copy fin_seq to host
unsigned long long int * raw_src = thrust::raw_pointer_cast(fin_seq);
//cout << file_name << " CNT " << cnt << " " << recCount << endl;
*/
//cout << "comp Header " << recCount << " " << bits << " " << orig_lower_val << " " << cnt << " " << fit_count << " " << comp_type << endl;
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&cnt, 4);
binary_file.write((char *)&orig_lower_val, 8);
binary_file.write((char *)&orig_upper_val, 8);
binary_file.write((char *)&comp_type, 4);
binary_file.write((char *)host.data(),cnt);
binary_file.write((char *)&cnt, 4);
binary_file.write((char *)&recCount, 4);
binary_file.write((char *)&bits, 4);
binary_file.write((char *)&orig_lower_val, 8);
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&start_val, 8);
binary_file.close();
if(cnt_counts[curr_file] < cnt)
cnt_counts[curr_file] = cnt;
} | 54f870a972694a4b1ca69f907fbac476bfd6cd14.cu | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// PFOR and PFOR-DELTA Compression and decompression routines
#include <stdio.h>
#include <iomanip>
#include <thrust/extrema.h>
#include "cm.h"
using namespace std;
thrust::device_vector<unsigned char> scratch;
bool phase_copy = 0;
std::map<string, unsigned int> cnt_counts;
string curr_file;
struct int64_to_char
{
__host__ __device__
unsigned char operator()(const int_type x)
{
return (unsigned char)x;
}
};
struct char_to_int64
{
__host__ __device__
int_type operator()(const unsigned char x)
{
return (int_type)x;
}
};
struct int64_to_int16
{
__host__ __device__
unsigned short int operator()(const int_type x)
{
return (unsigned short int)x;
}
};
struct int16_to_int64
{
__host__ __device__
int_type operator()(const unsigned short int x)
{
return (int_type)x;
}
};
struct int64_to_int32
{
__host__ __device__
unsigned int operator()(const int_type x)
{
return (unsigned int)x;
}
};
struct int32_to_int64
{
__host__ __device__
int_type operator()(const unsigned int x)
{
return (int_type)x;
}
};
struct bool_to_int
{
__host__ __device__
unsigned int operator()(const bool x)
{
return (unsigned int)x;
}
};
struct ui_to_ll
{
__host__ __device__
long long int operator()(const unsigned int x)
{
return (long long int)x;
}
};
struct compress_functor_int
{
const int_type * source;
unsigned long long int * dest;
const long long int * start_val;
const unsigned int * vals;
compress_functor_int(const int_type * _source, unsigned long long int * _dest,
const long long int * _start_val, const unsigned int * _vals):
source(_source), dest(_dest), start_val(_start_val), vals(_vals) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
long long int val = source[i] - start_val[0];
unsigned int shifted = vals[2] - vals[0] - (i%vals[1])*vals[0];
dest[i] = val << shifted;
}
};
struct compress_functor_float
{
const long long int * source;
unsigned long long int * dest;
const long long int * start_val;
const unsigned int * vals;
compress_functor_float(const long long int * _source, unsigned long long int * _dest,
const long long int * _start_val, const unsigned int * _vals):
source(_source), dest(_dest), start_val(_start_val), vals(_vals) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
long long int val;
unsigned int bits = vals[0];
unsigned int fit_count = vals[1];
unsigned int int_sz = vals[2];
val = source[i] - start_val[0];
unsigned int z = i%fit_count;
unsigned int shifted = int_sz - bits - z*bits;
dest[i] = val << shifted;
}
};
struct decompress_functor_int
{
const unsigned long long int * source;
int_type * dest;
const long long int * start_val;
const unsigned int * vals;
decompress_functor_int(const unsigned long long int * _source, int_type * _dest,
const long long int * _start_val, const unsigned int * _vals):
source(_source), dest(_dest), start_val(_start_val), vals(_vals) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
unsigned long long int tmp = source[i/vals[1]] >> (vals[2] - vals[0] - (i%vals[1])*vals[0]);
// set the rest of bits to 0
tmp = tmp << (vals[2] - vals[0]);
tmp = tmp >> (vals[2] - vals[0]);
dest[i] = tmp + start_val[0];
}
};
struct decompress_functor_str
{
const unsigned long long * source;
unsigned int * dest;
const unsigned int * vals;
decompress_functor_str(const unsigned long long int * _source, unsigned int * _dest,
const unsigned int * _vals):
source(_source), dest(_dest), vals(_vals) {}
template <typename IndexType>
__host__ __device__
void operator()(const IndexType & i) {
unsigned int bits = vals[0];
unsigned int fit_count = vals[1];
unsigned int int_sz = 64;
//find the source index
unsigned int src_idx = i/fit_count;
// find the exact location
unsigned int src_loc = i%fit_count;
//right shift the values
unsigned int shifted = ((fit_count-src_loc)-1)*bits;
unsigned long long int tmp = source[src_idx] >> shifted;
// set the rest of bits to 0
tmp = tmp << (int_sz - bits);
tmp = tmp >> (int_sz - bits);
dest[i] = tmp;
}
};
size_t pfor_decompress(void* destination, void* host, void* d_v, void* s_v, string colname)
{
unsigned int bit_count = 64;
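// "host" points at the block image written by pfor_compress / pfor_delta_compress:
// [cnt:4][lower:8][upper:8][comp_type:4] [data: cnt bytes] [cnt:4][recCount:4][bits:4][lower:8][fit_count:4][start_val:8]
// so the data starts at byte 24 and the trailing header fields are read below relative to (char*)host + cnt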
auto cnt = ((unsigned int*)host)[0];
auto orig_recCount = ((unsigned int*)((char*)host + cnt))[7];
auto bits = ((unsigned int*)((char*)host + cnt))[8];
auto orig_lower_val = ((long long int*)((unsigned int*)((char*)host + cnt) + 9))[0];
auto fit_count = ((unsigned int*)((char*)host + cnt))[11];
auto start_val = ((long long int*)((unsigned int*)((char*)host + cnt) + 12))[0];
auto comp_type = ((unsigned int*)host)[5];
//cout << "Decomp Header " << orig_recCount << " " << bits << " " << orig_lower_val << " " << cnt << " " << fit_count << " " << comp_type << endl;
if(scratch.size() < cnt)
scratch.resize(cnt);
cudaMemcpy(thrust::raw_pointer_cast(scratch.data()), (void*)((unsigned int*)host + 6), cnt, cudaMemcpyHostToDevice);
thrust::device_ptr<int_type> d_int((int_type*)destination);
if(comp_type == 1) {
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
thrust::device_ptr<long long int> dd_sv((long long int*)s_v);
dd_sv[0] = orig_lower_val;
dd_v[0] = bits;
dd_v[1] = fit_count;
dd_v[2] = bit_count;
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_int ff1((const unsigned long long int *)thrust::raw_pointer_cast(scratch.data()),(int_type*)destination, (long long int*)s_v, (unsigned int*)d_v);
thrust::for_each(begin, begin + orig_recCount, ff1);
d_int[0] = start_val;
thrust::inclusive_scan(d_int, d_int + orig_recCount, d_int);
}
else {
if(!phase_copy) {
if(bits == 8) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(scratch.data()));
thrust::transform(src, src+orig_recCount, d_int, char_to_int64());
}
else if(bits == 16) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(scratch.data()));
thrust::transform(src, src+orig_recCount, d_int, int16_to_int64());
}
else if(bits == 32) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(scratch.data()));
thrust::transform(src, src+orig_recCount, d_int, int32_to_int64());
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::copy(src, src+orig_recCount, d_int);
};
thrust::constant_iterator<int_type> iter(orig_lower_val);
thrust::transform(d_int, d_int+orig_recCount, iter, d_int, thrust::plus<int_type>());
}
else {
cpy_bits[colname] = bits;
cpy_init_val[colname] = orig_lower_val;
if(bits == 8) {
thrust::device_ptr<unsigned char> dest((unsigned char*)destination);
thrust::copy(scratch.begin(), scratch.begin()+orig_recCount, dest);
}
else if(bits == 16) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(scratch.data()));
thrust::device_ptr<unsigned short int> dest((unsigned short int*)destination);
thrust::copy(src, src+orig_recCount, dest);
}
else if(bits == 32) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(scratch.data()));
thrust::device_ptr<unsigned int> dest((unsigned int*)destination);
thrust::copy(src, src+orig_recCount, dest);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(scratch.data()));
thrust::copy(src, src+orig_recCount, d_int);
};
//cout << "using phase copy on " << colname << " " << bits << endl;
};
};
return orig_recCount;
}
template< typename T>
void pfor_delta_compress(void* source, size_t source_len, string file_name, thrust::host_vector<T, pinned_allocator<T> >& host, bool tp)
//void pfor_delta_compress(void* source, size_t source_len, string file_name, thrust::host_vector<T>& host, bool tp)
{
long long int orig_lower_val, orig_upper_val, start_val, real_lower, real_upper;
unsigned int bits, recCount;
unsigned int bit_count = 8*8;
unsigned int fit_count;
unsigned int comp_type = 1; // FOR-DELTA
if(tp == 0)
recCount = source_len/int_size;
else
recCount = source_len/float_size;
void* ss;
CUDA_SAFE_CALL(cudaMalloc((void **) &ss, recCount*float_size));
if (tp == 0) {
thrust::device_ptr<int_type> s((int_type*)source);
thrust::device_ptr<int_type> d_ss((int_type*)ss);
thrust::adjacent_difference(s, s+recCount, d_ss);
start_val = d_ss[0];
if(recCount > 1)
d_ss[0] = d_ss[1];
orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount));
orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount));
real_lower = s[0];
real_upper = s[recCount-1];
//cout << "orig " << orig_upper_val << " " << orig_lower_val << endl;
//cout << "We need for delta " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values " << endl;
bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1)));
if (bits == 0)
bits = 1;
}
else {
thrust::device_ptr<long long int> s((long long int*)source);
thrust::device_ptr<long long int> d_ss((long long int*)ss);
thrust::adjacent_difference(s, s+recCount, d_ss);
start_val = d_ss[0];
if(recCount > 1)
d_ss[0] = d_ss[1];
orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount));
orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount));
real_lower = s[0];
real_upper = s[recCount-1];
//cout << "orig " << orig_upper_val << " " << orig_lower_val << endl;
//cout << "We need for delta " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values" << endl;
bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1)));
if (bits == 0)
bits = 1;
};
thrust::counting_iterator<unsigned int> begin(0);
fit_count = bit_count/bits;
void* d_v1;
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v1, 12));
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1);
void* s_v1;
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v1, 8));
thrust::device_ptr<long long int> dd_sv((long long int*)s_v1);
dd_sv[0] = orig_lower_val;
dd_v[0] = bits;
dd_v[1] = fit_count;
dd_v[2] = bit_count;
//void* d;
//CUDA_SAFE_CALL(cudaMalloc((void **) &d, recCount*float_size));
thrust::device_ptr<char> dd((char*)source);
thrust::fill(dd, dd+source_len,0);
//cout << "FF " << orig_lower_val << " " << bits << " " << fit_count << " " << bit_count << endl;
if (tp == 0) {
compress_functor_int ff((int_type*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1);
thrust::for_each(begin, begin + recCount, ff);
}
else {
compress_functor_float ff((long long int*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1);
thrust::for_each(begin, begin + recCount, ff);
};
thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)source);
// make an addition sequence
thrust::device_ptr<unsigned long long int> add_seq((unsigned long long int*)ss);
thrust::constant_iterator<unsigned long long int> iter(fit_count);
thrust::sequence(add_seq, add_seq + recCount, 0, 1);
thrust::transform(add_seq, add_seq + recCount, iter, add_seq, thrust::divides<unsigned long long int>());
unsigned int cnt = (recCount)/fit_count;
if (recCount%fit_count > 0)
cnt++;
thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt);
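// each value was shifted into its own disjoint bit slot above, so summing every group of
// fit_count consecutive entries with reduce_by_key (default op is plus) packs them into one 64-bit word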
thrust::reduce_by_key(add_seq, add_seq+recCount,s_copy1,thrust::make_discard_iterator(),
fin_seq);
// copy fin_seq to host
unsigned long long int * raw_src = thrust::raw_pointer_cast(fin_seq);
//cout << file_name << " CNT " << cnt << " " << recCount << endl;
cnt = cnt*8;
cudaMemcpy( host.data(), (void *)raw_src, cnt, cudaMemcpyDeviceToHost);
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&cnt, 4);
binary_file.write((char *)&real_lower, 8);
binary_file.write((char *)&real_upper, 8);
binary_file.write((char *)&comp_type, 4);
binary_file.write((char *)host.data(),cnt);
binary_file.write((char *)&cnt, 4);
binary_file.write((char *)&recCount, 4);
binary_file.write((char *)&bits, 4);
binary_file.write((char *)&orig_lower_val, 8);
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&start_val, 8);
binary_file.close();
if(cnt_counts[curr_file] < cnt)
cnt_counts[curr_file] = cnt;
thrust::device_free(fin_seq);
cudaFree(ss);
cudaFree(d_v1);
cudaFree(s_v1);
}
// non sorted compressed fields should have 1,2,4 or 8 byte values for direct operations on compressed values
template< typename T>
void pfor_compress(void* source, size_t source_len, string file_name, thrust::host_vector<T, pinned_allocator<T> >& host, bool tp)
//void pfor_compress(void* source, size_t source_len, string file_name, thrust::host_vector<T>& host, bool tp)
{
unsigned int recCount = source_len/int_size;
long long int orig_lower_val;
long long int orig_upper_val;
unsigned int bits;
unsigned int fit_count = 0;
unsigned int comp_type = 0; // FOR
long long int start_val = 0;
bool sorted = 0;
// check if sorted
if(delta) {
if (tp == 0) {
thrust::device_ptr<int_type> s((int_type*)source);
sorted = thrust::is_sorted(s, s+recCount);
}
else {
recCount = source_len/float_size;
thrust::device_ptr<long long int> s((long long int*)source);
sorted = thrust::is_sorted(s, s+recCount);
};
//cout << "file " << file_name << " is sorted " << sorted << endl;
if(sorted) {
pfor_delta_compress(source, source_len, file_name, host, tp);
return;
};
};
if (tp == 0) {
thrust::device_ptr<int_type> s((int_type*)source);
orig_lower_val = *(thrust::min_element(s, s + recCount));
orig_upper_val = *(thrust::max_element(s, s + recCount));
//cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl;
bits = (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1)));
}
else {
thrust::device_ptr<long long int> s((long long int*)source);
orig_lower_val = *(thrust::min_element(s, s + recCount));
orig_upper_val = *(thrust::max_element(s, s + recCount));
//cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl;
bits = (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1)));
};
if (bits != 8 && bits != 16 && bits != 32 && bits != 64) {
if(bits < 8)
bits = 8;
else if(bits < 16)
bits = 16;
else if(bits < 32)
bits = 32;
else if(bits < 64)
bits = 64;
};
//cout << "We will really need " << bits << endl;
unsigned int cnt;
thrust::device_ptr<int_type> s((int_type*)source);
thrust::constant_iterator<int_type> iter(orig_lower_val);
thrust::transform(s, s+recCount, iter, s, thrust::minus<int_type>());
thrust::device_vector<int8_type> d_columns_int8;
thrust::device_vector<int16_type> d_columns_int16;
thrust::device_vector<int32_type> d_columns_int32;
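// after subtracting the minimum, store the values at the narrowest standard width
// (1, 2, 4 or 8 bytes) instead of bit-packing them, so direct operations on the compressed column remain possible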
if(bits == 8) {
d_columns_int8.resize(recCount);
thrust::transform(s, s+recCount, d_columns_int8.begin(), int64_to_char());
cudaMemcpy( host.data(), thrust::raw_pointer_cast(d_columns_int8.data()), recCount, cudaMemcpyDeviceToHost);
cnt = recCount;
}
else if(bits == 16) {
d_columns_int16.resize(recCount);
thrust::transform(s, s+recCount, d_columns_int16.begin(), int64_to_int16());
cudaMemcpy( host.data(), thrust::raw_pointer_cast(d_columns_int16.data()), recCount*2, cudaMemcpyDeviceToHost);
cnt = recCount*2;
}
else if(bits == 32) {
d_columns_int32.resize(recCount);
thrust::transform(s, s+recCount, d_columns_int32.begin(), int64_to_int32());
cudaMemcpy( host.data(), thrust::raw_pointer_cast(d_columns_int32.data()), recCount*4, cudaMemcpyDeviceToHost);
cnt = recCount*4;
}
else {
cudaMemcpy( host.data(), (void*)source, recCount*8, cudaMemcpyDeviceToHost);
cnt = recCount*8;
};
fit_count = 64/bits;
/*thrust::counting_iterator<unsigned int> begin(0);
fit_count = bit_count/bits;
void* d_v1;
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v1, 12));
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1);
void* s_v1;
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v1, 8));
thrust::device_ptr<long long int> dd_sv((long long int*)s_v1);
dd_sv[0] = orig_lower_val;
dd_v[0] = bits;
dd_v[1] = fit_count;
dd_v[2] = bit_count;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, recCount*float_size));
thrust::device_ptr<char> dd((char*)d);
thrust::fill(dd, dd+source_len,0);
if (tp == 0) {
compress_functor_int ff((int_type*)source,(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1);
thrust::for_each(begin, begin + recCount, ff);
}
else {
compress_functor_float ff((long long int*)source,(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1);
thrust::for_each(begin, begin + recCount, ff);
};
thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)d);
// make an addition sequence
thrust::device_ptr<unsigned int> add_seq = thrust::device_malloc<unsigned int>(recCount);
thrust::constant_iterator<unsigned int> iter(fit_count);
thrust::sequence(add_seq, add_seq + recCount, 0, 1);
thrust::transform(add_seq, add_seq + recCount, iter, add_seq, thrust::divides<unsigned int>());
unsigned int cnt = (recCount)/fit_count;
if(cnt == 0)
cnt = 1; // need at least 1
if (recCount%fit_count > 0)
cnt++;
//thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt);
thrust::device_ptr<unsigned long long int> fin_seq((unsigned long long int*)source);
thrust::reduce_by_key(add_seq, add_seq+recCount,s_copy1,thrust::make_discard_iterator(),
fin_seq);
// copy fin_seq to host
unsigned long long int * raw_src = thrust::raw_pointer_cast(fin_seq);
//cout << file_name << " CNT " << cnt << " " << recCount << endl;
*/
//cout << "comp Header " << recCount << " " << bits << " " << orig_lower_val << " " << cnt << " " << fit_count << " " << comp_type << endl;
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&cnt, 4);
binary_file.write((char *)&orig_lower_val, 8);
binary_file.write((char *)&orig_upper_val, 8);
binary_file.write((char *)&comp_type, 4);
binary_file.write((char *)host.data(),cnt);
binary_file.write((char *)&cnt, 4);
binary_file.write((char *)&recCount, 4);
binary_file.write((char *)&bits, 4);
binary_file.write((char *)&orig_lower_val, 8);
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&start_val, 8);
binary_file.close();
if(cnt_counts[curr_file] < cnt)
cnt_counts[curr_file] = cnt;
} |
0145d3786e7660356e80bf54db896b2ede222c5a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common_header.h"
#define TILE_SIZE 32
#define TILE_M TILE_SIZE
#define TILE_N TILE_SIZE
#define SELECTED_KERNEL myDgemmKernel_opt_32x32
__global__ void myDgemmKernel_opt_shared_mem_swizzling(
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const double alpha,
const double* A, int lda,
const double* B, int ldb,
const double beta,
double* C, int ldc)
{
__shared__ double mat_a_shared_tile[TILE_SIZE * TILE_SIZE];
__shared__ double mat_b_shared_tile[TILE_SIZE * TILE_SIZE];
// get initial result value from global mem
int m_read_offset = blockIdx.x * TILE_SIZE + threadIdx.x;
int n_read_offset = blockIdx.y * TILE_SIZE + threadIdx.y;
int mat_c_write_offset = m_read_offset + n_read_offset * ldc;
double result = C[mat_c_write_offset] * beta;
for (int k_iter = 0; k_iter < k / TILE_SIZE; k_iter++) {
int k_iter_base_offset = k_iter * TILE_SIZE;
// matrix A&B write offsets for shared memory
int mat_a_shared_write_offset = threadIdx.x * TILE_SIZE + threadIdx.y;
int mat_b_shared_write_offset = threadIdx.y * TILE_SIZE + threadIdx.x;
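// note: the A and B tiles are written with swapped index orders, so one tile is stored
// transposed relative to the other in shared memory (the "swizzling" in the kernel name)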
// matrix A&B read offsets for global memory
int mat_a_global_read_offset =
(k_iter_base_offset + threadIdx.x) * lda + (blockIdx.x * TILE_SIZE + threadIdx.y);
int mat_b_global_read_offset =
(k_iter_base_offset + threadIdx.x) * ldb + (blockIdx.y * TILE_SIZE + threadIdx.y);
// copy global mem tiles to shared mem
mat_a_shared_tile[mat_a_shared_write_offset] = A[mat_a_global_read_offset];
mat_b_shared_tile[mat_b_shared_write_offset] = B[mat_b_global_read_offset];
// sync after copy
__syncthreads();
// dot product loop
#pragma unroll
for (int dp_iter = 0; dp_iter < TILE_SIZE; dp_iter++) {
int mat_a_read_offset = transa == HIPBLAS_OP_T ?
threadIdx.x * TILE_SIZE + dp_iter :
threadIdx.x + TILE_SIZE * dp_iter;
int mat_b_read_offset = transb == HIPBLAS_OP_T ?
threadIdx.y * TILE_SIZE + dp_iter :
threadIdx.y + TILE_SIZE * dp_iter;
result += alpha * mat_a_shared_tile[mat_a_read_offset] * mat_b_shared_tile[mat_b_read_offset];
}
// sync to make sure shared mem can be overwritten
__syncthreads();
}
// save result to global mem
C[mat_c_write_offset] = result;
}
__global__ void myDgemmKernel_32x32_shared_mem(
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const double alpha,
const double* A, int lda,
const double* B, int ldb,
const double beta,
double* C, int ldc)
{
__shared__ double mat_a_shared_tile[TILE_SIZE * TILE_SIZE];
__shared__ double mat_b_shared_tile[TILE_SIZE * TILE_SIZE];
// get initial result value from global mem
int m_read_offset = blockIdx.x * TILE_SIZE + threadIdx.x;
int n_read_offset = blockIdx.y * TILE_SIZE + threadIdx.y;
int mat_c_write_offset = m_read_offset + n_read_offset * ldc;
double result = C[mat_c_write_offset] * beta;
for (int k_iter = 0; k_iter < k / TILE_SIZE; k_iter++) {
int k_iter_base_offset = k_iter * TILE_SIZE;
// matrix A&B write offsets for shared memory
int mat_a_shared_write_offset = threadIdx.x * TILE_SIZE + threadIdx.y;
int mat_b_shared_write_offset = threadIdx.y * TILE_SIZE + threadIdx.x;
// matrix A&B read offsets for global memory
int mat_a_global_read_offset = transa == HIPBLAS_OP_T ?
(k_iter_base_offset + threadIdx.x) + lda * (blockIdx.x * TILE_SIZE + threadIdx.y) :
(k_iter_base_offset + threadIdx.x) * lda + (blockIdx.x * TILE_SIZE + threadIdx.y);
int mat_b_global_read_offset = transb == HIPBLAS_OP_T ?
(k_iter_base_offset + threadIdx.x) * ldb + (blockIdx.y * TILE_SIZE + threadIdx.y) :
(k_iter_base_offset + threadIdx.x) + ldb * (blockIdx.y * TILE_SIZE + threadIdx.y);
// copy global mem tiles to shared mem
mat_a_shared_tile[mat_a_shared_write_offset] = A[mat_a_global_read_offset];
mat_b_shared_tile[mat_b_shared_write_offset] = B[mat_b_global_read_offset];
// sync after copy
__syncthreads();
// dot product loop
#pragma unroll
for (int dp_iter = 0; dp_iter < TILE_SIZE; dp_iter++) {
int mat_a_read_offset = threadIdx.x + TILE_SIZE * dp_iter;
int mat_b_read_offset = threadIdx.y * TILE_SIZE + dp_iter;
result += alpha * mat_a_shared_tile[mat_a_read_offset] * mat_b_shared_tile[mat_b_read_offset];
}
// sync to make sure shared mem can be overwritten
__syncthreads();
}
// save result to global mem
C[mat_c_write_offset] = result;
}
__global__ void myDgemmKernel_opt_32x32(
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const double alpha,
const double* A, int lda,
const double* B, int ldb,
const double beta,
double* C, int ldc)
{
int m_read_offset = blockIdx.x * TILE_M + threadIdx.x;
int n_read_offset = blockIdx.y * TILE_N + threadIdx.y;
int mat_c_write_offset = m_read_offset + n_read_offset * ldc;
double result = C[mat_c_write_offset] * beta;
for (int k_iter = 0; k_iter < k; k_iter++) {
int mat_a_read_offset = transa == HIPBLAS_OP_T ?
m_read_offset * lda + k_iter :
m_read_offset + lda * k_iter;
int mat_b_read_offset = transb == HIPBLAS_OP_T ?
n_read_offset + ldb * k_iter :
n_read_offset * ldb + k_iter;
result += alpha * A[mat_a_read_offset] * B[mat_b_read_offset];
}
C[mat_c_write_offset] = result;
}
cudaReturnValue myDgemmHostCodeOpt(
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const double* alpha,
const double* A, int lda,
const double* B, int ldb,
const double* beta,
double* C, int ldc) {
double* dev_A = 0;
const int dev_A_size = m * k * sizeof(double);
double* dev_B = 0;
const int dev_B_size = n * k * sizeof(double);
double* dev_C = 0;
const int dev_C_size = m * n * sizeof(double);
hipError_t cudaStatus;
double executionTime = -1.;
hipblasHandle_t handle;
hipblasStatus_t stat;
dim3 numBlocks(m / TILE_M, n / TILE_N);
dim3 threadsPerBlock(TILE_M, TILE_N);
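// one thread per element of C; the kernels do no bounds checking, so m and n are assumed
// to be multiples of TILE_SIZE (and k as well for the shared-memory kernels)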
clock_t t;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)& dev_A, dev_A_size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)& dev_B, dev_B_size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)& dev_C, dev_C_size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
stat = hipblasCreate(&handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "CUBLAS initialization failed\n");
cudaStatus = hipErrorNotSupported;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_A, A, dev_A_size, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_B, B, dev_B_size, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_C, C, dev_C_size, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// start time measurement
t = clock();
// Launch a kernel on the GPU with one thread for each element.
SELECTED_KERNEL << <numBlocks, threadsPerBlock >> > (
transa, transb,
m, n, k,
*alpha,
dev_A, lda,
dev_B, ldb,
*beta,
dev_C, ldc
);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// measure time
t = clock() - t;
executionTime = ((double)t) / CLOCKS_PER_SEC;
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(C, dev_C, dev_C_size, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_A);
hipFree(dev_B);
hipFree(dev_C);
return { cudaStatus, executionTime };
}
| 0145d3786e7660356e80bf54db896b2ede222c5a.cu | #include "common_header.h"
#define TILE_SIZE 32
#define TILE_M TILE_SIZE
#define TILE_N TILE_SIZE
#define SELECTED_KERNEL myDgemmKernel_opt_32x32
__global__ void myDgemmKernel_opt_shared_mem_swizzling(
cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k,
const double alpha,
const double* A, int lda,
const double* B, int ldb,
const double beta,
double* C, int ldc)
{
__shared__ double mat_a_shared_tile[TILE_SIZE * TILE_SIZE];
__shared__ double mat_b_shared_tile[TILE_SIZE * TILE_SIZE];
// get initial result value from global mem
int m_read_offset = blockIdx.x * TILE_SIZE + threadIdx.x;
int n_read_offset = blockIdx.y * TILE_SIZE + threadIdx.y;
int mat_c_write_offset = m_read_offset + n_read_offset * ldc;
double result = C[mat_c_write_offset] * beta;
for (int k_iter = 0; k_iter < k / TILE_SIZE; k_iter++) {
int k_iter_base_offset = k_iter * TILE_SIZE;
// matrix A&B write offsets for shared memory
int mat_a_shared_write_offset = threadIdx.x * TILE_SIZE + threadIdx.y;
int mat_b_shared_write_offset = threadIdx.y * TILE_SIZE + threadIdx.x;
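// note: the A and B tiles are written with swapped index orders, so one tile is stored
// transposed relative to the other in shared memory (the "swizzling" in the kernel name)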
// matrix A&B read offsets for global memory
int mat_a_global_read_offset =
(k_iter_base_offset + threadIdx.x) * lda + (blockIdx.x * TILE_SIZE + threadIdx.y);
int mat_b_global_read_offset =
(k_iter_base_offset + threadIdx.x) * ldb + (blockIdx.y * TILE_SIZE + threadIdx.y);
// copy global mem tiles to shared mem
mat_a_shared_tile[mat_a_shared_write_offset] = A[mat_a_global_read_offset];
mat_b_shared_tile[mat_b_shared_write_offset] = B[mat_b_global_read_offset];
// sync after copy
__syncthreads();
// dot product loop
#pragma unroll
for (int dp_iter = 0; dp_iter < TILE_SIZE; dp_iter++) {
int mat_a_read_offset = transa == CUBLAS_OP_T ?
threadIdx.x * TILE_SIZE + dp_iter :
threadIdx.x + TILE_SIZE * dp_iter;
int mat_b_read_offset = transb == CUBLAS_OP_T ?
threadIdx.y * TILE_SIZE + dp_iter :
threadIdx.y + TILE_SIZE * dp_iter;
result += alpha * mat_a_shared_tile[mat_a_read_offset] * mat_b_shared_tile[mat_b_read_offset];
}
// sync to make sure shared mem can be overwritten
__syncthreads();
}
// save result to global mem
C[mat_c_write_offset] = result;
}
__global__ void myDgemmKernel_32x32_shared_mem(
cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k,
const double alpha,
const double* A, int lda,
const double* B, int ldb,
const double beta,
double* C, int ldc)
{
__shared__ double mat_a_shared_tile[TILE_SIZE * TILE_SIZE];
__shared__ double mat_b_shared_tile[TILE_SIZE * TILE_SIZE];
// get initial result value from global mem
int m_read_offset = blockIdx.x * TILE_SIZE + threadIdx.x;
int n_read_offset = blockIdx.y * TILE_SIZE + threadIdx.y;
int mat_c_write_offset = m_read_offset + n_read_offset * ldc;
double result = C[mat_c_write_offset] * beta;
for (int k_iter = 0; k_iter < k / TILE_SIZE; k_iter++) {
int k_iter_base_offset = k_iter * TILE_SIZE;
// matrix A&B write offsets for shared memory
int mat_a_shared_write_offset = threadIdx.x * TILE_SIZE + threadIdx.y;
int mat_b_shared_write_offset = threadIdx.y * TILE_SIZE + threadIdx.x;
// matrix A&B read offsets for global memory
int mat_a_global_read_offset = transa == CUBLAS_OP_T ?
(k_iter_base_offset + threadIdx.x) + lda * (blockIdx.x * TILE_SIZE + threadIdx.y) :
(k_iter_base_offset + threadIdx.x) * lda + (blockIdx.x * TILE_SIZE + threadIdx.y);
int mat_b_global_read_offset = transb == CUBLAS_OP_T ?
(k_iter_base_offset + threadIdx.x) * ldb + (blockIdx.y * TILE_SIZE + threadIdx.y) :
(k_iter_base_offset + threadIdx.x) + ldb * (blockIdx.y * TILE_SIZE + threadIdx.y);
// copy global mem tiles to shared mem
mat_a_shared_tile[mat_a_shared_write_offset] = A[mat_a_global_read_offset];
mat_b_shared_tile[mat_b_shared_write_offset] = B[mat_b_global_read_offset];
// sync after copy
__syncthreads();
// dot product loop
#pragma unroll
for (int dp_iter = 0; dp_iter < TILE_SIZE; dp_iter++) {
int mat_a_read_offset = threadIdx.x + TILE_SIZE * dp_iter;
int mat_b_read_offset = threadIdx.y * TILE_SIZE + dp_iter;
result += alpha * mat_a_shared_tile[mat_a_read_offset] * mat_b_shared_tile[mat_b_read_offset];
}
// sync to make sure shared mem can be overwritten
__syncthreads();
}
// save result to global mem
C[mat_c_write_offset] = result;
}
__global__ void myDgemmKernel_opt_32x32(
cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k,
const double alpha,
const double* A, int lda,
const double* B, int ldb,
const double beta,
double* C, int ldc)
{
int m_read_offset = blockIdx.x * TILE_M + threadIdx.x;
int n_read_offset = blockIdx.y * TILE_N + threadIdx.y;
int mat_c_write_offset = m_read_offset + n_read_offset * ldc;
double result = C[mat_c_write_offset] * beta;
for (int k_iter = 0; k_iter < k; k_iter++) {
int mat_a_read_offset = transa == CUBLAS_OP_T ?
m_read_offset * lda + k_iter :
m_read_offset + lda * k_iter;
int mat_b_read_offset = transb == CUBLAS_OP_T ?
n_read_offset + ldb * k_iter :
n_read_offset * ldb + k_iter;
result += alpha * A[mat_a_read_offset] * B[mat_b_read_offset];
}
C[mat_c_write_offset] = result;
}
cudaReturnValue myDgemmHostCodeOpt(
cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k,
const double* alpha,
const double* A, int lda,
const double* B, int ldb,
const double* beta,
double* C, int ldc) {
double* dev_A = 0;
const int dev_A_size = m * k * sizeof(double);
double* dev_B = 0;
const int dev_B_size = n * k * sizeof(double);
double* dev_C = 0;
const int dev_C_size = m * n * sizeof(double);
cudaError_t cudaStatus;
double executionTime = -1.;
cublasHandle_t handle;
cublasStatus_t stat;
dim3 numBlocks(m / TILE_M, n / TILE_N);
dim3 threadsPerBlock(TILE_M, TILE_N);
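// one thread per element of C; the kernels do no bounds checking, so m and n are assumed
// to be multiples of TILE_SIZE (and k as well for the shared-memory kernels)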
clock_t t;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)& dev_A, dev_A_size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& dev_B, dev_B_size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& dev_C, dev_C_size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
stat = cublasCreate(&handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "CUBLAS initialization failed\n");
cudaStatus = cudaErrorNotSupported;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_A, A, dev_A_size, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_B, B, dev_B_size, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_C, C, dev_C_size, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// start time measurement
t = clock();
// Launch a kernel on the GPU with one thread for each element.
SELECTED_KERNEL << <numBlocks, threadsPerBlock >> > (
transa, transb,
m, n, k,
*alpha,
dev_A, lda,
dev_B, ldb,
*beta,
dev_C, ldc
);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// measure time
t = clock() - t;
executionTime = ((double)t) / CLOCKS_PER_SEC;
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(C, dev_C, dev_C_size, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_A);
cudaFree(dev_B);
cudaFree(dev_C);
return { cudaStatus, executionTime };
}
|
76d86cba979e91abe1e493d56a4063313d798f52.hip | // !!! This is a file automatically generated by hipify!!!
/*
* ARQUITECTURA DE COMPUTADORES
 * 2º Grado en Ingenieria Informatica
 *
 * PRACTICA 2: "Ordenación de Array De Menor a Mayor".
 * >> TODO => Finalizado.
 *
 * AUTOR: Iván Ruiz Gázquez e Iván Maeso Adrián.
*/
///////////////////////////////////////////////////////////////////////////
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
// Defines
#define RAN_MIN 1
#define RAN_MAX 50
// Declaracion de funciones
void cudaDev()
{
// Saca num hilos, funcion CUDA
int dev;
hipGetDevice(&dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
//
printf("\n***********************************************************************\n\n");
printf("> Nombre Dispositivos: %s\n", deviceProp.name);
printf("> Capacidad de Computo: %d.%d\n", deviceProp.major, deviceProp.minor);
printf("> Numero de MultiProcesadores: %d \n", deviceProp.multiProcessorCount);
printf("> Numero de Nucleos (Arq. PASCAL): %d \n", 64);
printf("> Maximo de hilos por eje en bloque\n");
printf(" \t[x -> %d]\n \t[y -> %d]\n \t[z -> %d]\n",deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
printf("> Maximo de bloques por eje\n");
printf(" \t[x -> %d]\n \t[y -> %d]\n \t[z -> %d]\n",deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
printf("\n***********************************************************************\n");
}
__global__
void ordenarArray(int *dev_desordenado, int *dev_ordenado, int elem)
{
int myID = threadIdx.x;
int rango = 0;
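// rank sort: each thread counts how many elements are smaller than its own value
// (ties broken by thread index) and writes its value at that rank in the sorted output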
for(int i=0; i<elem; i++) {
if((dev_desordenado[myID] > dev_desordenado[i]) && (myID != i))
rango++;
if(dev_desordenado[myID] == dev_desordenado[i] && myID > i)
rango++;
}
dev_ordenado[rango] = dev_desordenado[myID];
}
// MAIN: Rutina principal ejecutada en el host
int main(int argc, char** argv)
{
//Eventos
hipEvent_t start;
hipEvent_t stop;
// Creacion de eventos
hipEventCreate(&start);
hipEventCreate(&stop);
// Marca de inicio
hipEventRecord(start, 0);
// Declaracion
int *hst_desordenado;
int *hst_ordenado;
int *dev_desordenado;
int *dev_ordenado;
// Elementos
int elem;
// Llama a la función Cuda que devuelve info
cudaDev();
// Pregunta número de elementos
do {
printf("\n\nNumero de elementos (MAX=1024): ");
scanf("%d", &elem);
getchar();
} while (elem<=0 || elem>1024);
// Dimensiones del kernel
dim3 Nbloques(1);
dim3 hilosB(elem);
// Reserva en el host
hst_ordenado = (int*)malloc(elem * sizeof(int));
hst_desordenado = (int*)malloc(elem * sizeof(int));
// Reserva en el device
hipMalloc( &dev_ordenado, elem * sizeof(int));
hipMalloc( &dev_desordenado, elem * sizeof(int));
// Insertamos valores random en la matriz
srand((int)time(NULL));
for (int i = 0; i < elem; i++)
{
hst_desordenado[i] = RAN_MIN + rand() % RAN_MAX;
}
// Pasamos el array al device y le damos la vuelta
hipMemcpy(dev_desordenado, hst_desordenado, elem * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ordenarArray) , dim3(Nbloques), dim3(hilosB), 0, 0, dev_desordenado, dev_ordenado, elem);
// Check de errores
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr, "ERROR: %s\n", hipGetErrorString(error));
exit(-1);
}
// Pasamos el resultado a la cpu
hipMemcpy(hst_ordenado, dev_ordenado, elem * sizeof(int), hipMemcpyDeviceToHost);
// Muestra contenido de arrays y resultado
printf("\n\nMatriz Desordenada: \n");
printf("*********************\n");
for (int i = 0; i < elem; i++) {
printf("%d ", hst_desordenado[i]);
}
printf("\n\nMatriz Ordenada: \n");
printf("*********************\n");
for (int i = 0; i < elem; i++) {
printf("%d ", hst_ordenado[i]);
}
// Marca de final
hipEventRecord(stop, 0);
// Sincronizacion CPU-GPU
hipEventSynchronize(stop);
// Calculo del tiempo
float tiempoTrans;
hipEventElapsedTime(&tiempoTrans, start, stop);
printf("\n\n\n> Tiempo de ejecuccion: %f ms\n", tiempoTrans);
// Liberacion de recursos
free(hst_desordenado);
free(hst_ordenado);
hipFree(dev_desordenado);
hipFree(dev_ordenado);
hipEventDestroy(start);
hipEventDestroy(stop);
// Salida
time_t fecha;
time(&fecha);
printf("\n\n***************************************************\n");
printf("Programa ejecutado el: %s\n", ctime(&fecha));
printf("<pulsa [INTRO] para finalizar>");
getchar();
return 0;
}
| 76d86cba979e91abe1e493d56a4063313d798f52.cu | /*
* ARQUITECTURA DE COMPUTADORES
* 2º Grado en Ingenieria Informatica
*
* PRACTICA 2: "Ordenación de Array De Menor a Mayor".
* >> TODO => Finalizado.
*
* AUTOR: Iván Ruiz Gázquez e Iván Maeso Adrián.
*/
///////////////////////////////////////////////////////////////////////////
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
// Defines
#define RAN_MIN 1
#define RAN_MAX 50
// Declaracion de funciones
void cudaDev()
{
// Saca num hilos, funcion CUDA
int dev;
cudaGetDevice(&dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
//
printf("\n***********************************************************************\n\n");
printf("> Nombre Dispositivos: %s\n", deviceProp.name);
printf("> Capacidad de Computo: %d.%d\n", deviceProp.major, deviceProp.minor);
printf("> Numero de MultiProcesadores: %d \n", deviceProp.multiProcessorCount);
printf("> Numero de Nucleos (Arq. PASCAL): %d \n", 64);
printf("> Maximo de hilos por eje en bloque\n");
printf(" \t[x -> %d]\n \t[y -> %d]\n \t[z -> %d]\n",deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
printf("> Maximo de bloques por eje\n");
printf(" \t[x -> %d]\n \t[y -> %d]\n \t[z -> %d]\n",deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
printf("\n***********************************************************************\n");
}
__global__
void ordenarArray(int *dev_desordenado, int *dev_ordenado, int elem)
{
int myID = threadIdx.x;
int rango = 0;
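// rank sort: each thread counts how many elements are smaller than its own value
// (ties broken by thread index) and writes its value at that rank in the sorted output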
for(int i=0; i<elem; i++) {
if((dev_desordenado[myID] > dev_desordenado[i]) && (myID != i))
rango++;
if(dev_desordenado[myID] == dev_desordenado[i] && myID > i)
rango++;
}
dev_ordenado[rango] = dev_desordenado[myID];
}
// MAIN: Rutina principal ejecutada en el host
int main(int argc, char** argv)
{
//Eventos
cudaEvent_t start;
cudaEvent_t stop;
// Creacion de eventos
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Marca de inicio
cudaEventRecord(start, 0);
// Declaracion
int *hst_desordenado;
int *hst_ordenado;
int *dev_desordenado;
int *dev_ordenado;
// Elementos
int elem;
// Llama a la función Cuda que devuelve info
cudaDev();
// Pregunta número de elementos
do {
printf("\n\nNumero de elementos (MAX=1024): ");
scanf("%d", &elem);
getchar();
} while (elem<=0 || elem>1024);
// Dimensiones del kernel
dim3 Nbloques(1);
dim3 hilosB(elem);
// Reserva en el host
hst_ordenado = (int*)malloc(elem * sizeof(int));
hst_desordenado = (int*)malloc(elem * sizeof(int));
// Reserva en el device
cudaMalloc( &dev_ordenado, elem * sizeof(int));
cudaMalloc( &dev_desordenado, elem * sizeof(int));
// Insertamos valores random en la matriz
srand((int)time(NULL));
for (int i = 0; i < elem; i++)
{
hst_desordenado[i] = RAN_MIN + rand() % RAN_MAX;
}
// Pasamos el array al device y le damos la vuelta
cudaMemcpy(dev_desordenado, hst_desordenado, elem * sizeof(int), cudaMemcpyHostToDevice);
ordenarArray <<<Nbloques, hilosB>>>(dev_desordenado, dev_ordenado, elem);
// Check de errores
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(error));
exit(-1);
}
// Pasamos el resultado a la cpu
cudaMemcpy(hst_ordenado, dev_ordenado, elem * sizeof(int), cudaMemcpyDeviceToHost);
// Muestra contenido de arrays y resultado
printf("\n\nMatriz Desordenada: \n");
printf("*********************\n");
for (int i = 0; i < elem; i++) {
printf("%d ", hst_desordenado[i]);
}
printf("\n\nMatriz Ordenada: \n");
printf("*********************\n");
for (int i = 0; i < elem; i++) {
printf("%d ", hst_ordenado[i]);
}
    // End timestamp
    cudaEventRecord(stop, 0);
    // CPU-GPU synchronization
    cudaEventSynchronize(stop);
    // Elapsed-time calculation
    float tiempoTrans;
    cudaEventElapsedTime(&tiempoTrans, start, stop);
    printf("\n\n\n> Execution time: %f ms\n", tiempoTrans);
    // Release resources
free(hst_desordenado);
free(hst_ordenado);
cudaFree(dev_desordenado);
cudaFree(dev_ordenado);
cudaEventDestroy(start);
cudaEventDestroy(stop);
    // Exit
time_t fecha;
time(&fecha);
printf("\n\n***************************************************\n");
printf("Programa ejecutado el: %s\n", ctime(&fecha));
printf("<pulsa [INTRO] para finalizar>");
getchar();
return 0;
}
|
5e81a57382299e624e573905c9b173bc5240c7ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved
#include "modulated_deform_conv_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void modulated_deformable_im2col_cuda(
const Tensor data_im, const Tensor data_offset, const Tensor data_mask,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int deformable_group, Tensor data_col) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel),
dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_im_, data_offset_, data_mask_, height_im,
width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group, batch_size,
channels, deformable_group, height_col, width_col, data_col_);
}));
AT_CUDA_CHECK(hipGetLastError());
}
void modulated_deformable_col2im_cuda(
const Tensor data_col, const Tensor data_offset, const Tensor data_mask,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int deformable_group, Tensor grad_im) {
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels =
channels * kernel_h * kernel_w * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel),
dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_col_, data_offset_, data_mask_, channels,
height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h,
stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im_);
}));
AT_CUDA_CHECK(hipGetLastError());
}
void modulated_deformable_col2im_coord_cuda(
const Tensor data_col, const Tensor data_im, const Tensor data_offset,
const Tensor data_mask, const int batch_size, const int channels,
const int height_im, const int width_im, const int height_col,
const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int deformable_group,
Tensor grad_offset, Tensor grad_mask) {
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h *
kernel_w * deformable_group;
const int channel_per_deformable_group =
channels * kernel_h * kernel_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data_ptr<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel),
dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_kernels, data_col_, data_im_, data_offset_, data_mask_,
channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w,
stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, batch_size,
2 * kernel_h * kernel_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_, grad_mask_);
}));
AT_CUDA_CHECK(hipGetLastError());
}
| 5e81a57382299e624e573905c9b173bc5240c7ba.cu | // Copyright (c) OpenMMLab. All rights reserved
#include "modulated_deform_conv_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void modulated_deformable_im2col_cuda(
const Tensor data_im, const Tensor data_offset, const Tensor data_mask,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int deformable_group, Tensor data_col) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
modulated_deformable_im2col_gpu_kernel<<<
GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0,
at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_im_, data_offset_, data_mask_, height_im,
width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group, batch_size,
channels, deformable_group, height_col, width_col, data_col_);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
void modulated_deformable_col2im_cuda(
const Tensor data_col, const Tensor data_offset, const Tensor data_mask,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int deformable_group, Tensor grad_im) {
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels =
channels * kernel_h * kernel_w * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>();
modulated_deformable_col2im_gpu_kernel<<<
GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0,
at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_col_, data_offset_, data_mask_, channels,
height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h,
stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im_);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
void modulated_deformable_col2im_coord_cuda(
const Tensor data_col, const Tensor data_im, const Tensor data_offset,
const Tensor data_mask, const int batch_size, const int channels,
const int height_im, const int width_im, const int height_col,
const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int deformable_group,
Tensor grad_offset, Tensor grad_mask) {
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h *
kernel_w * deformable_group;
const int channel_per_deformable_group =
channels * kernel_h * kernel_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data_ptr<scalar_t>();
modulated_deformable_col2im_coord_gpu_kernel<<<
GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0,
at::cuda::getCurrentCUDAStream()>>>(
num_kernels, data_col_, data_im_, data_offset_, data_mask_,
channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w,
stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, batch_size,
2 * kernel_h * kernel_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_, grad_mask_);
}));
AT_CUDA_CHECK(cudaGetLastError());
}
|
52ed2a8f4f6be345178b0846691484001daa1a36.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>
#define N 100
__global__ void mul(int a[][N], int b[][N], int c[][N]){
    // One thread per output element: thread (row, col) accumulates the dot
    // product of row `row` of a with column `col` of b.
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int col = blockIdx.y*blockDim.y+threadIdx.y;
    if(row < N && col < N) {
        int sum = 0;
        for(int k = 0; k < N; k++) {
            sum += a[row][k] * b[k][col];
        }
        c[row][col] = sum;
    }
}
int main(){
int (*pa)[N], (*pb)[N], (*pc)[N];
int a[N][N], b[N][N], c[N][N];
srand((unsigned)time(NULL));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipMalloc((void**)&pa, (N*N) * sizeof(int));
hipMalloc((void**)&pb, (N*N) * sizeof(int));
hipMalloc((void**)&pc, (N*N) * sizeof(int));
for(int i = 0 ; i<N ; i++){
for(int j = 0 ; j<N ; j++) {
a[i][j] = rand()%10 + 1;
b[i][j] = rand()%10 + 1;
}
}
hipMemcpy(pa, a, (N*N) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(pb, b, (N*N) * sizeof(int), hipMemcpyHostToDevice);
    // N*N threads in one block would exceed the 1024-threads-per-block limit,
    // so launch a 2-D grid of 16x16 blocks that covers the N x N output
    dim3 threadsPerBlock(16,16);
    dim3 numBlocks((N+15)/16, (N+15)/16);
    hipLaunchKernelGGL(( mul), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, pa, pb, pc);
hipMemcpy(c, pc, (N*N) * sizeof(int), hipMemcpyDeviceToHost);
printf("matrix multiplication per thread\n");
/* for(int i = 0 ; i<N ; i++){
for(int j = 0 ; j<N ; j++) {
printf("%d ",c[i][j]);
}
printf("\n");
}
*/
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("Time to generate : %3.1f ms\n", elapsedTime);
hipFree(pa);
hipFree(pb);
hipFree(pc);
return 0;
}
| 52ed2a8f4f6be345178b0846691484001daa1a36.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>
#define N 100
__global__ void mul(int a[][N], int b[][N], int c[][N]){
    // One thread per output element: thread (row, col) accumulates the dot
    // product of row `row` of a with column `col` of b.
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int col = blockIdx.y*blockDim.y+threadIdx.y;
    if(row < N && col < N) {
        int sum = 0;
        for(int k = 0; k < N; k++) {
            sum += a[row][k] * b[k][col];
        }
        c[row][col] = sum;
    }
}
int main(){
int (*pa)[N], (*pb)[N], (*pc)[N];
int a[N][N], b[N][N], c[N][N];
srand((unsigned)time(NULL));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMalloc((void**)&pa, (N*N) * sizeof(int));
cudaMalloc((void**)&pb, (N*N) * sizeof(int));
cudaMalloc((void**)&pc, (N*N) * sizeof(int));
for(int i = 0 ; i<N ; i++){
for(int j = 0 ; j<N ; j++) {
a[i][j] = rand()%10 + 1;
b[i][j] = rand()%10 + 1;
}
}
cudaMemcpy(pa, a, (N*N) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(pb, b, (N*N) * sizeof(int), cudaMemcpyHostToDevice);
    // N*N threads in one block would exceed the 1024-threads-per-block limit,
    // so launch a 2-D grid of 16x16 blocks that covers the N x N output
    dim3 threadsPerBlock(16,16);
    dim3 numBlocks((N+15)/16, (N+15)/16);
    mul<<<numBlocks,threadsPerBlock>>>(pa, pb, pc);
cudaMemcpy(c, pc, (N*N) * sizeof(int), cudaMemcpyDeviceToHost);
printf("matrix multiplication per thread\n");
/* for(int i = 0 ; i<N ; i++){
for(int j = 0 ; j<N ; j++) {
printf("%d ",c[i][j]);
}
printf("\n");
}
*/
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Time to generate : %3.1f ms\n", elapsedTime);
cudaFree(pa);
cudaFree(pb);
cudaFree(pc);
return 0;
}
|
d3af00a051a998be0d137e2f292da4dc11f6dd16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zgerbt_func_batched.cu normal z -> s, Sat Nov 15 19:53:59 2014
@author Adrien Remy
@author Azzam Haidar
*/
#include "common_magma.h"
#include "sgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SPRBT_MVT compute B = UTB to randomize B
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in]
du REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in,out]
db REAL array, dimension (n)
The n vector db computed by SGESV_NOPIV_GPU
On exit db = du*db
@param[in]
queue magma_queue_t
Queue to execute in.
********************************************************************/
extern "C" void
magmablas_sprbt_mtv_batched_q(
magma_int_t n,
float *du, float **db_array,
magma_queue_t queue, magma_int_t batchCount)
{
/*
*/
magma_int_t threads = block_length;
dim3 grid ( n/(4*block_length) + ((n%(4*block_length))!=0), batchCount);
hipLaunchKernelGGL(( magmablas_sapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, du, n, db_array, 0);
hipLaunchKernelGGL(( magmablas_sapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, du, n+n/2, db_array, n/2);
threads = block_length;
    grid = dim3( n/(2*block_length) + ((n%(2*block_length))!=0), batchCount );  // explicit dim3 so batchCount stays the y-dimension
hipLaunchKernelGGL(( magmablas_sapply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, du, 0, db_array, 0);
}
/**
@see magmablas_sprbt_mtv_q
********************************************************************/
extern "C" void
magmablas_sprbt_mtv_batched(
magma_int_t n,
float *du, float **db_array, magma_int_t batchCount)
{
magmablas_sprbt_mtv_batched_q(n, du, db_array, magma_stream, batchCount);
}
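// Note: the two phases above mirror the depth-2 recursive butterfly used by the
// partial randomization: the first pair of launches applies the half-size
// transposed butterflies (stored at offsets n and n+n/2 of du) to each half of
// every right-hand side, and the final launch applies the full-size transposed
// butterfly (offset 0), which together multiplies each vector in the batch by U^T.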
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SPRBT_MV compute B = VB to obtain the non randomized solution
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in,out]
db REAL array, dimension (n)
The n vector db computed by SGESV_NOPIV_GPU
On exit db = dv*db
@param[in]
dv REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
********************************************************************/
extern "C" void
magmablas_sprbt_mv_batched_q(
magma_int_t n,
float *dv, float **db_array,
magma_queue_t queue, magma_int_t batchCount)
{
magma_int_t threads = block_length;
dim3 grid ( n/(2*block_length) + ((n%(2*block_length))!=0), batchCount);
hipLaunchKernelGGL(( magmablas_sapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dv, 0, db_array, 0);
threads = block_length;
    grid = dim3( n/(4*block_length) + ((n%(4*block_length))!=0), batchCount );  // explicit dim3 so batchCount stays the y-dimension
hipLaunchKernelGGL(( magmablas_sapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, dv, n, db_array, 0);
hipLaunchKernelGGL(( magmablas_sapply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, dv, n+n/2, db_array, n/2);
}
/**
@see magmablas_sprbt_mtv_q
********************************************************************/
extern "C" void
magmablas_sprbt_mv_batched(
magma_int_t n,
float *dv, float **db_array, magma_int_t batchCount)
{
magmablas_sprbt_mv_batched_q(n, dv, db_array, magma_stream, batchCount);
}
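// Note: compared to the transposed variant above, the factors are applied in
// the opposite order: the full-size butterfly (offset 0 of dv) is applied
// first, then the two half-size butterflies (offsets n and n+n/2), so each
// vector in the batch is multiplied by V rather than V^T.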
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SPRBT randomize a square general matrix using partial randomized transformation
Arguments
---------
@param[in]
n INTEGER
The number of columns and rows of the matrix dA. n >= 0.
@param[in,out]
dA REAL array, dimension (n,ldda)
The n-by-n matrix dA
On exit dA = duT*dA*d_V
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDA >= max(1,n).
@param[in]
du REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix U
@param[in]
dv REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
********************************************************************/
extern "C" void
magmablas_sprbt_batched_q(
magma_int_t n,
float **dA_array, magma_int_t ldda,
float *du, float *dv,
magma_queue_t queue, magma_int_t batchCount)
{
du += ldda;
dv += ldda;
dim3 threads(block_height, block_width);
dim3 grid(n/(4*block_height) + ((n%(4*block_height))!=0),
n/(4*block_width) + ((n%(4*block_width))!=0),
batchCount);
hipLaunchKernelGGL(( magmablas_selementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, dA_array, 0, ldda, du, 0, dv, 0);
hipLaunchKernelGGL(( magmablas_selementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, dA_array, ldda*n/2, ldda, du, 0, dv, n/2);
hipLaunchKernelGGL(( magmablas_selementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, dA_array, n/2, ldda, du, n/2, dv, 0);
hipLaunchKernelGGL(( magmablas_selementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue , n/2, dA_array, ldda*n/2+n/2, ldda, du, n/2, dv, n/2);
dim3 threads2(block_height, block_width);
dim3 grid2(n/(2*block_height) + ((n%(2*block_height))!=0),
n/(2*block_width) + ((n%(2*block_width))!=0),
batchCount);
hipLaunchKernelGGL(( magmablas_selementary_multiplication_kernel_batched), dim3(grid2), dim3(threads2), 0, queue , n, dA_array, 0, ldda, du, -ldda, dv, -ldda);
}
/**
@see magmablas_sprbt_q
********************************************************************/
extern "C" void
magmablas_sprbt_batched(
magma_int_t n,
float **dA_array, magma_int_t ldda,
float *du, float *dv,
magma_int_t batchCount)
{
magmablas_sprbt_batched_q(n, dA_array, ldda, du, dv, magma_stream, batchCount);
}
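// Minimal usage sketch (hypothetical buffer names; assumes n is even, dA_array
// holds batchCount device pointers to n-by-n matrices with leading dimension
// ldda, and du/dv hold the 2*n butterfly coefficients already copied to the GPU):
//
//     // randomize every A_i in the batch: A_i <- U^T * A_i * V
//     magmablas_sprbt_batched(n, dA_array, ldda, du, dv, batchCount);
//     // fold U^T into the right-hand sides before the no-pivoting solve
//     magmablas_sprbt_mtv_batched(n, du, db_array, batchCount);
//     // ... batched LU without pivoting and triangular solves ...
//     // recover the true solutions: x_i <- V * y_i
//     magmablas_sprbt_mv_batched(n, dv, db_array, batchCount);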
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
| d3af00a051a998be0d137e2f292da4dc11f6dd16.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zgerbt_func_batched.cu normal z -> s, Sat Nov 15 19:53:59 2014
@author Adrien Remy
@author Azzam Haidar
*/
#include "common_magma.h"
#include "sgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SPRBT_MVT compute B = UTB to randomize B
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in]
du REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in,out]
db REAL array, dimension (n)
The n vector db computed by SGESV_NOPIV_GPU
On exit db = du*db
@param[in]
queue magma_queue_t
Queue to execute in.
********************************************************************/
extern "C" void
magmablas_sprbt_mtv_batched_q(
magma_int_t n,
float *du, float **db_array,
magma_queue_t queue, magma_int_t batchCount)
{
/*
*/
magma_int_t threads = block_length;
dim3 grid ( n/(4*block_length) + ((n%(4*block_length))!=0), batchCount);
magmablas_sapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue >>>(n/2, du, n, db_array, 0);
magmablas_sapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue >>>(n/2, du, n+n/2, db_array, n/2);
threads = block_length;
    grid = dim3( n/(2*block_length) + ((n%(2*block_length))!=0), batchCount );  // explicit dim3 so batchCount stays the y-dimension
magmablas_sapply_transpose_vector_kernel_batched<<< grid, threads, 0, queue >>>(n, du, 0, db_array, 0);
}
/**
@see magmablas_sprbt_mtv_q
********************************************************************/
extern "C" void
magmablas_sprbt_mtv_batched(
magma_int_t n,
float *du, float **db_array, magma_int_t batchCount)
{
magmablas_sprbt_mtv_batched_q(n, du, db_array, magma_stream, batchCount);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SPRBT_MV compute B = VB to obtain the non randomized solution
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in,out]
db REAL array, dimension (n)
The n vector db computed by SGESV_NOPIV_GPU
On exit db = dv*db
@param[in]
dv REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
********************************************************************/
extern "C" void
magmablas_sprbt_mv_batched_q(
magma_int_t n,
float *dv, float **db_array,
magma_queue_t queue, magma_int_t batchCount)
{
magma_int_t threads = block_length;
dim3 grid ( n/(2*block_length) + ((n%(2*block_length))!=0), batchCount);
magmablas_sapply_vector_kernel_batched<<< grid, threads, 0, queue >>>(n, dv, 0, db_array, 0);
threads = block_length;
    grid = dim3( n/(4*block_length) + ((n%(4*block_length))!=0), batchCount );  // explicit dim3 so batchCount stays the y-dimension
magmablas_sapply_vector_kernel_batched<<< grid, threads, 0, queue >>>(n/2, dv, n, db_array, 0);
magmablas_sapply_vector_kernel_batched<<< grid, threads, 0, queue >>>(n/2, dv, n+n/2, db_array, n/2);
}
/**
@see magmablas_sprbt_mtv_q
********************************************************************/
extern "C" void
magmablas_sprbt_mv_batched(
magma_int_t n,
float *dv, float **db_array, magma_int_t batchCount)
{
magmablas_sprbt_mv_batched_q(n, dv, db_array, magma_stream, batchCount);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SPRBT randomize a square general matrix using partial randomized transformation
Arguments
---------
@param[in]
n INTEGER
The number of columns and rows of the matrix dA. n >= 0.
@param[in,out]
dA REAL array, dimension (n,ldda)
The n-by-n matrix dA
On exit dA = duT*dA*d_V
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDA >= max(1,n).
@param[in]
du REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix U
@param[in]
dv REAL array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
********************************************************************/
extern "C" void
magmablas_sprbt_batched_q(
magma_int_t n,
float **dA_array, magma_int_t ldda,
float *du, float *dv,
magma_queue_t queue, magma_int_t batchCount)
{
du += ldda;
dv += ldda;
dim3 threads(block_height, block_width);
dim3 grid(n/(4*block_height) + ((n%(4*block_height))!=0),
n/(4*block_width) + ((n%(4*block_width))!=0),
batchCount);
magmablas_selementary_multiplication_kernel_batched<<< grid, threads, 0, queue >>>(n/2, dA_array, 0, ldda, du, 0, dv, 0);
magmablas_selementary_multiplication_kernel_batched<<< grid, threads, 0, queue >>>(n/2, dA_array, ldda*n/2, ldda, du, 0, dv, n/2);
magmablas_selementary_multiplication_kernel_batched<<< grid, threads, 0, queue >>>(n/2, dA_array, n/2, ldda, du, n/2, dv, 0);
magmablas_selementary_multiplication_kernel_batched<<< grid, threads, 0, queue >>>(n/2, dA_array, ldda*n/2+n/2, ldda, du, n/2, dv, n/2);
dim3 threads2(block_height, block_width);
dim3 grid2(n/(2*block_height) + ((n%(2*block_height))!=0),
n/(2*block_width) + ((n%(2*block_width))!=0),
batchCount);
magmablas_selementary_multiplication_kernel_batched<<< grid2, threads2, 0, queue >>>(n, dA_array, 0, ldda, du, -ldda, dv, -ldda);
}
/**
@see magmablas_sprbt_q
********************************************************************/
extern "C" void
magmablas_sprbt_batched(
magma_int_t n,
float **dA_array, magma_int_t ldda,
float *du, float *dv,
magma_int_t batchCount)
{
magmablas_sprbt_batched_q(n, dA_array, ldda, du, dv, magma_stream, batchCount);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
71b2a8e5f790b754a0abc295e0640f64cc4bafc2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "haraka_cuda.h"
#include <stdint.h>
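// Te0..Te3 are the byte-rotated AES encryption T-tables: each 32-bit entry
// combines SubBytes and MixColumns for one input byte, so a full AES round
// reduces to four table lookups and XORs per output word, with the Haraka
// round constants RC taking the place of a key schedule.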
__device__ const uint32_t Te0[256] = { \
0xa56363c6U, 0x847c7cf8U, 0x997777eeU, 0x8d7b7bf6U,\
0x0df2f2ffU, 0xbd6b6bd6U, 0xb16f6fdeU, 0x54c5c591U,\
0x50303060U, 0x03010102U, 0xa96767ceU, 0x7d2b2b56U,\
0x19fefee7U, 0x62d7d7b5U, 0xe6abab4dU, 0x9a7676ecU,\
0x45caca8fU, 0x9d82821fU, 0x40c9c989U, 0x877d7dfaU,\
0x15fafaefU, 0xeb5959b2U, 0xc947478eU, 0x0bf0f0fbU,\
0xecadad41U, 0x67d4d4b3U, 0xfda2a25fU, 0xeaafaf45U,\
0xbf9c9c23U, 0xf7a4a453U, 0x967272e4U, 0x5bc0c09bU,\
0xc2b7b775U, 0x1cfdfde1U, 0xae93933dU, 0x6a26264cU,\
0x5a36366cU, 0x413f3f7eU, 0x02f7f7f5U, 0x4fcccc83U,\
0x5c343468U, 0xf4a5a551U, 0x34e5e5d1U, 0x08f1f1f9U,\
0x937171e2U, 0x73d8d8abU, 0x53313162U, 0x3f15152aU,\
0x0c040408U, 0x52c7c795U, 0x65232346U, 0x5ec3c39dU,\
0x28181830U, 0xa1969637U, 0x0f05050aU, 0xb59a9a2fU,\
0x0907070eU, 0x36121224U, 0x9b80801bU, 0x3de2e2dfU,\
0x26ebebcdU, 0x6927274eU, 0xcdb2b27fU, 0x9f7575eaU,\
0x1b090912U, 0x9e83831dU, 0x742c2c58U, 0x2e1a1a34U,\
0x2d1b1b36U, 0xb26e6edcU, 0xee5a5ab4U, 0xfba0a05bU,\
0xf65252a4U, 0x4d3b3b76U, 0x61d6d6b7U, 0xceb3b37dU,\
0x7b292952U, 0x3ee3e3ddU, 0x712f2f5eU, 0x97848413U,\
0xf55353a6U, 0x68d1d1b9U, 0x00000000U, 0x2cededc1U,\
0x60202040U, 0x1ffcfce3U, 0xc8b1b179U, 0xed5b5bb6U,\
0xbe6a6ad4U, 0x46cbcb8dU, 0xd9bebe67U, 0x4b393972U,\
0xde4a4a94U, 0xd44c4c98U, 0xe85858b0U, 0x4acfcf85U,\
0x6bd0d0bbU, 0x2aefefc5U, 0xe5aaaa4fU, 0x16fbfbedU,\
0xc5434386U, 0xd74d4d9aU, 0x55333366U, 0x94858511U,\
0xcf45458aU, 0x10f9f9e9U, 0x06020204U, 0x817f7ffeU,\
0xf05050a0U, 0x443c3c78U, 0xba9f9f25U, 0xe3a8a84bU,\
0xf35151a2U, 0xfea3a35dU, 0xc0404080U, 0x8a8f8f05U,\
0xad92923fU, 0xbc9d9d21U, 0x48383870U, 0x04f5f5f1U,\
0xdfbcbc63U, 0xc1b6b677U, 0x75dadaafU, 0x63212142U,\
0x30101020U, 0x1affffe5U, 0x0ef3f3fdU, 0x6dd2d2bfU,\
0x4ccdcd81U, 0x140c0c18U, 0x35131326U, 0x2fececc3U,\
0xe15f5fbeU, 0xa2979735U, 0xcc444488U, 0x3917172eU,\
0x57c4c493U, 0xf2a7a755U, 0x827e7efcU, 0x473d3d7aU,\
0xac6464c8U, 0xe75d5dbaU, 0x2b191932U, 0x957373e6U,\
0xa06060c0U, 0x98818119U, 0xd14f4f9eU, 0x7fdcdca3U,\
0x66222244U, 0x7e2a2a54U, 0xab90903bU, 0x8388880bU,\
0xca46468cU, 0x29eeeec7U, 0xd3b8b86bU, 0x3c141428U,\
0x79dedea7U, 0xe25e5ebcU, 0x1d0b0b16U, 0x76dbdbadU,\
0x3be0e0dbU, 0x56323264U, 0x4e3a3a74U, 0x1e0a0a14U,\
0xdb494992U, 0x0a06060cU, 0x6c242448U, 0xe45c5cb8U,\
0x5dc2c29fU, 0x6ed3d3bdU, 0xefacac43U, 0xa66262c4U,\
0xa8919139U, 0xa4959531U, 0x37e4e4d3U, 0x8b7979f2U,\
0x32e7e7d5U, 0x43c8c88bU, 0x5937376eU, 0xb76d6ddaU,\
0x8c8d8d01U, 0x64d5d5b1U, 0xd24e4e9cU, 0xe0a9a949U,\
0xb46c6cd8U, 0xfa5656acU, 0x07f4f4f3U, 0x25eaeacfU,\
0xaf6565caU, 0x8e7a7af4U, 0xe9aeae47U, 0x18080810U,\
0xd5baba6fU, 0x887878f0U, 0x6f25254aU, 0x722e2e5cU,\
0x241c1c38U, 0xf1a6a657U, 0xc7b4b473U, 0x51c6c697U,\
0x23e8e8cbU, 0x7cdddda1U, 0x9c7474e8U, 0x211f1f3eU,\
0xdd4b4b96U, 0xdcbdbd61U, 0x868b8b0dU, 0x858a8a0fU,\
0x907070e0U, 0x423e3e7cU, 0xc4b5b571U, 0xaa6666ccU,\
0xd8484890U, 0x05030306U, 0x01f6f6f7U, 0x120e0e1cU,\
0xa36161c2U, 0x5f35356aU, 0xf95757aeU, 0xd0b9b969U,\
0x91868617U, 0x58c1c199U, 0x271d1d3aU, 0xb99e9e27U,\
0x38e1e1d9U, 0x13f8f8ebU, 0xb398982bU, 0x33111122U,\
0xbb6969d2U, 0x70d9d9a9U, 0x898e8e07U, 0xa7949433U,\
0xb69b9b2dU, 0x221e1e3cU, 0x92878715U, 0x20e9e9c9U,\
0x49cece87U, 0xff5555aaU, 0x78282850U, 0x7adfdfa5U,\
0x8f8c8c03U, 0xf8a1a159U, 0x80898909U, 0x170d0d1aU,\
0xdabfbf65U, 0x31e6e6d7U, 0xc6424284U, 0xb86868d0U,\
0xc3414182U, 0xb0999929U, 0x772d2d5aU, 0x110f0f1eU,\
0xcbb0b07bU, 0xfc5454a8U, 0xd6bbbb6dU, 0x3a16162cU \
};
__device__ const uint32_t Te1[256] = { \
0x6363c6a5U, 0x7c7cf884U, 0x7777ee99U, 0x7b7bf68dU,\
0xf2f2ff0dU, 0x6b6bd6bdU, 0x6f6fdeb1U, 0xc5c59154U,\
0x30306050U, 0x01010203U, 0x6767cea9U, 0x2b2b567dU,\
0xfefee719U, 0xd7d7b562U, 0xabab4de6U, 0x7676ec9aU,\
0xcaca8f45U, 0x82821f9dU, 0xc9c98940U, 0x7d7dfa87U,\
0xfafaef15U, 0x5959b2ebU, 0x47478ec9U, 0xf0f0fb0bU,\
0xadad41ecU, 0xd4d4b367U, 0xa2a25ffdU, 0xafaf45eaU,\
0x9c9c23bfU, 0xa4a453f7U, 0x7272e496U, 0xc0c09b5bU,\
0xb7b775c2U, 0xfdfde11cU, 0x93933daeU, 0x26264c6aU,\
0x36366c5aU, 0x3f3f7e41U, 0xf7f7f502U, 0xcccc834fU,\
0x3434685cU, 0xa5a551f4U, 0xe5e5d134U, 0xf1f1f908U,\
0x7171e293U, 0xd8d8ab73U, 0x31316253U, 0x15152a3fU,\
0x0404080cU, 0xc7c79552U, 0x23234665U, 0xc3c39d5eU,\
0x18183028U, 0x969637a1U, 0x05050a0fU, 0x9a9a2fb5U,\
0x07070e09U, 0x12122436U, 0x80801b9bU, 0xe2e2df3dU,\
0xebebcd26U, 0x27274e69U, 0xb2b27fcdU, 0x7575ea9fU,\
0x0909121bU, 0x83831d9eU, 0x2c2c5874U, 0x1a1a342eU,\
0x1b1b362dU, 0x6e6edcb2U, 0x5a5ab4eeU, 0xa0a05bfbU,\
0x5252a4f6U, 0x3b3b764dU, 0xd6d6b761U, 0xb3b37dceU,\
0x2929527bU, 0xe3e3dd3eU, 0x2f2f5e71U, 0x84841397U,\
0x5353a6f5U, 0xd1d1b968U, 0x00000000U, 0xededc12cU,\
0x20204060U, 0xfcfce31fU, 0xb1b179c8U, 0x5b5bb6edU,\
0x6a6ad4beU, 0xcbcb8d46U, 0xbebe67d9U, 0x3939724bU,\
0x4a4a94deU, 0x4c4c98d4U, 0x5858b0e8U, 0xcfcf854aU,\
0xd0d0bb6bU, 0xefefc52aU, 0xaaaa4fe5U, 0xfbfbed16U,\
0x434386c5U, 0x4d4d9ad7U, 0x33336655U, 0x85851194U,\
0x45458acfU, 0xf9f9e910U, 0x02020406U, 0x7f7ffe81U,\
0x5050a0f0U, 0x3c3c7844U, 0x9f9f25baU, 0xa8a84be3U,\
0x5151a2f3U, 0xa3a35dfeU, 0x404080c0U, 0x8f8f058aU,\
0x92923fadU, 0x9d9d21bcU, 0x38387048U, 0xf5f5f104U,\
0xbcbc63dfU, 0xb6b677c1U, 0xdadaaf75U, 0x21214263U,\
0x10102030U, 0xffffe51aU, 0xf3f3fd0eU, 0xd2d2bf6dU,\
0xcdcd814cU, 0x0c0c1814U, 0x13132635U, 0xececc32fU,\
0x5f5fbee1U, 0x979735a2U, 0x444488ccU, 0x17172e39U,\
0xc4c49357U, 0xa7a755f2U, 0x7e7efc82U, 0x3d3d7a47U,\
0x6464c8acU, 0x5d5dbae7U, 0x1919322bU, 0x7373e695U,\
0x6060c0a0U, 0x81811998U, 0x4f4f9ed1U, 0xdcdca37fU,\
0x22224466U, 0x2a2a547eU, 0x90903babU, 0x88880b83U,\
0x46468ccaU, 0xeeeec729U, 0xb8b86bd3U, 0x1414283cU,\
0xdedea779U, 0x5e5ebce2U, 0x0b0b161dU, 0xdbdbad76U,\
0xe0e0db3bU, 0x32326456U, 0x3a3a744eU, 0x0a0a141eU,\
0x494992dbU, 0x06060c0aU, 0x2424486cU, 0x5c5cb8e4U,\
0xc2c29f5dU, 0xd3d3bd6eU, 0xacac43efU, 0x6262c4a6U,\
0x919139a8U, 0x959531a4U, 0xe4e4d337U, 0x7979f28bU,\
0xe7e7d532U, 0xc8c88b43U, 0x37376e59U, 0x6d6ddab7U,\
0x8d8d018cU, 0xd5d5b164U, 0x4e4e9cd2U, 0xa9a949e0U,\
0x6c6cd8b4U, 0x5656acfaU, 0xf4f4f307U, 0xeaeacf25U,\
0x6565caafU, 0x7a7af48eU, 0xaeae47e9U, 0x08081018U,\
0xbaba6fd5U, 0x7878f088U, 0x25254a6fU, 0x2e2e5c72U,\
0x1c1c3824U, 0xa6a657f1U, 0xb4b473c7U, 0xc6c69751U,\
0xe8e8cb23U, 0xdddda17cU, 0x7474e89cU, 0x1f1f3e21U,\
0x4b4b96ddU, 0xbdbd61dcU, 0x8b8b0d86U, 0x8a8a0f85U,\
0x7070e090U, 0x3e3e7c42U, 0xb5b571c4U, 0x6666ccaaU,\
0x484890d8U, 0x03030605U, 0xf6f6f701U, 0x0e0e1c12U,\
0x6161c2a3U, 0x35356a5fU, 0x5757aef9U, 0xb9b969d0U,\
0x86861791U, 0xc1c19958U, 0x1d1d3a27U, 0x9e9e27b9U,\
0xe1e1d938U, 0xf8f8eb13U, 0x98982bb3U, 0x11112233U,\
0x6969d2bbU, 0xd9d9a970U, 0x8e8e0789U, 0x949433a7U,\
0x9b9b2db6U, 0x1e1e3c22U, 0x87871592U, 0xe9e9c920U,\
0xcece8749U, 0x5555aaffU, 0x28285078U, 0xdfdfa57aU,\
0x8c8c038fU, 0xa1a159f8U, 0x89890980U, 0x0d0d1a17U,\
0xbfbf65daU, 0xe6e6d731U, 0x424284c6U, 0x6868d0b8U,\
0x414182c3U, 0x999929b0U, 0x2d2d5a77U, 0x0f0f1e11U,\
0xb0b07bcbU, 0x5454a8fcU, 0xbbbb6dd6U, 0x16162c3aU \
};
__device__ const uint32_t Te2[256] = { \
0x63c6a563U, 0x7cf8847cU, 0x77ee9977U, 0x7bf68d7bU,\
0xf2ff0df2U, 0x6bd6bd6bU, 0x6fdeb16fU, 0xc59154c5U,\
0x30605030U, 0x01020301U, 0x67cea967U, 0x2b567d2bU,\
0xfee719feU, 0xd7b562d7U, 0xab4de6abU, 0x76ec9a76U,\
0xca8f45caU, 0x821f9d82U, 0xc98940c9U, 0x7dfa877dU,\
0xfaef15faU, 0x59b2eb59U, 0x478ec947U, 0xf0fb0bf0U,\
0xad41ecadU, 0xd4b367d4U, 0xa25ffda2U, 0xaf45eaafU,\
0x9c23bf9cU, 0xa453f7a4U, 0x72e49672U, 0xc09b5bc0U,\
0xb775c2b7U, 0xfde11cfdU, 0x933dae93U, 0x264c6a26U,\
0x366c5a36U, 0x3f7e413fU, 0xf7f502f7U, 0xcc834fccU,\
0x34685c34U, 0xa551f4a5U, 0xe5d134e5U, 0xf1f908f1U,\
0x71e29371U, 0xd8ab73d8U, 0x31625331U, 0x152a3f15U,\
0x04080c04U, 0xc79552c7U, 0x23466523U, 0xc39d5ec3U,\
0x18302818U, 0x9637a196U, 0x050a0f05U, 0x9a2fb59aU,\
0x070e0907U, 0x12243612U, 0x801b9b80U, 0xe2df3de2U,\
0xebcd26ebU, 0x274e6927U, 0xb27fcdb2U, 0x75ea9f75U,\
0x09121b09U, 0x831d9e83U, 0x2c58742cU, 0x1a342e1aU,\
0x1b362d1bU, 0x6edcb26eU, 0x5ab4ee5aU, 0xa05bfba0U,\
0x52a4f652U, 0x3b764d3bU, 0xd6b761d6U, 0xb37dceb3U,\
0x29527b29U, 0xe3dd3ee3U, 0x2f5e712fU, 0x84139784U,\
0x53a6f553U, 0xd1b968d1U, 0x00000000U, 0xedc12cedU,\
0x20406020U, 0xfce31ffcU, 0xb179c8b1U, 0x5bb6ed5bU,\
0x6ad4be6aU, 0xcb8d46cbU, 0xbe67d9beU, 0x39724b39U,\
0x4a94de4aU, 0x4c98d44cU, 0x58b0e858U, 0xcf854acfU,\
0xd0bb6bd0U, 0xefc52aefU, 0xaa4fe5aaU, 0xfbed16fbU,\
0x4386c543U, 0x4d9ad74dU, 0x33665533U, 0x85119485U,\
0x458acf45U, 0xf9e910f9U, 0x02040602U, 0x7ffe817fU,\
0x50a0f050U, 0x3c78443cU, 0x9f25ba9fU, 0xa84be3a8U,\
0x51a2f351U, 0xa35dfea3U, 0x4080c040U, 0x8f058a8fU,\
0x923fad92U, 0x9d21bc9dU, 0x38704838U, 0xf5f104f5U,\
0xbc63dfbcU, 0xb677c1b6U, 0xdaaf75daU, 0x21426321U,\
0x10203010U, 0xffe51affU, 0xf3fd0ef3U, 0xd2bf6dd2U,\
0xcd814ccdU, 0x0c18140cU, 0x13263513U, 0xecc32fecU,\
0x5fbee15fU, 0x9735a297U, 0x4488cc44U, 0x172e3917U,\
0xc49357c4U, 0xa755f2a7U, 0x7efc827eU, 0x3d7a473dU,\
0x64c8ac64U, 0x5dbae75dU, 0x19322b19U, 0x73e69573U,\
0x60c0a060U, 0x81199881U, 0x4f9ed14fU, 0xdca37fdcU,\
0x22446622U, 0x2a547e2aU, 0x903bab90U, 0x880b8388U,\
0x468cca46U, 0xeec729eeU, 0xb86bd3b8U, 0x14283c14U,\
0xdea779deU, 0x5ebce25eU, 0x0b161d0bU, 0xdbad76dbU,\
0xe0db3be0U, 0x32645632U, 0x3a744e3aU, 0x0a141e0aU,\
0x4992db49U, 0x060c0a06U, 0x24486c24U, 0x5cb8e45cU,\
0xc29f5dc2U, 0xd3bd6ed3U, 0xac43efacU, 0x62c4a662U,\
0x9139a891U, 0x9531a495U, 0xe4d337e4U, 0x79f28b79U,\
0xe7d532e7U, 0xc88b43c8U, 0x376e5937U, 0x6ddab76dU,\
0x8d018c8dU, 0xd5b164d5U, 0x4e9cd24eU, 0xa949e0a9U,\
0x6cd8b46cU, 0x56acfa56U, 0xf4f307f4U, 0xeacf25eaU,\
0x65caaf65U, 0x7af48e7aU, 0xae47e9aeU, 0x08101808U,\
0xba6fd5baU, 0x78f08878U, 0x254a6f25U, 0x2e5c722eU,\
0x1c38241cU, 0xa657f1a6U, 0xb473c7b4U, 0xc69751c6U,\
0xe8cb23e8U, 0xdda17cddU, 0x74e89c74U, 0x1f3e211fU,\
0x4b96dd4bU, 0xbd61dcbdU, 0x8b0d868bU, 0x8a0f858aU,\
0x70e09070U, 0x3e7c423eU, 0xb571c4b5U, 0x66ccaa66U,\
0x4890d848U, 0x03060503U, 0xf6f701f6U, 0x0e1c120eU,\
0x61c2a361U, 0x356a5f35U, 0x57aef957U, 0xb969d0b9U,\
0x86179186U, 0xc19958c1U, 0x1d3a271dU, 0x9e27b99eU,\
0xe1d938e1U, 0xf8eb13f8U, 0x982bb398U, 0x11223311U,\
0x69d2bb69U, 0xd9a970d9U, 0x8e07898eU, 0x9433a794U,\
0x9b2db69bU, 0x1e3c221eU, 0x87159287U, 0xe9c920e9U,\
0xce8749ceU, 0x55aaff55U, 0x28507828U, 0xdfa57adfU,\
0x8c038f8cU, 0xa159f8a1U, 0x89098089U, 0x0d1a170dU,\
0xbf65dabfU, 0xe6d731e6U, 0x4284c642U, 0x68d0b868U,\
0x4182c341U, 0x9929b099U, 0x2d5a772dU, 0x0f1e110fU,\
0xb07bcbb0U, 0x54a8fc54U, 0xbb6dd6bbU, 0x162c3a16U \
};
__device__ const uint32_t Te3[256] = { \
0xc6a56363U, 0xf8847c7cU, 0xee997777U, 0xf68d7b7bU,\
0xff0df2f2U, 0xd6bd6b6bU, 0xdeb16f6fU, 0x9154c5c5U,\
0x60503030U, 0x02030101U, 0xcea96767U, 0x567d2b2bU,\
0xe719fefeU, 0xb562d7d7U, 0x4de6ababU, 0xec9a7676U,\
0x8f45cacaU, 0x1f9d8282U, 0x8940c9c9U, 0xfa877d7dU,\
0xef15fafaU, 0xb2eb5959U, 0x8ec94747U, 0xfb0bf0f0U,\
0x41ecadadU, 0xb367d4d4U, 0x5ffda2a2U, 0x45eaafafU,\
0x23bf9c9cU, 0x53f7a4a4U, 0xe4967272U, 0x9b5bc0c0U,\
0x75c2b7b7U, 0xe11cfdfdU, 0x3dae9393U, 0x4c6a2626U,\
0x6c5a3636U, 0x7e413f3fU, 0xf502f7f7U, 0x834fccccU,\
0x685c3434U, 0x51f4a5a5U, 0xd134e5e5U, 0xf908f1f1U,\
0xe2937171U, 0xab73d8d8U, 0x62533131U, 0x2a3f1515U,\
0x080c0404U, 0x9552c7c7U, 0x46652323U, 0x9d5ec3c3U,\
0x30281818U, 0x37a19696U, 0x0a0f0505U, 0x2fb59a9aU,\
0x0e090707U, 0x24361212U, 0x1b9b8080U, 0xdf3de2e2U,\
0xcd26ebebU, 0x4e692727U, 0x7fcdb2b2U, 0xea9f7575U,\
0x121b0909U, 0x1d9e8383U, 0x58742c2cU, 0x342e1a1aU,\
0x362d1b1bU, 0xdcb26e6eU, 0xb4ee5a5aU, 0x5bfba0a0U,\
0xa4f65252U, 0x764d3b3bU, 0xb761d6d6U, 0x7dceb3b3U,\
0x527b2929U, 0xdd3ee3e3U, 0x5e712f2fU, 0x13978484U,\
0xa6f55353U, 0xb968d1d1U, 0x00000000U, 0xc12cededU,\
0x40602020U, 0xe31ffcfcU, 0x79c8b1b1U, 0xb6ed5b5bU,\
0xd4be6a6aU, 0x8d46cbcbU, 0x67d9bebeU, 0x724b3939U,\
0x94de4a4aU, 0x98d44c4cU, 0xb0e85858U, 0x854acfcfU,\
0xbb6bd0d0U, 0xc52aefefU, 0x4fe5aaaaU, 0xed16fbfbU,\
0x86c54343U, 0x9ad74d4dU, 0x66553333U, 0x11948585U,\
0x8acf4545U, 0xe910f9f9U, 0x04060202U, 0xfe817f7fU,\
0xa0f05050U, 0x78443c3cU, 0x25ba9f9fU, 0x4be3a8a8U,\
0xa2f35151U, 0x5dfea3a3U, 0x80c04040U, 0x058a8f8fU,\
0x3fad9292U, 0x21bc9d9dU, 0x70483838U, 0xf104f5f5U,\
0x63dfbcbcU, 0x77c1b6b6U, 0xaf75dadaU, 0x42632121U,\
0x20301010U, 0xe51affffU, 0xfd0ef3f3U, 0xbf6dd2d2U,\
0x814ccdcdU, 0x18140c0cU, 0x26351313U, 0xc32fececU,\
0xbee15f5fU, 0x35a29797U, 0x88cc4444U, 0x2e391717U,\
0x9357c4c4U, 0x55f2a7a7U, 0xfc827e7eU, 0x7a473d3dU,\
0xc8ac6464U, 0xbae75d5dU, 0x322b1919U, 0xe6957373U,\
0xc0a06060U, 0x19988181U, 0x9ed14f4fU, 0xa37fdcdcU,\
0x44662222U, 0x547e2a2aU, 0x3bab9090U, 0x0b838888U,\
0x8cca4646U, 0xc729eeeeU, 0x6bd3b8b8U, 0x283c1414U,\
0xa779dedeU, 0xbce25e5eU, 0x161d0b0bU, 0xad76dbdbU,\
0xdb3be0e0U, 0x64563232U, 0x744e3a3aU, 0x141e0a0aU,\
0x92db4949U, 0x0c0a0606U, 0x486c2424U, 0xb8e45c5cU,\
0x9f5dc2c2U, 0xbd6ed3d3U, 0x43efacacU, 0xc4a66262U,\
0x39a89191U, 0x31a49595U, 0xd337e4e4U, 0xf28b7979U,\
0xd532e7e7U, 0x8b43c8c8U, 0x6e593737U, 0xdab76d6dU,\
0x018c8d8dU, 0xb164d5d5U, 0x9cd24e4eU, 0x49e0a9a9U,\
0xd8b46c6cU, 0xacfa5656U, 0xf307f4f4U, 0xcf25eaeaU,\
0xcaaf6565U, 0xf48e7a7aU, 0x47e9aeaeU, 0x10180808U,\
0x6fd5babaU, 0xf0887878U, 0x4a6f2525U, 0x5c722e2eU,\
0x38241c1cU, 0x57f1a6a6U, 0x73c7b4b4U, 0x9751c6c6U,\
0xcb23e8e8U, 0xa17cddddU, 0xe89c7474U, 0x3e211f1fU,\
0x96dd4b4bU, 0x61dcbdbdU, 0x0d868b8bU, 0x0f858a8aU,\
0xe0907070U, 0x7c423e3eU, 0x71c4b5b5U, 0xccaa6666U,\
0x90d84848U, 0x06050303U, 0xf701f6f6U, 0x1c120e0eU,\
0xc2a36161U, 0x6a5f3535U, 0xaef95757U, 0x69d0b9b9U,\
0x17918686U, 0x9958c1c1U, 0x3a271d1dU, 0x27b99e9eU,\
0xd938e1e1U, 0xeb13f8f8U, 0x2bb39898U, 0x22331111U,\
0xd2bb6969U, 0xa970d9d9U, 0x07898e8eU, 0x33a79494U,\
0x2db69b9bU, 0x3c221e1eU, 0x15928787U, 0xc920e9e9U,\
0x8749ceceU, 0xaaff5555U, 0x50782828U, 0xa57adfdfU,\
0x038f8c8cU, 0x59f8a1a1U, 0x09808989U, 0x1a170d0dU,\
0x65dabfbfU, 0xd731e6e6U, 0x84c64242U, 0xd0b86868U,\
0x82c34141U, 0x29b09999U, 0x5a772d2dU, 0x1e110f0fU,\
0x7bcbb0b0U, 0xa8fc5454U, 0x6dd6bbbbU, 0x2c3a1616U \
};
__constant__ const uint32_t RC[256] = {\
0x0684704c, 0xe620c00a, 0xb2c5fef0, 0x75817b9d, \
0x8b66b4e1, 0x88f3a06b, 0x640f6ba4, 0x2f08f717, \
0x3402de2d, 0x53f28498, 0xcf029d60, 0x9f029114, \
0x0ed6eae6, 0x2e7b4f08, 0xbbf3bcaf, 0xfd5b4f79, \
0xcbcfb0cb, 0x4872448b, 0x79eecd1c, 0xbe397044, \
0x7eeacdee, 0x6e9032b7, 0x8d5335ed, 0x2b8a057b, \
0x67c28f43, 0x5e2e7cd0, 0xe2412761, 0xda4fef1b, \
0x2924d9b0, 0xafcacc07, 0x675ffde2, 0x1fc70b3b, \
0xab4d63f1, 0xe6867fe9, 0xecdb8fca, 0xb9d465ee, \
0x1c30bf84, 0xd4b7cd64, 0x5b2a404f, 0xad037e33, \
0xb2cc0bb9, 0x941723bf, 0x69028b2e, 0x8df69800, \
0xfa0478a6, 0xde6f5572, 0x4aaa9ec8, 0x5c9d2d8a, \
0xdfb49f2b, 0x6b772a12, 0x0efa4f2e, 0x29129fd4, \
0x1ea10344, 0xf449a236, 0x32d611ae, 0xbb6a12ee, \
0xaf044988, 0x4b050084, 0x5f9600c9, 0x9ca8eca6, \
0x21025ed8, 0x9d199c4f, 0x78a2c7e3, 0x27e593ec, \
0xbf3aaaf8, 0xa759c9b7, 0xb9282ecd, 0x82d40173, \
0x6260700d, 0x6186b017, 0x37f2efd9, 0x10307d6b, \
0x5aca45c2, 0x21300443, 0x81c29153, 0xf6fc9ac6, \
0x9223973c, 0x226b68bb, 0x2caf92e8, 0x36d1943a, \
0xd3bf9238, 0x225886eb, 0x6cbab958, 0xe51071b4, \
0xdb863ce5, 0xaef0c677, 0x933dfddd, 0x24e1128d, \
0xbb606268, 0xffeba09c, 0x83e48de3, 0xcb2212b1, \
0x734bd3dc, 0xe2e4d19c, 0x2db91a4e, 0xc72bf77d, \
0x43bb47c3, 0x61301b43, 0x4b1415c4, 0x2cb3924e, \
0xdba775a8, 0xe707eff6, 0x03b231dd, 0x16eb6899, \
0x6df3614b, 0x3c755977, 0x8e5e2302, 0x7eca472c, \
0xcda75a17, 0xd6de7d77, 0x6d1be5b9, 0xb88617f9, \
0xec6b43f0, 0x6ba8e9aa, 0x9d6c069d, 0xa946ee5d, \
0xcb1e6950, 0xf957332b, 0xa2531159, 0x3bf327c1, \
0x2cee0c75, 0x00da619c, 0xe4ed0353, 0x600ed0d9, \
0xf0b1a5a1, 0x96e90cab, 0x80bbbabc, 0x63a4a350, \
0xae3db102, 0x5e962988, 0xab0dde30, 0x938dca39, \
0x17bb8f38, 0xd554a40b, 0x8814f3a8, 0x2e75b442, \
0x34bb8a5b, 0x5f427fd7, 0xaeb6b779, 0x360a16f6, \
0x26f65241, 0xcbe55438, 0x43ce5918, 0xffbaafde, \
0x4ce99a54, 0xb9f3026a, 0xa2ca9cf7, 0x839ec978, \
0xae51a51a, 0x1bdff7be, 0x40c06e28, 0x22901235, \
0xa0c1613c, 0xba7ed22b, 0xc173bc0f, 0x48a659cf, \
0x756acc03, 0x02288288, 0x4ad6bdfd, 0xe9c59da1, \
};
#define AES_ENC_ROUND(n,D,S) \
AES_ENC_STEP(n,D,S,0,1,2,3); \
AES_ENC_STEP(n,D,S,1,2,3,0); \
AES_ENC_STEP(n,D,S,2,3,0,1); \
AES_ENC_STEP(n,D,S,3,0,1,2);
#define AES_ENC_STEP(n,D,S,W,X,Y,Z) \
D##W = TE(0)[ S##W & 0xff]; \
D##W ^= TE(1)[(S##X >> 8) & 0xff]; \
D##W ^= TE(2)[(S##Y >> 16) & 0xff]; \
D##W ^= TE(3)[ S##Z >> 24 ]; \
D##W ^= RC[n + 3 - W];
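// AES_ENC_ROUND expands to one table-based AES round on the 128-bit state held
// in registers S0..S3, writing the result to D0..D3. AES_ENC_STEP forms one
// 32-bit output column from the four diagonally selected input bytes and XORs
// in one word of the Haraka round constants RC instead of a round key.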
#define TX (__umul24(blockIdx.x,blockDim.x) + threadIdx.x)
#define SX (threadIdx.x)
#define TE(n) Tes##n
#define COPY_CONSTANT_SHARED_ENC\
__shared__ uint32_t Tes0[256];\
__shared__ uint32_t Tes1[256];\
__shared__ uint32_t Tes2[256];\
__shared__ uint32_t Tes3[256];\
Tes0[SX] = Te0[SX];\
Tes1[SX] = Te1[SX];\
Tes2[SX] = Te2[SX];\
Tes3[SX] = Te3[SX];\
__syncthreads();
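// COPY_CONSTANT_SHARED_ENC stages the four T-tables into shared memory, one
// entry per thread, so the lookups in AES_ENC_STEP avoid repeated global
// memory traffic. This assumes the kernels are launched with at least 256
// threads per block; with fewer threads part of Tes0..Tes3 would stay
// uninitialized.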
#define GLOBAL_LOAD_SHARED_SETUP_512 \
register uint32_t r0, r1, r2, r3; \
register uint32_t s0, s1, s2, s3; \
register uint32_t t0, t1, t2, t3; \
register uint32_t u0, u1, u2, u3; \
register uint32_t v0, v1, v2, v3; \
register uint64_t load[2]; \
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(msg)[4 * TX]; \
s0 = load[0]; \
s1 = load[0] >> 32; \
s2 = load[1]; \
s3 = load[1] >> 32; \
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(msg)[4 * TX + 1]; \
t0 = load[0]; \
t1 = load[0] >> 32; \
t2 = load[1]; \
t3 = load[1] >> 32;\
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(msg)[4 * TX + 2]; \
u0 = load[0]; \
u1 = load[0] >> 32; \
u2 = load[1]; \
u3 = load[1] >> 32;\
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(msg)[4 * TX + 3]; \
v0 = load[0]; \
v1 = load[0] >> 32; \
v2 = load[1]; \
v3 = load[1] >> 32;
#define MIX_512 \
r0 = s0; \
r1 = u2; \
\
s0 = s3; \
s3 = v3; \
v3 = v2; \
v2 = t2; \
t2 = v0; \
v0 = s2; \
s2 = t3; \
t3 = t0; \
t0 = u0; \
u0 = u1; \
u1 = s1; \
s1 = u3; \
u3 = t1; \
t1 = r0; \
\
u2 = v1; \
v1 = r1;
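// MIX_512 implements Haraka-512's MIX layer: the sixteen 32-bit columns of the
// four AES states s, t, u and v are permuted (r0 and r1 act as temporaries for
// the cyclic swaps) so that each new state collects one fixed column position
// from every one of the four old states.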
__global__ void haraka512Kernel(const uint64_t* msg, uint64_t* hash, const uint32_t num_msgs)
{
COPY_CONSTANT_SHARED_ENC
if (TX >= (num_msgs))
return;
GLOBAL_LOAD_SHARED_SETUP_512
AES_ENC_ROUND(0, r, s);
AES_ENC_ROUND(16, s, r);
AES_ENC_ROUND(1 * 4, r, t);
AES_ENC_ROUND(1 * 4 + 16, t, r);
AES_ENC_ROUND(2 * 4, r, u);
AES_ENC_ROUND(2 * 4 + 16, u, r);
AES_ENC_ROUND(3 * 4, r, v);
AES_ENC_ROUND(3 * 4 + 16, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 32, r, s);
AES_ENC_ROUND(0 * 4 + 48, s, r);
AES_ENC_ROUND(1 * 4 + 32, r, t);
AES_ENC_ROUND(1 * 4 + 48, t, r);
AES_ENC_ROUND(2 * 4 + 32, r, u);
AES_ENC_ROUND(2 * 4 + 48, u, r);
AES_ENC_ROUND(3 * 4 + 32, r, v);
AES_ENC_ROUND(3 * 4 + 48, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 64, r, s);
AES_ENC_ROUND(0 * 4 + 80, s, r);
AES_ENC_ROUND(1 * 4 + 64, r, t);
AES_ENC_ROUND(1 * 4 + 80, t, r);
AES_ENC_ROUND(2 * 4 + 64, r, u);
AES_ENC_ROUND(2 * 4 + 80, u, r);
AES_ENC_ROUND(3 * 4 + 64, r, v);
AES_ENC_ROUND(3 * 4 + 80, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 96, r, s);
AES_ENC_ROUND(0 * 4 + 112, s, r);
AES_ENC_ROUND(1 * 4 + 96, r, t);
AES_ENC_ROUND(1 * 4 + 112, t, r);
AES_ENC_ROUND(2 * 4 + 96, r, u);
AES_ENC_ROUND(2 * 4 + 112, u, r);
AES_ENC_ROUND(3 * 4 + 96, r, v);
AES_ENC_ROUND(3 * 4 + 112, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 128, r, s);
AES_ENC_ROUND(0 * 4 + 144, s, r);
AES_ENC_ROUND(1 * 4 + 128, r, t);
AES_ENC_ROUND(1 * 4 + 144, t, r);
AES_ENC_ROUND(2 * 4 + 128, r, u);
AES_ENC_ROUND(2 * 4 + 144, u, r);
AES_ENC_ROUND(3 * 4 + 128, r, v);
AES_ENC_ROUND(3 * 4 + 144, v, r);
MIX_512
load[1] = s2 | ((uint64_t)s3) << 32;
hash[TX * 4] = load[1] ^ msg[8 * TX + 1];
load[1] = t2 | ((uint64_t)t3) << 32;
hash[TX * 4 + 1] = load[1] ^ msg[8 * TX + 3];
load[1] = u0 | ((uint64_t)u1) << 32;
hash[TX * 4 + 2] = load[1] ^ msg[8 * TX + 4];
load[1] = v0 | ((uint64_t)v1) << 32;
load[1] ^= load[0];
hash[TX * 4 + 3] = load[1];
}
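/*
 * Minimal host-side launch sketch (hypothetical buffer names; assumes num_msgs
 * 64-byte messages packed contiguously in d_msg, 32 bytes of output per message
 * in d_hash, and a block size of 256 threads so COPY_CONSTANT_SHARED_ENC fills
 * the shared T-tables completely):
 *
 *     const uint32_t threads = 256;
 *     const uint32_t blocks = (num_msgs + threads - 1) / threads;
 *     hipLaunchKernelGGL(haraka512Kernel, dim3(blocks), dim3(threads), 0, 0,
 *                        d_msg, d_hash, num_msgs);
 */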
#define GLOBAL_LOAD_SHARED_SETUP_256(input) \
register uint32_t r0, r1, r2, r3; \
register uint32_t s0, s1, s2, s3; \
register uint32_t t0, t1, t2, t3; \
register uint64_t load[4]; \
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(input)[2 * TX]; \
s0 = load[0]; \
s1 = load[0] >> 32; \
s2 = load[1]; \
s3 = load[1] >> 32; \
reinterpret_cast<uint4*>(load)[1] = reinterpret_cast<const uint4*>(input)[2 * TX + 1]; \
t0 = load[2]; \
t1 = load[2] >> 32; \
t2 = load[3]; \
t3 = load[3] >> 32;
#define MIX_256 \
r0 = s1; \
r1 = s3; \
\
s1 = t0; \
t0 = s2; \
s2 = r0; \
\
s3 = t1; \
t1 = t2; \
t2 = r1;
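// MIX_256 implements Haraka-256's MIX layer: the eight 32-bit columns of the
// two AES states s and t are interleaved (r0 and r1 hold temporaries), so after
// the mix the first state holds columns (s0, t0, s1, t1) and the second holds
// (s2, t2, s3, t3) of the previous round.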
__global__ void haraka256Kernel(const uint64_t* msg, uint64_t* hash, const uint32_t num_msgs)
{
COPY_CONSTANT_SHARED_ENC
if (TX >= (num_msgs))
return;
GLOBAL_LOAD_SHARED_SETUP_256(msg)
AES_ENC_ROUND(0 * 4, r, s);
AES_ENC_ROUND(0 * 4 + 8, s, r);
AES_ENC_ROUND(1 * 4, r, t);
AES_ENC_ROUND(1 * 4 + 8, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 16, r, s);
AES_ENC_ROUND(0 * 4 + 24, s, r);
AES_ENC_ROUND(1 * 4 + 16, r, t);
AES_ENC_ROUND(1 * 4 + 24, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 32, r, s);
AES_ENC_ROUND(0 * 4 + 40, s, r);
AES_ENC_ROUND(1 * 4 + 32, r, t);
AES_ENC_ROUND(1 * 4 + 40, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 48, r, s);
AES_ENC_ROUND(0 * 4 + 56, s, r);
AES_ENC_ROUND(1 * 4 + 48, r, t);
AES_ENC_ROUND(1 * 4 + 56, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 64, r, s);
AES_ENC_ROUND(0 * 4 + 72, s, r);
AES_ENC_ROUND(1 * 4 + 64, r, t);
AES_ENC_ROUND(1 * 4 + 72, t, r);
MIX_256
load[0] ^= (s0 | ((uint64_t)s1) << 32);
load[1] ^= (s2 | ((uint64_t)s3) << 32);
reinterpret_cast<uint4*>(hash)[2 * TX] = reinterpret_cast<uint4*>(load)[0];
load[2] ^= (t0 | ((uint64_t)t1) << 32);
load[3] ^= (t2 | ((uint64_t)t3) << 32);
reinterpret_cast<uint4*>(hash)[2 * TX + 1] = reinterpret_cast<uint4*>(load)[1];
}
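// harakaOTSKernel hashes each 32-byte message with Haraka-256 and splits the
// digest into sixteen 16-bit chunks stored in b[1..16]; b[0] receives a
// Winternitz-style checksum, the sum of (256 - byte) over all 32 digest bytes,
// which later prevents a forger from only ever increasing chunk values.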
__global__ void harakaOTSKernel(const uint64_t* msg, uint16_t* b, const uint32_t num_msgs)
{
COPY_CONSTANT_SHARED_ENC
if (TX >= (num_msgs))
return;
GLOBAL_LOAD_SHARED_SETUP_256(msg)
AES_ENC_ROUND(0 * 4, r, s);
AES_ENC_ROUND(0 * 4 + 8, s, r);
AES_ENC_ROUND(1 * 4, r, t);
AES_ENC_ROUND(1 * 4 + 8, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 16, r, s);
AES_ENC_ROUND(0 * 4 + 24, s, r);
AES_ENC_ROUND(1 * 4 + 16, r, t);
AES_ENC_ROUND(1 * 4 + 24, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 32, r, s);
AES_ENC_ROUND(0 * 4 + 40, s, r);
AES_ENC_ROUND(1 * 4 + 32, r, t);
AES_ENC_ROUND(1 * 4 + 40, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 48, r, s);
AES_ENC_ROUND(0 * 4 + 56, s, r);
AES_ENC_ROUND(1 * 4 + 48, r, t);
AES_ENC_ROUND(1 * 4 + 56, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 64, r, s);
AES_ENC_ROUND(0 * 4 + 72, s, r);
AES_ENC_ROUND(1 * 4 + 64, r, t);
AES_ENC_ROUND(1 * 4 + 72, t, r);
MIX_256
load[0] ^= (s0 | ((uint64_t)s1) << 32);
load[1] ^= (s2 | ((uint64_t)s3) << 32);
load[2] ^= (t0 | ((uint64_t)t1) << 32);
load[3] ^= (t2 | ((uint64_t)t3) << 32);
#pragma unroll
for(int i = 1; i < 17; ++i)
b[17 * TX + i] = reinterpret_cast<uint16_t*>(load)[i - 1];
uint16_t checksum = 256 - reinterpret_cast<uint8_t*>(load)[0];
#pragma unroll
for (int i = 1; i < 32; ++i)
checksum += (256 - reinterpret_cast<uint8_t*>(load)[i]);
b[17 * TX] = checksum;
}
__global__ void harakaOTSCreateSignatureKernel(const uint64_t* priv_key, uint64_t* pub_key, uint64_t* signature, const uint8_t* b, const uint32_t num_chunks)
{
COPY_CONSTANT_SHARED_ENC
if (TX >= (num_chunks))
return;
GLOBAL_LOAD_SHARED_SETUP_256(priv_key)
uint8_t b_ = b[TX];
#pragma unroll
for (int i = 0; i < 255; ++i)
{
if (i == b_)
{
reinterpret_cast<uint4*>(signature)[2 * TX] = reinterpret_cast<uint4*>(load)[0];
reinterpret_cast<uint4*>(signature)[2 * TX + 1] = reinterpret_cast<uint4*>(load)[1];
}
AES_ENC_ROUND(0 * 4, r, s);
AES_ENC_ROUND(0 * 4 + 8, s, r);
AES_ENC_ROUND(1 * 4, r, t);
AES_ENC_ROUND(1 * 4 + 8, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 16, r, s);
AES_ENC_ROUND(0 * 4 + 24, s, r);
AES_ENC_ROUND(1 * 4 + 16, r, t);
AES_ENC_ROUND(1 * 4 + 24, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 32, r, s);
AES_ENC_ROUND(0 * 4 + 40, s, r);
AES_ENC_ROUND(1 * 4 + 32, r, t);
AES_ENC_ROUND(1 * 4 + 40, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 48, r, s);
AES_ENC_ROUND(0 * 4 + 56, s, r);
AES_ENC_ROUND(1 * 4 + 48, r, t);
AES_ENC_ROUND(1 * 4 + 56, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 64, r, s);
AES_ENC_ROUND(0 * 4 + 72, s, r);
AES_ENC_ROUND(1 * 4 + 64, r, t);
AES_ENC_ROUND(1 * 4 + 72, t, r);
MIX_256
load[0] ^= (s0 | ((uint64_t)s1) << 32);
load[1] ^= (s2 | ((uint64_t)s3) << 32);
load[2] ^= (t0 | ((uint64_t)t1) << 32);
load[3] ^= (t2 | ((uint64_t)t3) << 32);
if (i < 255)
{
s0 = load[0];
s1 = load[0] >> 32;
s2 = load[1];
s3 = load[1] >> 32;
t0 = load[2];
t1 = load[2] >> 32;
t2 = load[3];
t3 = load[3] >> 32;
}
}
if (b_ == 255)
{
reinterpret_cast<uint4*>(signature)[2 * TX] = reinterpret_cast<uint4*>(load)[0];
reinterpret_cast<uint4*>(signature)[2 * TX + 1] = reinterpret_cast<uint4*>(load)[1];
}
reinterpret_cast<uint4*>(pub_key)[2 * TX] = reinterpret_cast<uint4*>(load)[0];
reinterpret_cast<uint4*>(pub_key)[2 * TX + 1] = reinterpret_cast<uint4*>(load)[1];
}
__global__ void harakaOTSCreateVerificationKernel(const uint64_t* signature, uint64_t* verification, uint8_t* b, const uint32_t num_chunks)
{
COPY_CONSTANT_SHARED_ENC
if (TX >= (num_chunks))
return;
GLOBAL_LOAD_SHARED_SETUP_256(signature)
uint8_t b_ = UINT8_MAX - b[TX];
for (uint8_t i = 0; i < b_; ++i)
{
AES_ENC_ROUND(0 * 4, r, s);
AES_ENC_ROUND(0 * 4 + 8, s, r);
AES_ENC_ROUND(1 * 4, r, t);
AES_ENC_ROUND(1 * 4 + 8, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 16, r, s);
AES_ENC_ROUND(0 * 4 + 24, s, r);
AES_ENC_ROUND(1 * 4 + 16, r, t);
AES_ENC_ROUND(1 * 4 + 24, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 32, r, s);
AES_ENC_ROUND(0 * 4 + 40, s, r);
AES_ENC_ROUND(1 * 4 + 32, r, t);
AES_ENC_ROUND(1 * 4 + 40, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 48, r, s);
AES_ENC_ROUND(0 * 4 + 56, s, r);
AES_ENC_ROUND(1 * 4 + 48, r, t);
AES_ENC_ROUND(1 * 4 + 56, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 64, r, s);
AES_ENC_ROUND(0 * 4 + 72, s, r);
AES_ENC_ROUND(1 * 4 + 64, r, t);
AES_ENC_ROUND(1 * 4 + 72, t, r);
MIX_256
load[0] ^= (s0 | ((uint64_t)s1) << 32);
load[1] ^= (s2 | ((uint64_t)s3) << 32);
load[2] ^= (t0 | ((uint64_t)t1) << 32);
load[3] ^= (t2 | ((uint64_t)t3) << 32);
if (i < 255)
{
s0 = load[0];
s1 = load[0] >> 32;
s2 = load[1];
s3 = load[1] >> 32;
t0 = load[2];
t1 = load[2] >> 32;
t2 = load[3];
t3 = load[3] >> 32;
}
}
reinterpret_cast<uint4*>(verification)[2 * TX] = reinterpret_cast<uint4*>(load)[0];
reinterpret_cast<uint4*>(verification)[2 * TX + 1] = reinterpret_cast<uint4*>(load)[1];
}
#define GLOBAL_LOAD_SHARED_SETUP_MERKLE \
register uint32_t r0, r1, r2, r3; \
register uint32_t s0, s1, s2, s3; \
register uint32_t t0, t1, t2, t3; \
register uint32_t u0, u1, u2, u3; \
register uint32_t v0, v1, v2, v3; \
register uint64_t load[2]; \
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(tree)[(childs - 1) * 2 + 4 * TX]; \
s0 = load[0]; \
s1 = load[0] >> 32; \
s2 = load[1]; \
s3 = load[1] >> 32; \
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(tree)[(childs - 1) * 2 + 4 * TX + 1]; \
t0 = load[0]; \
t1 = load[0] >> 32; \
t2 = load[1]; \
t3 = load[1] >> 32;\
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(tree)[(childs - 1) * 2 + 4 * TX + 2]; \
u0 = load[0]; \
u1 = load[0] >> 32; \
u2 = load[1]; \
u3 = load[1] >> 32;\
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(tree)[(childs - 1) * 2 + 4 * TX + 3]; \
v0 = load[0]; \
v1 = load[0] >> 32; \
v2 = load[1]; \
v3 = load[1] >> 32;
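// harakaBuildMerkleTree collapses one tree level per loop iteration: each
// active thread compresses its two adjacent 32-byte children with Haraka-512
// into their parent, XORs in the feed-forward from the tree array, stores the
// parent back, and then fetches its new sibling from a neighbouring lane with
// __shfl_down so the next level can be built without another global load.
// This appears to assume the threads cooperating on one subtree stay inside a
// single warp, i.e. a depth small enough that the shuffle distance s remains
// below the warp size.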
__global__ void harakaBuildMerkleTree(uint64_t* tree, const uint32_t num_parents, const uint32_t depth)
{
COPY_CONSTANT_SHARED_ENC
if (TX >= (num_parents))
return;
uint32_t childs = num_parents * 2;
GLOBAL_LOAD_SHARED_SETUP_MERKLE
for (int s = 1; s < (1 << depth); s *= 2)
{
if ((threadIdx.x % s) == 0)
{
AES_ENC_ROUND(0, r, s);
AES_ENC_ROUND(16, s, r);
AES_ENC_ROUND(1 * 4, r, t);
AES_ENC_ROUND(1 * 4 + 16, t, r);
AES_ENC_ROUND(2 * 4, r, u);
AES_ENC_ROUND(2 * 4 + 16, u, r);
AES_ENC_ROUND(3 * 4, r, v);
AES_ENC_ROUND(3 * 4 + 16, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 32, r, s);
AES_ENC_ROUND(0 * 4 + 48, s, r);
AES_ENC_ROUND(1 * 4 + 32, r, t);
AES_ENC_ROUND(1 * 4 + 48, t, r);
AES_ENC_ROUND(2 * 4 + 32, r, u);
AES_ENC_ROUND(2 * 4 + 48, u, r);
AES_ENC_ROUND(3 * 4 + 32, r, v);
AES_ENC_ROUND(3 * 4 + 48, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 64, r, s);
AES_ENC_ROUND(0 * 4 + 80, s, r);
AES_ENC_ROUND(1 * 4 + 64, r, t);
AES_ENC_ROUND(1 * 4 + 80, t, r);
AES_ENC_ROUND(2 * 4 + 64, r, u);
AES_ENC_ROUND(2 * 4 + 80, u, r);
AES_ENC_ROUND(3 * 4 + 64, r, v);
AES_ENC_ROUND(3 * 4 + 80, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 96, r, s);
AES_ENC_ROUND(0 * 4 + 112, s, r);
AES_ENC_ROUND(1 * 4 + 96, r, t);
AES_ENC_ROUND(1 * 4 + 112, t, r);
AES_ENC_ROUND(2 * 4 + 96, r, u);
AES_ENC_ROUND(2 * 4 + 112, u, r);
AES_ENC_ROUND(3 * 4 + 96, r, v);
AES_ENC_ROUND(3 * 4 + 112, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 128, r, s);
AES_ENC_ROUND(0 * 4 + 144, s, r);
AES_ENC_ROUND(1 * 4 + 128, r, t);
AES_ENC_ROUND(1 * 4 + 144, t, r);
AES_ENC_ROUND(2 * 4 + 128, r, u);
AES_ENC_ROUND(2 * 4 + 144, u, r);
AES_ENC_ROUND(3 * 4 + 128, r, v);
AES_ENC_ROUND(3 * 4 + 144, v, r);
MIX_512
load[1] = s2 | ((uint64_t)s3) << 32;
load[1] ^= tree[(childs - 1) * 4 + 8 * TX / s + 1];
tree[(childs / 2 - 1) * 4 + TX / s * 4] = load[1];
s0 = load[1];
s1 = load[1] >> 32;
load[1] = t2 | ((uint64_t)t3) << 32;
load[1] ^= tree[(childs - 1) * 4 + 8 * TX / s + 3];
tree[(childs / 2 - 1) * 4 + TX / s * 4 + 1] = load[1];
s2 = load[1];
s3 = load[1] >> 32;
load[1] = u0 | ((uint64_t)u1) << 32;
load[1] ^= tree[(childs - 1) * 4 + 8 * TX / s + 4];
tree[(childs / 2 - 1) * 4 + TX / s * 4 + 2] = load[1];
t0 = load[1];
t1 = load[1] >> 32;
load[1] = v0 | ((uint64_t)v1) << 32;
load[1] ^= tree[(childs - 1) * 4 + 8 * TX / s + 6];
tree[(childs / 2 - 1) * 4 + TX / s * 4 + 3] = load[1];
t2 = load[1];
t3 = load[1] >> 32;
u0 = __shfl_down(s0, s);
u1 = __shfl_down(s1, s);
u2 = __shfl_down(s2, s);
u3 = __shfl_down(s3, s);
v0 = __shfl_down(t0, s);
v1 = __shfl_down(t1, s);
v2 = __shfl_down(t2, s);
v3 = __shfl_down(t3, s);
childs = childs >> 1;
}
}
} | 71b2a8e5f790b754a0abc295e0640f64cc4bafc2.cu |
#include "haraka_cuda.h"
#include <stdint.h>
__device__ const uint32_t Te0[256] = { \
0xa56363c6U, 0x847c7cf8U, 0x997777eeU, 0x8d7b7bf6U,\
0x0df2f2ffU, 0xbd6b6bd6U, 0xb16f6fdeU, 0x54c5c591U,\
0x50303060U, 0x03010102U, 0xa96767ceU, 0x7d2b2b56U,\
0x19fefee7U, 0x62d7d7b5U, 0xe6abab4dU, 0x9a7676ecU,\
0x45caca8fU, 0x9d82821fU, 0x40c9c989U, 0x877d7dfaU,\
0x15fafaefU, 0xeb5959b2U, 0xc947478eU, 0x0bf0f0fbU,\
0xecadad41U, 0x67d4d4b3U, 0xfda2a25fU, 0xeaafaf45U,\
0xbf9c9c23U, 0xf7a4a453U, 0x967272e4U, 0x5bc0c09bU,\
0xc2b7b775U, 0x1cfdfde1U, 0xae93933dU, 0x6a26264cU,\
0x5a36366cU, 0x413f3f7eU, 0x02f7f7f5U, 0x4fcccc83U,\
0x5c343468U, 0xf4a5a551U, 0x34e5e5d1U, 0x08f1f1f9U,\
0x937171e2U, 0x73d8d8abU, 0x53313162U, 0x3f15152aU,\
0x0c040408U, 0x52c7c795U, 0x65232346U, 0x5ec3c39dU,\
0x28181830U, 0xa1969637U, 0x0f05050aU, 0xb59a9a2fU,\
0x0907070eU, 0x36121224U, 0x9b80801bU, 0x3de2e2dfU,\
0x26ebebcdU, 0x6927274eU, 0xcdb2b27fU, 0x9f7575eaU,\
0x1b090912U, 0x9e83831dU, 0x742c2c58U, 0x2e1a1a34U,\
0x2d1b1b36U, 0xb26e6edcU, 0xee5a5ab4U, 0xfba0a05bU,\
0xf65252a4U, 0x4d3b3b76U, 0x61d6d6b7U, 0xceb3b37dU,\
0x7b292952U, 0x3ee3e3ddU, 0x712f2f5eU, 0x97848413U,\
0xf55353a6U, 0x68d1d1b9U, 0x00000000U, 0x2cededc1U,\
0x60202040U, 0x1ffcfce3U, 0xc8b1b179U, 0xed5b5bb6U,\
0xbe6a6ad4U, 0x46cbcb8dU, 0xd9bebe67U, 0x4b393972U,\
0xde4a4a94U, 0xd44c4c98U, 0xe85858b0U, 0x4acfcf85U,\
0x6bd0d0bbU, 0x2aefefc5U, 0xe5aaaa4fU, 0x16fbfbedU,\
0xc5434386U, 0xd74d4d9aU, 0x55333366U, 0x94858511U,\
0xcf45458aU, 0x10f9f9e9U, 0x06020204U, 0x817f7ffeU,\
0xf05050a0U, 0x443c3c78U, 0xba9f9f25U, 0xe3a8a84bU,\
0xf35151a2U, 0xfea3a35dU, 0xc0404080U, 0x8a8f8f05U,\
0xad92923fU, 0xbc9d9d21U, 0x48383870U, 0x04f5f5f1U,\
0xdfbcbc63U, 0xc1b6b677U, 0x75dadaafU, 0x63212142U,\
0x30101020U, 0x1affffe5U, 0x0ef3f3fdU, 0x6dd2d2bfU,\
0x4ccdcd81U, 0x140c0c18U, 0x35131326U, 0x2fececc3U,\
0xe15f5fbeU, 0xa2979735U, 0xcc444488U, 0x3917172eU,\
0x57c4c493U, 0xf2a7a755U, 0x827e7efcU, 0x473d3d7aU,\
0xac6464c8U, 0xe75d5dbaU, 0x2b191932U, 0x957373e6U,\
0xa06060c0U, 0x98818119U, 0xd14f4f9eU, 0x7fdcdca3U,\
0x66222244U, 0x7e2a2a54U, 0xab90903bU, 0x8388880bU,\
0xca46468cU, 0x29eeeec7U, 0xd3b8b86bU, 0x3c141428U,\
0x79dedea7U, 0xe25e5ebcU, 0x1d0b0b16U, 0x76dbdbadU,\
0x3be0e0dbU, 0x56323264U, 0x4e3a3a74U, 0x1e0a0a14U,\
0xdb494992U, 0x0a06060cU, 0x6c242448U, 0xe45c5cb8U,\
0x5dc2c29fU, 0x6ed3d3bdU, 0xefacac43U, 0xa66262c4U,\
0xa8919139U, 0xa4959531U, 0x37e4e4d3U, 0x8b7979f2U,\
0x32e7e7d5U, 0x43c8c88bU, 0x5937376eU, 0xb76d6ddaU,\
0x8c8d8d01U, 0x64d5d5b1U, 0xd24e4e9cU, 0xe0a9a949U,\
0xb46c6cd8U, 0xfa5656acU, 0x07f4f4f3U, 0x25eaeacfU,\
0xaf6565caU, 0x8e7a7af4U, 0xe9aeae47U, 0x18080810U,\
0xd5baba6fU, 0x887878f0U, 0x6f25254aU, 0x722e2e5cU,\
0x241c1c38U, 0xf1a6a657U, 0xc7b4b473U, 0x51c6c697U,\
0x23e8e8cbU, 0x7cdddda1U, 0x9c7474e8U, 0x211f1f3eU,\
0xdd4b4b96U, 0xdcbdbd61U, 0x868b8b0dU, 0x858a8a0fU,\
0x907070e0U, 0x423e3e7cU, 0xc4b5b571U, 0xaa6666ccU,\
0xd8484890U, 0x05030306U, 0x01f6f6f7U, 0x120e0e1cU,\
0xa36161c2U, 0x5f35356aU, 0xf95757aeU, 0xd0b9b969U,\
0x91868617U, 0x58c1c199U, 0x271d1d3aU, 0xb99e9e27U,\
0x38e1e1d9U, 0x13f8f8ebU, 0xb398982bU, 0x33111122U,\
0xbb6969d2U, 0x70d9d9a9U, 0x898e8e07U, 0xa7949433U,\
0xb69b9b2dU, 0x221e1e3cU, 0x92878715U, 0x20e9e9c9U,\
0x49cece87U, 0xff5555aaU, 0x78282850U, 0x7adfdfa5U,\
0x8f8c8c03U, 0xf8a1a159U, 0x80898909U, 0x170d0d1aU,\
0xdabfbf65U, 0x31e6e6d7U, 0xc6424284U, 0xb86868d0U,\
0xc3414182U, 0xb0999929U, 0x772d2d5aU, 0x110f0f1eU,\
0xcbb0b07bU, 0xfc5454a8U, 0xd6bbbb6dU, 0x3a16162cU \
};
__device__ const uint32_t Te1[256] = { \
0x6363c6a5U, 0x7c7cf884U, 0x7777ee99U, 0x7b7bf68dU,\
0xf2f2ff0dU, 0x6b6bd6bdU, 0x6f6fdeb1U, 0xc5c59154U,\
0x30306050U, 0x01010203U, 0x6767cea9U, 0x2b2b567dU,\
0xfefee719U, 0xd7d7b562U, 0xabab4de6U, 0x7676ec9aU,\
0xcaca8f45U, 0x82821f9dU, 0xc9c98940U, 0x7d7dfa87U,\
0xfafaef15U, 0x5959b2ebU, 0x47478ec9U, 0xf0f0fb0bU,\
0xadad41ecU, 0xd4d4b367U, 0xa2a25ffdU, 0xafaf45eaU,\
0x9c9c23bfU, 0xa4a453f7U, 0x7272e496U, 0xc0c09b5bU,\
0xb7b775c2U, 0xfdfde11cU, 0x93933daeU, 0x26264c6aU,\
0x36366c5aU, 0x3f3f7e41U, 0xf7f7f502U, 0xcccc834fU,\
0x3434685cU, 0xa5a551f4U, 0xe5e5d134U, 0xf1f1f908U,\
0x7171e293U, 0xd8d8ab73U, 0x31316253U, 0x15152a3fU,\
0x0404080cU, 0xc7c79552U, 0x23234665U, 0xc3c39d5eU,\
0x18183028U, 0x969637a1U, 0x05050a0fU, 0x9a9a2fb5U,\
0x07070e09U, 0x12122436U, 0x80801b9bU, 0xe2e2df3dU,\
0xebebcd26U, 0x27274e69U, 0xb2b27fcdU, 0x7575ea9fU,\
0x0909121bU, 0x83831d9eU, 0x2c2c5874U, 0x1a1a342eU,\
0x1b1b362dU, 0x6e6edcb2U, 0x5a5ab4eeU, 0xa0a05bfbU,\
0x5252a4f6U, 0x3b3b764dU, 0xd6d6b761U, 0xb3b37dceU,\
0x2929527bU, 0xe3e3dd3eU, 0x2f2f5e71U, 0x84841397U,\
0x5353a6f5U, 0xd1d1b968U, 0x00000000U, 0xededc12cU,\
0x20204060U, 0xfcfce31fU, 0xb1b179c8U, 0x5b5bb6edU,\
0x6a6ad4beU, 0xcbcb8d46U, 0xbebe67d9U, 0x3939724bU,\
0x4a4a94deU, 0x4c4c98d4U, 0x5858b0e8U, 0xcfcf854aU,\
0xd0d0bb6bU, 0xefefc52aU, 0xaaaa4fe5U, 0xfbfbed16U,\
0x434386c5U, 0x4d4d9ad7U, 0x33336655U, 0x85851194U,\
0x45458acfU, 0xf9f9e910U, 0x02020406U, 0x7f7ffe81U,\
0x5050a0f0U, 0x3c3c7844U, 0x9f9f25baU, 0xa8a84be3U,\
0x5151a2f3U, 0xa3a35dfeU, 0x404080c0U, 0x8f8f058aU,\
0x92923fadU, 0x9d9d21bcU, 0x38387048U, 0xf5f5f104U,\
0xbcbc63dfU, 0xb6b677c1U, 0xdadaaf75U, 0x21214263U,\
0x10102030U, 0xffffe51aU, 0xf3f3fd0eU, 0xd2d2bf6dU,\
0xcdcd814cU, 0x0c0c1814U, 0x13132635U, 0xececc32fU,\
0x5f5fbee1U, 0x979735a2U, 0x444488ccU, 0x17172e39U,\
0xc4c49357U, 0xa7a755f2U, 0x7e7efc82U, 0x3d3d7a47U,\
0x6464c8acU, 0x5d5dbae7U, 0x1919322bU, 0x7373e695U,\
0x6060c0a0U, 0x81811998U, 0x4f4f9ed1U, 0xdcdca37fU,\
0x22224466U, 0x2a2a547eU, 0x90903babU, 0x88880b83U,\
0x46468ccaU, 0xeeeec729U, 0xb8b86bd3U, 0x1414283cU,\
0xdedea779U, 0x5e5ebce2U, 0x0b0b161dU, 0xdbdbad76U,\
0xe0e0db3bU, 0x32326456U, 0x3a3a744eU, 0x0a0a141eU,\
0x494992dbU, 0x06060c0aU, 0x2424486cU, 0x5c5cb8e4U,\
0xc2c29f5dU, 0xd3d3bd6eU, 0xacac43efU, 0x6262c4a6U,\
0x919139a8U, 0x959531a4U, 0xe4e4d337U, 0x7979f28bU,\
0xe7e7d532U, 0xc8c88b43U, 0x37376e59U, 0x6d6ddab7U,\
0x8d8d018cU, 0xd5d5b164U, 0x4e4e9cd2U, 0xa9a949e0U,\
0x6c6cd8b4U, 0x5656acfaU, 0xf4f4f307U, 0xeaeacf25U,\
0x6565caafU, 0x7a7af48eU, 0xaeae47e9U, 0x08081018U,\
0xbaba6fd5U, 0x7878f088U, 0x25254a6fU, 0x2e2e5c72U,\
0x1c1c3824U, 0xa6a657f1U, 0xb4b473c7U, 0xc6c69751U,\
0xe8e8cb23U, 0xdddda17cU, 0x7474e89cU, 0x1f1f3e21U,\
0x4b4b96ddU, 0xbdbd61dcU, 0x8b8b0d86U, 0x8a8a0f85U,\
0x7070e090U, 0x3e3e7c42U, 0xb5b571c4U, 0x6666ccaaU,\
0x484890d8U, 0x03030605U, 0xf6f6f701U, 0x0e0e1c12U,\
0x6161c2a3U, 0x35356a5fU, 0x5757aef9U, 0xb9b969d0U,\
0x86861791U, 0xc1c19958U, 0x1d1d3a27U, 0x9e9e27b9U,\
0xe1e1d938U, 0xf8f8eb13U, 0x98982bb3U, 0x11112233U,\
0x6969d2bbU, 0xd9d9a970U, 0x8e8e0789U, 0x949433a7U,\
0x9b9b2db6U, 0x1e1e3c22U, 0x87871592U, 0xe9e9c920U,\
0xcece8749U, 0x5555aaffU, 0x28285078U, 0xdfdfa57aU,\
0x8c8c038fU, 0xa1a159f8U, 0x89890980U, 0x0d0d1a17U,\
0xbfbf65daU, 0xe6e6d731U, 0x424284c6U, 0x6868d0b8U,\
0x414182c3U, 0x999929b0U, 0x2d2d5a77U, 0x0f0f1e11U,\
0xb0b07bcbU, 0x5454a8fcU, 0xbbbb6dd6U, 0x16162c3aU \
};
__device__ const uint32_t Te2[256] = { \
0x63c6a563U, 0x7cf8847cU, 0x77ee9977U, 0x7bf68d7bU,\
0xf2ff0df2U, 0x6bd6bd6bU, 0x6fdeb16fU, 0xc59154c5U,\
0x30605030U, 0x01020301U, 0x67cea967U, 0x2b567d2bU,\
0xfee719feU, 0xd7b562d7U, 0xab4de6abU, 0x76ec9a76U,\
0xca8f45caU, 0x821f9d82U, 0xc98940c9U, 0x7dfa877dU,\
0xfaef15faU, 0x59b2eb59U, 0x478ec947U, 0xf0fb0bf0U,\
0xad41ecadU, 0xd4b367d4U, 0xa25ffda2U, 0xaf45eaafU,\
0x9c23bf9cU, 0xa453f7a4U, 0x72e49672U, 0xc09b5bc0U,\
0xb775c2b7U, 0xfde11cfdU, 0x933dae93U, 0x264c6a26U,\
0x366c5a36U, 0x3f7e413fU, 0xf7f502f7U, 0xcc834fccU,\
0x34685c34U, 0xa551f4a5U, 0xe5d134e5U, 0xf1f908f1U,\
0x71e29371U, 0xd8ab73d8U, 0x31625331U, 0x152a3f15U,\
0x04080c04U, 0xc79552c7U, 0x23466523U, 0xc39d5ec3U,\
0x18302818U, 0x9637a196U, 0x050a0f05U, 0x9a2fb59aU,\
0x070e0907U, 0x12243612U, 0x801b9b80U, 0xe2df3de2U,\
0xebcd26ebU, 0x274e6927U, 0xb27fcdb2U, 0x75ea9f75U,\
0x09121b09U, 0x831d9e83U, 0x2c58742cU, 0x1a342e1aU,\
0x1b362d1bU, 0x6edcb26eU, 0x5ab4ee5aU, 0xa05bfba0U,\
0x52a4f652U, 0x3b764d3bU, 0xd6b761d6U, 0xb37dceb3U,\
0x29527b29U, 0xe3dd3ee3U, 0x2f5e712fU, 0x84139784U,\
0x53a6f553U, 0xd1b968d1U, 0x00000000U, 0xedc12cedU,\
0x20406020U, 0xfce31ffcU, 0xb179c8b1U, 0x5bb6ed5bU,\
0x6ad4be6aU, 0xcb8d46cbU, 0xbe67d9beU, 0x39724b39U,\
0x4a94de4aU, 0x4c98d44cU, 0x58b0e858U, 0xcf854acfU,\
0xd0bb6bd0U, 0xefc52aefU, 0xaa4fe5aaU, 0xfbed16fbU,\
0x4386c543U, 0x4d9ad74dU, 0x33665533U, 0x85119485U,\
0x458acf45U, 0xf9e910f9U, 0x02040602U, 0x7ffe817fU,\
0x50a0f050U, 0x3c78443cU, 0x9f25ba9fU, 0xa84be3a8U,\
0x51a2f351U, 0xa35dfea3U, 0x4080c040U, 0x8f058a8fU,\
0x923fad92U, 0x9d21bc9dU, 0x38704838U, 0xf5f104f5U,\
0xbc63dfbcU, 0xb677c1b6U, 0xdaaf75daU, 0x21426321U,\
0x10203010U, 0xffe51affU, 0xf3fd0ef3U, 0xd2bf6dd2U,\
0xcd814ccdU, 0x0c18140cU, 0x13263513U, 0xecc32fecU,\
0x5fbee15fU, 0x9735a297U, 0x4488cc44U, 0x172e3917U,\
0xc49357c4U, 0xa755f2a7U, 0x7efc827eU, 0x3d7a473dU,\
0x64c8ac64U, 0x5dbae75dU, 0x19322b19U, 0x73e69573U,\
0x60c0a060U, 0x81199881U, 0x4f9ed14fU, 0xdca37fdcU,\
0x22446622U, 0x2a547e2aU, 0x903bab90U, 0x880b8388U,\
0x468cca46U, 0xeec729eeU, 0xb86bd3b8U, 0x14283c14U,\
0xdea779deU, 0x5ebce25eU, 0x0b161d0bU, 0xdbad76dbU,\
0xe0db3be0U, 0x32645632U, 0x3a744e3aU, 0x0a141e0aU,\
0x4992db49U, 0x060c0a06U, 0x24486c24U, 0x5cb8e45cU,\
0xc29f5dc2U, 0xd3bd6ed3U, 0xac43efacU, 0x62c4a662U,\
0x9139a891U, 0x9531a495U, 0xe4d337e4U, 0x79f28b79U,\
0xe7d532e7U, 0xc88b43c8U, 0x376e5937U, 0x6ddab76dU,\
0x8d018c8dU, 0xd5b164d5U, 0x4e9cd24eU, 0xa949e0a9U,\
0x6cd8b46cU, 0x56acfa56U, 0xf4f307f4U, 0xeacf25eaU,\
0x65caaf65U, 0x7af48e7aU, 0xae47e9aeU, 0x08101808U,\
0xba6fd5baU, 0x78f08878U, 0x254a6f25U, 0x2e5c722eU,\
0x1c38241cU, 0xa657f1a6U, 0xb473c7b4U, 0xc69751c6U,\
0xe8cb23e8U, 0xdda17cddU, 0x74e89c74U, 0x1f3e211fU,\
0x4b96dd4bU, 0xbd61dcbdU, 0x8b0d868bU, 0x8a0f858aU,\
0x70e09070U, 0x3e7c423eU, 0xb571c4b5U, 0x66ccaa66U,\
0x4890d848U, 0x03060503U, 0xf6f701f6U, 0x0e1c120eU,\
0x61c2a361U, 0x356a5f35U, 0x57aef957U, 0xb969d0b9U,\
0x86179186U, 0xc19958c1U, 0x1d3a271dU, 0x9e27b99eU,\
0xe1d938e1U, 0xf8eb13f8U, 0x982bb398U, 0x11223311U,\
0x69d2bb69U, 0xd9a970d9U, 0x8e07898eU, 0x9433a794U,\
0x9b2db69bU, 0x1e3c221eU, 0x87159287U, 0xe9c920e9U,\
0xce8749ceU, 0x55aaff55U, 0x28507828U, 0xdfa57adfU,\
0x8c038f8cU, 0xa159f8a1U, 0x89098089U, 0x0d1a170dU,\
0xbf65dabfU, 0xe6d731e6U, 0x4284c642U, 0x68d0b868U,\
0x4182c341U, 0x9929b099U, 0x2d5a772dU, 0x0f1e110fU,\
0xb07bcbb0U, 0x54a8fc54U, 0xbb6dd6bbU, 0x162c3a16U \
};
__device__ const uint32_t Te3[256] = { \
0xc6a56363U, 0xf8847c7cU, 0xee997777U, 0xf68d7b7bU,\
0xff0df2f2U, 0xd6bd6b6bU, 0xdeb16f6fU, 0x9154c5c5U,\
0x60503030U, 0x02030101U, 0xcea96767U, 0x567d2b2bU,\
0xe719fefeU, 0xb562d7d7U, 0x4de6ababU, 0xec9a7676U,\
0x8f45cacaU, 0x1f9d8282U, 0x8940c9c9U, 0xfa877d7dU,\
0xef15fafaU, 0xb2eb5959U, 0x8ec94747U, 0xfb0bf0f0U,\
0x41ecadadU, 0xb367d4d4U, 0x5ffda2a2U, 0x45eaafafU,\
0x23bf9c9cU, 0x53f7a4a4U, 0xe4967272U, 0x9b5bc0c0U,\
0x75c2b7b7U, 0xe11cfdfdU, 0x3dae9393U, 0x4c6a2626U,\
0x6c5a3636U, 0x7e413f3fU, 0xf502f7f7U, 0x834fccccU,\
0x685c3434U, 0x51f4a5a5U, 0xd134e5e5U, 0xf908f1f1U,\
0xe2937171U, 0xab73d8d8U, 0x62533131U, 0x2a3f1515U,\
0x080c0404U, 0x9552c7c7U, 0x46652323U, 0x9d5ec3c3U,\
0x30281818U, 0x37a19696U, 0x0a0f0505U, 0x2fb59a9aU,\
0x0e090707U, 0x24361212U, 0x1b9b8080U, 0xdf3de2e2U,\
0xcd26ebebU, 0x4e692727U, 0x7fcdb2b2U, 0xea9f7575U,\
0x121b0909U, 0x1d9e8383U, 0x58742c2cU, 0x342e1a1aU,\
0x362d1b1bU, 0xdcb26e6eU, 0xb4ee5a5aU, 0x5bfba0a0U,\
0xa4f65252U, 0x764d3b3bU, 0xb761d6d6U, 0x7dceb3b3U,\
0x527b2929U, 0xdd3ee3e3U, 0x5e712f2fU, 0x13978484U,\
0xa6f55353U, 0xb968d1d1U, 0x00000000U, 0xc12cededU,\
0x40602020U, 0xe31ffcfcU, 0x79c8b1b1U, 0xb6ed5b5bU,\
0xd4be6a6aU, 0x8d46cbcbU, 0x67d9bebeU, 0x724b3939U,\
0x94de4a4aU, 0x98d44c4cU, 0xb0e85858U, 0x854acfcfU,\
0xbb6bd0d0U, 0xc52aefefU, 0x4fe5aaaaU, 0xed16fbfbU,\
0x86c54343U, 0x9ad74d4dU, 0x66553333U, 0x11948585U,\
0x8acf4545U, 0xe910f9f9U, 0x04060202U, 0xfe817f7fU,\
0xa0f05050U, 0x78443c3cU, 0x25ba9f9fU, 0x4be3a8a8U,\
0xa2f35151U, 0x5dfea3a3U, 0x80c04040U, 0x058a8f8fU,\
0x3fad9292U, 0x21bc9d9dU, 0x70483838U, 0xf104f5f5U,\
0x63dfbcbcU, 0x77c1b6b6U, 0xaf75dadaU, 0x42632121U,\
0x20301010U, 0xe51affffU, 0xfd0ef3f3U, 0xbf6dd2d2U,\
0x814ccdcdU, 0x18140c0cU, 0x26351313U, 0xc32fececU,\
0xbee15f5fU, 0x35a29797U, 0x88cc4444U, 0x2e391717U,\
0x9357c4c4U, 0x55f2a7a7U, 0xfc827e7eU, 0x7a473d3dU,\
0xc8ac6464U, 0xbae75d5dU, 0x322b1919U, 0xe6957373U,\
0xc0a06060U, 0x19988181U, 0x9ed14f4fU, 0xa37fdcdcU,\
0x44662222U, 0x547e2a2aU, 0x3bab9090U, 0x0b838888U,\
0x8cca4646U, 0xc729eeeeU, 0x6bd3b8b8U, 0x283c1414U,\
0xa779dedeU, 0xbce25e5eU, 0x161d0b0bU, 0xad76dbdbU,\
0xdb3be0e0U, 0x64563232U, 0x744e3a3aU, 0x141e0a0aU,\
0x92db4949U, 0x0c0a0606U, 0x486c2424U, 0xb8e45c5cU,\
0x9f5dc2c2U, 0xbd6ed3d3U, 0x43efacacU, 0xc4a66262U,\
0x39a89191U, 0x31a49595U, 0xd337e4e4U, 0xf28b7979U,\
0xd532e7e7U, 0x8b43c8c8U, 0x6e593737U, 0xdab76d6dU,\
0x018c8d8dU, 0xb164d5d5U, 0x9cd24e4eU, 0x49e0a9a9U,\
0xd8b46c6cU, 0xacfa5656U, 0xf307f4f4U, 0xcf25eaeaU,\
0xcaaf6565U, 0xf48e7a7aU, 0x47e9aeaeU, 0x10180808U,\
0x6fd5babaU, 0xf0887878U, 0x4a6f2525U, 0x5c722e2eU,\
0x38241c1cU, 0x57f1a6a6U, 0x73c7b4b4U, 0x9751c6c6U,\
0xcb23e8e8U, 0xa17cddddU, 0xe89c7474U, 0x3e211f1fU,\
0x96dd4b4bU, 0x61dcbdbdU, 0x0d868b8bU, 0x0f858a8aU,\
0xe0907070U, 0x7c423e3eU, 0x71c4b5b5U, 0xccaa6666U,\
0x90d84848U, 0x06050303U, 0xf701f6f6U, 0x1c120e0eU,\
0xc2a36161U, 0x6a5f3535U, 0xaef95757U, 0x69d0b9b9U,\
0x17918686U, 0x9958c1c1U, 0x3a271d1dU, 0x27b99e9eU,\
0xd938e1e1U, 0xeb13f8f8U, 0x2bb39898U, 0x22331111U,\
0xd2bb6969U, 0xa970d9d9U, 0x07898e8eU, 0x33a79494U,\
0x2db69b9bU, 0x3c221e1eU, 0x15928787U, 0xc920e9e9U,\
0x8749ceceU, 0xaaff5555U, 0x50782828U, 0xa57adfdfU,\
0x038f8c8cU, 0x59f8a1a1U, 0x09808989U, 0x1a170d0dU,\
0x65dabfbfU, 0xd731e6e6U, 0x84c64242U, 0xd0b86868U,\
0x82c34141U, 0x29b09999U, 0x5a772d2dU, 0x1e110f0fU,\
0x7bcbb0b0U, 0xa8fc5454U, 0x6dd6bbbbU, 0x2c3a1616U \
};
__constant__ const uint32_t RC[256] = {\
0x0684704c, 0xe620c00a, 0xb2c5fef0, 0x75817b9d, \
0x8b66b4e1, 0x88f3a06b, 0x640f6ba4, 0x2f08f717, \
0x3402de2d, 0x53f28498, 0xcf029d60, 0x9f029114, \
0x0ed6eae6, 0x2e7b4f08, 0xbbf3bcaf, 0xfd5b4f79, \
0xcbcfb0cb, 0x4872448b, 0x79eecd1c, 0xbe397044, \
0x7eeacdee, 0x6e9032b7, 0x8d5335ed, 0x2b8a057b, \
0x67c28f43, 0x5e2e7cd0, 0xe2412761, 0xda4fef1b, \
0x2924d9b0, 0xafcacc07, 0x675ffde2, 0x1fc70b3b, \
0xab4d63f1, 0xe6867fe9, 0xecdb8fca, 0xb9d465ee, \
0x1c30bf84, 0xd4b7cd64, 0x5b2a404f, 0xad037e33, \
0xb2cc0bb9, 0x941723bf, 0x69028b2e, 0x8df69800, \
0xfa0478a6, 0xde6f5572, 0x4aaa9ec8, 0x5c9d2d8a, \
0xdfb49f2b, 0x6b772a12, 0x0efa4f2e, 0x29129fd4, \
0x1ea10344, 0xf449a236, 0x32d611ae, 0xbb6a12ee, \
0xaf044988, 0x4b050084, 0x5f9600c9, 0x9ca8eca6, \
0x21025ed8, 0x9d199c4f, 0x78a2c7e3, 0x27e593ec, \
0xbf3aaaf8, 0xa759c9b7, 0xb9282ecd, 0x82d40173, \
0x6260700d, 0x6186b017, 0x37f2efd9, 0x10307d6b, \
0x5aca45c2, 0x21300443, 0x81c29153, 0xf6fc9ac6, \
0x9223973c, 0x226b68bb, 0x2caf92e8, 0x36d1943a, \
0xd3bf9238, 0x225886eb, 0x6cbab958, 0xe51071b4, \
0xdb863ce5, 0xaef0c677, 0x933dfddd, 0x24e1128d, \
0xbb606268, 0xffeba09c, 0x83e48de3, 0xcb2212b1, \
0x734bd3dc, 0xe2e4d19c, 0x2db91a4e, 0xc72bf77d, \
0x43bb47c3, 0x61301b43, 0x4b1415c4, 0x2cb3924e, \
0xdba775a8, 0xe707eff6, 0x03b231dd, 0x16eb6899, \
0x6df3614b, 0x3c755977, 0x8e5e2302, 0x7eca472c, \
0xcda75a17, 0xd6de7d77, 0x6d1be5b9, 0xb88617f9, \
0xec6b43f0, 0x6ba8e9aa, 0x9d6c069d, 0xa946ee5d, \
0xcb1e6950, 0xf957332b, 0xa2531159, 0x3bf327c1, \
0x2cee0c75, 0x00da619c, 0xe4ed0353, 0x600ed0d9, \
0xf0b1a5a1, 0x96e90cab, 0x80bbbabc, 0x63a4a350, \
0xae3db102, 0x5e962988, 0xab0dde30, 0x938dca39, \
0x17bb8f38, 0xd554a40b, 0x8814f3a8, 0x2e75b442, \
0x34bb8a5b, 0x5f427fd7, 0xaeb6b779, 0x360a16f6, \
0x26f65241, 0xcbe55438, 0x43ce5918, 0xffbaafde, \
0x4ce99a54, 0xb9f3026a, 0xa2ca9cf7, 0x839ec978, \
0xae51a51a, 0x1bdff7be, 0x40c06e28, 0x22901235, \
0xa0c1613c, 0xba7ed22b, 0xc173bc0f, 0x48a659cf, \
0x756acc03, 0x02288288, 0x4ad6bdfd, 0xe9c59da1, \
};
#define AES_ENC_ROUND(n,D,S) \
AES_ENC_STEP(n,D,S,0,1,2,3); \
AES_ENC_STEP(n,D,S,1,2,3,0); \
AES_ENC_STEP(n,D,S,2,3,0,1); \
AES_ENC_STEP(n,D,S,3,0,1,2);
#define AES_ENC_STEP(n,D,S,W,X,Y,Z) \
D##W = TE(0)[ S##W & 0xff]; \
D##W ^= TE(1)[(S##X >> 8) & 0xff]; \
D##W ^= TE(2)[(S##Y >> 16) & 0xff]; \
D##W ^= TE(3)[ S##Z >> 24 ]; \
D##W ^= RC[n + 3 - W];
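// Added commentary (not in the original source): AES_ENC_STEP is the classic
// 32-bit T-table formulation of one AES column update. The four byte-indexed
// lookups into Te0..Te3 fuse SubBytes, ShiftRows and MixColumns, and the result
// is XORed with a Haraka round constant from RC[] in place of an AES round key.
// AES_ENC_ROUND applies this step to all four columns of one 128-bit state.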
#define TX (__umul24(blockIdx.x,blockDim.x) + threadIdx.x)
#define SX (threadIdx.x)
#define TE(n) Tes##n
#define COPY_CONSTANT_SHARED_ENC\
__shared__ uint32_t Tes0[256];\
__shared__ uint32_t Tes1[256];\
__shared__ uint32_t Tes2[256];\
__shared__ uint32_t Tes3[256];\
Tes0[SX] = Te0[SX];\
Tes1[SX] = Te1[SX];\
Tes2[SX] = Te2[SX];\
Tes3[SX] = Te3[SX];\
__syncthreads();
#define GLOBAL_LOAD_SHARED_SETUP_512 \
register uint32_t r0, r1, r2, r3; \
register uint32_t s0, s1, s2, s3; \
register uint32_t t0, t1, t2, t3; \
register uint32_t u0, u1, u2, u3; \
register uint32_t v0, v1, v2, v3; \
register uint64_t load[2]; \
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(msg)[4 * TX]; \
s0 = load[0]; \
s1 = load[0] >> 32; \
s2 = load[1]; \
s3 = load[1] >> 32; \
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(msg)[4 * TX + 1]; \
t0 = load[0]; \
t1 = load[0] >> 32; \
t2 = load[1]; \
t3 = load[1] >> 32;\
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(msg)[4 * TX + 2]; \
u0 = load[0]; \
u1 = load[0] >> 32; \
u2 = load[1]; \
u3 = load[1] >> 32;\
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(msg)[4 * TX + 3]; \
v0 = load[0]; \
v1 = load[0] >> 32; \
v2 = load[1]; \
v3 = load[1] >> 32;
#define MIX_512 \
r0 = s0; \
r1 = u2; \
\
s0 = s3; \
s3 = v3; \
v3 = v2; \
v2 = t2; \
t2 = v0; \
v0 = s2; \
s2 = t3; \
t3 = t0; \
t0 = u0; \
u0 = u1; \
u1 = s1; \
s1 = u3; \
u3 = t1; \
t1 = r0; \
\
u2 = v1; \
v1 = r1;
__global__ void haraka512Kernel(const uint64_t* msg, uint64_t* hash, const uint32_t num_msgs)
{
COPY_CONSTANT_SHARED_ENC
if (TX >= (num_msgs))
return;
GLOBAL_LOAD_SHARED_SETUP_512
AES_ENC_ROUND(0, r, s);
AES_ENC_ROUND(16, s, r);
AES_ENC_ROUND(1 * 4, r, t);
AES_ENC_ROUND(1 * 4 + 16, t, r);
AES_ENC_ROUND(2 * 4, r, u);
AES_ENC_ROUND(2 * 4 + 16, u, r);
AES_ENC_ROUND(3 * 4, r, v);
AES_ENC_ROUND(3 * 4 + 16, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 32, r, s);
AES_ENC_ROUND(0 * 4 + 48, s, r);
AES_ENC_ROUND(1 * 4 + 32, r, t);
AES_ENC_ROUND(1 * 4 + 48, t, r);
AES_ENC_ROUND(2 * 4 + 32, r, u);
AES_ENC_ROUND(2 * 4 + 48, u, r);
AES_ENC_ROUND(3 * 4 + 32, r, v);
AES_ENC_ROUND(3 * 4 + 48, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 64, r, s);
AES_ENC_ROUND(0 * 4 + 80, s, r);
AES_ENC_ROUND(1 * 4 + 64, r, t);
AES_ENC_ROUND(1 * 4 + 80, t, r);
AES_ENC_ROUND(2 * 4 + 64, r, u);
AES_ENC_ROUND(2 * 4 + 80, u, r);
AES_ENC_ROUND(3 * 4 + 64, r, v);
AES_ENC_ROUND(3 * 4 + 80, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 96, r, s);
AES_ENC_ROUND(0 * 4 + 112, s, r);
AES_ENC_ROUND(1 * 4 + 96, r, t);
AES_ENC_ROUND(1 * 4 + 112, t, r);
AES_ENC_ROUND(2 * 4 + 96, r, u);
AES_ENC_ROUND(2 * 4 + 112, u, r);
AES_ENC_ROUND(3 * 4 + 96, r, v);
AES_ENC_ROUND(3 * 4 + 112, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 128, r, s);
AES_ENC_ROUND(0 * 4 + 144, s, r);
AES_ENC_ROUND(1 * 4 + 128, r, t);
AES_ENC_ROUND(1 * 4 + 144, t, r);
AES_ENC_ROUND(2 * 4 + 128, r, u);
AES_ENC_ROUND(2 * 4 + 144, u, r);
AES_ENC_ROUND(3 * 4 + 128, r, v);
AES_ENC_ROUND(3 * 4 + 144, v, r);
MIX_512
load[1] = s2 | ((uint64_t)s3) << 32;
hash[TX * 4] = load[1] ^ msg[8 * TX + 1];
load[1] = t2 | ((uint64_t)t3) << 32;
hash[TX * 4 + 1] = load[1] ^ msg[8 * TX + 3];
load[1] = u0 | ((uint64_t)u1) << 32;
hash[TX * 4 + 2] = load[1] ^ msg[8 * TX + 4];
load[1] = v0 | ((uint64_t)v1) << 32;
load[1] ^= load[0];
hash[TX * 4 + 3] = load[1];
}
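// Added usage sketch (not in the original source): a minimal host-side launcher
// for haraka512Kernel. COPY_CONSTANT_SHARED_ENC fills the 256-entry shared
// T-tables with one entry per thread (Tes0[threadIdx.x] = Te0[threadIdx.x]), so
// this sketch assumes exactly 256 threads per block -- fewer threads would leave
// table entries uninitialized, more would write past the shared arrays. The
// pointer names d_msg and d_hash are illustrative, not part of this file.
static inline void launchHaraka512(const uint64_t* d_msg, uint64_t* d_hash,
                                   uint32_t num_msgs, cudaStream_t stream = 0)
{
    if (num_msgs == 0)
        return;
    const uint32_t threads = 256; // must match the shared T-table copy above
    const uint32_t blocks = (num_msgs + threads - 1) / threads;
    haraka512Kernel<<<blocks, threads, 0, stream>>>(d_msg, d_hash, num_msgs);
}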
#define GLOBAL_LOAD_SHARED_SETUP_256(input) \
register uint32_t r0, r1, r2, r3; \
register uint32_t s0, s1, s2, s3; \
register uint32_t t0, t1, t2, t3; \
register uint64_t load[4]; \
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(input)[2 * TX]; \
s0 = load[0]; \
s1 = load[0] >> 32; \
s2 = load[1]; \
s3 = load[1] >> 32; \
reinterpret_cast<uint4*>(load)[1] = reinterpret_cast<const uint4*>(input)[2 * TX + 1]; \
t0 = load[2]; \
t1 = load[2] >> 32; \
t2 = load[3]; \
t3 = load[3] >> 32;
#define MIX_256 \
r0 = s1; \
r1 = s3; \
\
s1 = t0; \
t0 = s2; \
s2 = r0; \
\
s3 = t1; \
t1 = t2; \
t2 = r1;
__global__ void haraka256Kernel(const uint64_t* msg, uint64_t* hash, const uint32_t num_msgs)
{
COPY_CONSTANT_SHARED_ENC
if (TX >= (num_msgs))
return;
GLOBAL_LOAD_SHARED_SETUP_256(msg)
AES_ENC_ROUND(0 * 4, r, s);
AES_ENC_ROUND(0 * 4 + 8, s, r);
AES_ENC_ROUND(1 * 4, r, t);
AES_ENC_ROUND(1 * 4 + 8, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 16, r, s);
AES_ENC_ROUND(0 * 4 + 24, s, r);
AES_ENC_ROUND(1 * 4 + 16, r, t);
AES_ENC_ROUND(1 * 4 + 24, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 32, r, s);
AES_ENC_ROUND(0 * 4 + 40, s, r);
AES_ENC_ROUND(1 * 4 + 32, r, t);
AES_ENC_ROUND(1 * 4 + 40, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 48, r, s);
AES_ENC_ROUND(0 * 4 + 56, s, r);
AES_ENC_ROUND(1 * 4 + 48, r, t);
AES_ENC_ROUND(1 * 4 + 56, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 64, r, s);
AES_ENC_ROUND(0 * 4 + 72, s, r);
AES_ENC_ROUND(1 * 4 + 64, r, t);
AES_ENC_ROUND(1 * 4 + 72, t, r);
MIX_256
load[0] ^= (s0 | ((uint64_t)s1) << 32);
load[1] ^= (s2 | ((uint64_t)s3) << 32);
reinterpret_cast<uint4*>(hash)[2 * TX] = reinterpret_cast<uint4*>(load)[0];
load[2] ^= (t0 | ((uint64_t)t1) << 32);
load[3] ^= (t2 | ((uint64_t)t3) << 32);
reinterpret_cast<uint4*>(hash)[2 * TX + 1] = reinterpret_cast<uint4*>(load)[1];
}
__global__ void harakaOTSKernel(const uint64_t* msg, uint16_t* b, const uint32_t num_msgs)
{
COPY_CONSTANT_SHARED_ENC
if (TX >= (num_msgs))
return;
GLOBAL_LOAD_SHARED_SETUP_256(msg)
AES_ENC_ROUND(0 * 4, r, s);
AES_ENC_ROUND(0 * 4 + 8, s, r);
AES_ENC_ROUND(1 * 4, r, t);
AES_ENC_ROUND(1 * 4 + 8, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 16, r, s);
AES_ENC_ROUND(0 * 4 + 24, s, r);
AES_ENC_ROUND(1 * 4 + 16, r, t);
AES_ENC_ROUND(1 * 4 + 24, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 32, r, s);
AES_ENC_ROUND(0 * 4 + 40, s, r);
AES_ENC_ROUND(1 * 4 + 32, r, t);
AES_ENC_ROUND(1 * 4 + 40, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 48, r, s);
AES_ENC_ROUND(0 * 4 + 56, s, r);
AES_ENC_ROUND(1 * 4 + 48, r, t);
AES_ENC_ROUND(1 * 4 + 56, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 64, r, s);
AES_ENC_ROUND(0 * 4 + 72, s, r);
AES_ENC_ROUND(1 * 4 + 64, r, t);
AES_ENC_ROUND(1 * 4 + 72, t, r);
MIX_256
load[0] ^= (s0 | ((uint64_t)s1) << 32);
load[1] ^= (s2 | ((uint64_t)s3) << 32);
load[2] ^= (t0 | ((uint64_t)t1) << 32);
load[3] ^= (t2 | ((uint64_t)t3) << 32);
#pragma unroll
for(int i = 1; i < 17; ++i)
b[17 * TX + i] = reinterpret_cast<uint16_t*>(load)[i - 1];
uint16_t checksum = 256 - reinterpret_cast<uint8_t*>(load)[0];
#pragma unroll
for (int i = 1; i < 32; ++i)
checksum += (256 - reinterpret_cast<uint8_t*>(load)[i]);
b[17 * TX] = checksum;
}
__global__ void harakaOTSCreateSignatureKernel(const uint64_t* priv_key, uint64_t* pub_key, uint64_t* signature, const uint8_t* b, const uint32_t num_chunks)
{
COPY_CONSTANT_SHARED_ENC
if (TX >= (num_chunks))
return;
GLOBAL_LOAD_SHARED_SETUP_256(priv_key)
uint8_t b_ = b[TX];
#pragma unroll
for (int i = 0; i < 255; ++i)
{
if (i == b_)
{
reinterpret_cast<uint4*>(signature)[2 * TX] = reinterpret_cast<uint4*>(load)[0];
reinterpret_cast<uint4*>(signature)[2 * TX + 1] = reinterpret_cast<uint4*>(load)[1];
}
AES_ENC_ROUND(0 * 4, r, s);
AES_ENC_ROUND(0 * 4 + 8, s, r);
AES_ENC_ROUND(1 * 4, r, t);
AES_ENC_ROUND(1 * 4 + 8, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 16, r, s);
AES_ENC_ROUND(0 * 4 + 24, s, r);
AES_ENC_ROUND(1 * 4 + 16, r, t);
AES_ENC_ROUND(1 * 4 + 24, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 32, r, s);
AES_ENC_ROUND(0 * 4 + 40, s, r);
AES_ENC_ROUND(1 * 4 + 32, r, t);
AES_ENC_ROUND(1 * 4 + 40, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 48, r, s);
AES_ENC_ROUND(0 * 4 + 56, s, r);
AES_ENC_ROUND(1 * 4 + 48, r, t);
AES_ENC_ROUND(1 * 4 + 56, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 64, r, s);
AES_ENC_ROUND(0 * 4 + 72, s, r);
AES_ENC_ROUND(1 * 4 + 64, r, t);
AES_ENC_ROUND(1 * 4 + 72, t, r);
MIX_256
load[0] ^= (s0 | ((uint64_t)s1) << 32);
load[1] ^= (s2 | ((uint64_t)s3) << 32);
load[2] ^= (t0 | ((uint64_t)t1) << 32);
load[3] ^= (t2 | ((uint64_t)t3) << 32);
if (i < 255)
{
s0 = load[0];
s1 = load[0] >> 32;
s2 = load[1];
s3 = load[1] >> 32;
t0 = load[2];
t1 = load[2] >> 32;
t2 = load[3];
t3 = load[3] >> 32;
}
}
if (b_ == 255)
{
reinterpret_cast<uint4*>(signature)[2 * TX] = reinterpret_cast<uint4*>(load)[0];
reinterpret_cast<uint4*>(signature)[2 * TX + 1] = reinterpret_cast<uint4*>(load)[1];
}
reinterpret_cast<uint4*>(pub_key)[2 * TX] = reinterpret_cast<uint4*>(load)[0];
reinterpret_cast<uint4*>(pub_key)[2 * TX + 1] = reinterpret_cast<uint4*>(load)[1];
}
__global__ void harakaOTSCreateVerificationKernel(const uint64_t* signature, uint64_t* verification, uint8_t* b, const uint32_t num_chunks)
{
COPY_CONSTANT_SHARED_ENC
if (TX >= (num_chunks))
return;
GLOBAL_LOAD_SHARED_SETUP_256(signature)
uint8_t b_ = UINT8_MAX - b[TX];
for (uint8_t i = 0; i < b_; ++i)
{
AES_ENC_ROUND(0 * 4, r, s);
AES_ENC_ROUND(0 * 4 + 8, s, r);
AES_ENC_ROUND(1 * 4, r, t);
AES_ENC_ROUND(1 * 4 + 8, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 16, r, s);
AES_ENC_ROUND(0 * 4 + 24, s, r);
AES_ENC_ROUND(1 * 4 + 16, r, t);
AES_ENC_ROUND(1 * 4 + 24, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 32, r, s);
AES_ENC_ROUND(0 * 4 + 40, s, r);
AES_ENC_ROUND(1 * 4 + 32, r, t);
AES_ENC_ROUND(1 * 4 + 40, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 48, r, s);
AES_ENC_ROUND(0 * 4 + 56, s, r);
AES_ENC_ROUND(1 * 4 + 48, r, t);
AES_ENC_ROUND(1 * 4 + 56, t, r);
MIX_256
AES_ENC_ROUND(0 * 4 + 64, r, s);
AES_ENC_ROUND(0 * 4 + 72, s, r);
AES_ENC_ROUND(1 * 4 + 64, r, t);
AES_ENC_ROUND(1 * 4 + 72, t, r);
MIX_256
load[0] ^= (s0 | ((uint64_t)s1) << 32);
load[1] ^= (s2 | ((uint64_t)s3) << 32);
load[2] ^= (t0 | ((uint64_t)t1) << 32);
load[3] ^= (t2 | ((uint64_t)t3) << 32);
if (i < 255)
{
s0 = load[0];
s1 = load[0] >> 32;
s2 = load[1];
s3 = load[1] >> 32;
t0 = load[2];
t1 = load[2] >> 32;
t2 = load[3];
t3 = load[3] >> 32;
}
}
reinterpret_cast<uint4*>(verification)[2 * TX] = reinterpret_cast<uint4*>(load)[0];
reinterpret_cast<uint4*>(verification)[2 * TX + 1] = reinterpret_cast<uint4*>(load)[1];
}
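// Added commentary (not in the original source): together with
// harakaOTSCreateSignatureKernel this implements a Winternitz-style chain
// check -- the signature chunk equals H^b(priv_key), so advancing it another
// 255 - b steps here should reproduce the public key H^255(priv_key), which
// the caller can then compare against the stored public key.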
#define GLOBAL_LOAD_SHARED_SETUP_MERKLE \
register uint32_t r0, r1, r2, r3; \
register uint32_t s0, s1, s2, s3; \
register uint32_t t0, t1, t2, t3; \
register uint32_t u0, u1, u2, u3; \
register uint32_t v0, v1, v2, v3; \
register uint64_t load[2]; \
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(tree)[(childs - 1) * 2 + 4 * TX]; \
s0 = load[0]; \
s1 = load[0] >> 32; \
s2 = load[1]; \
s3 = load[1] >> 32; \
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(tree)[(childs - 1) * 2 + 4 * TX + 1]; \
t0 = load[0]; \
t1 = load[0] >> 32; \
t2 = load[1]; \
t3 = load[1] >> 32;\
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(tree)[(childs - 1) * 2 + 4 * TX + 2]; \
u0 = load[0]; \
u1 = load[0] >> 32; \
u2 = load[1]; \
u3 = load[1] >> 32;\
reinterpret_cast<uint4*>(load)[0] = reinterpret_cast<const uint4*>(tree)[(childs - 1) * 2 + 4 * TX + 3]; \
v0 = load[0]; \
v1 = load[0] >> 32; \
v2 = load[1]; \
v3 = load[1] >> 32;
__global__ void harakaBuildMerkleTree(uint64_t* tree, const uint32_t num_parents, const uint32_t depth)
{
COPY_CONSTANT_SHARED_ENC
if (TX >= (num_parents))
return;
uint32_t childs = num_parents * 2;
GLOBAL_LOAD_SHARED_SETUP_MERKLE
for (int s = 1; s < (1 << depth); s *= 2)
{
if ((threadIdx.x % s) == 0)
{
AES_ENC_ROUND(0, r, s);
AES_ENC_ROUND(16, s, r);
AES_ENC_ROUND(1 * 4, r, t);
AES_ENC_ROUND(1 * 4 + 16, t, r);
AES_ENC_ROUND(2 * 4, r, u);
AES_ENC_ROUND(2 * 4 + 16, u, r);
AES_ENC_ROUND(3 * 4, r, v);
AES_ENC_ROUND(3 * 4 + 16, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 32, r, s);
AES_ENC_ROUND(0 * 4 + 48, s, r);
AES_ENC_ROUND(1 * 4 + 32, r, t);
AES_ENC_ROUND(1 * 4 + 48, t, r);
AES_ENC_ROUND(2 * 4 + 32, r, u);
AES_ENC_ROUND(2 * 4 + 48, u, r);
AES_ENC_ROUND(3 * 4 + 32, r, v);
AES_ENC_ROUND(3 * 4 + 48, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 64, r, s);
AES_ENC_ROUND(0 * 4 + 80, s, r);
AES_ENC_ROUND(1 * 4 + 64, r, t);
AES_ENC_ROUND(1 * 4 + 80, t, r);
AES_ENC_ROUND(2 * 4 + 64, r, u);
AES_ENC_ROUND(2 * 4 + 80, u, r);
AES_ENC_ROUND(3 * 4 + 64, r, v);
AES_ENC_ROUND(3 * 4 + 80, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 96, r, s);
AES_ENC_ROUND(0 * 4 + 112, s, r);
AES_ENC_ROUND(1 * 4 + 96, r, t);
AES_ENC_ROUND(1 * 4 + 112, t, r);
AES_ENC_ROUND(2 * 4 + 96, r, u);
AES_ENC_ROUND(2 * 4 + 112, u, r);
AES_ENC_ROUND(3 * 4 + 96, r, v);
AES_ENC_ROUND(3 * 4 + 112, v, r);
MIX_512
AES_ENC_ROUND(0 * 4 + 128, r, s);
AES_ENC_ROUND(0 * 4 + 144, s, r);
AES_ENC_ROUND(1 * 4 + 128, r, t);
AES_ENC_ROUND(1 * 4 + 144, t, r);
AES_ENC_ROUND(2 * 4 + 128, r, u);
AES_ENC_ROUND(2 * 4 + 144, u, r);
AES_ENC_ROUND(3 * 4 + 128, r, v);
AES_ENC_ROUND(3 * 4 + 144, v, r);
MIX_512
load[1] = s2 | ((uint64_t)s3) << 32;
load[1] ^= tree[(childs - 1) * 4 + 8 * TX / s + 1];
tree[(childs / 2 - 1) * 4 + TX / s * 4] = load[1];
s0 = load[1];
s1 = load[1] >> 32;
load[1] = t2 | ((uint64_t)t3) << 32;
load[1] ^= tree[(childs - 1) * 4 + 8 * TX / s + 3];
tree[(childs / 2 - 1) * 4 + TX / s * 4 + 1] = load[1];
s2 = load[1];
s3 = load[1] >> 32;
load[1] = u0 | ((uint64_t)u1) << 32;
load[1] ^= tree[(childs - 1) * 4 + 8 * TX / s + 4];
tree[(childs / 2 - 1) * 4 + TX / s * 4 + 2] = load[1];
t0 = load[1];
t1 = load[1] >> 32;
load[1] = v0 | ((uint64_t)v1) << 32;
load[1] ^= tree[(childs - 1) * 4 + 8 * TX / s + 6];
tree[(childs / 2 - 1) * 4 + TX / s * 4 + 3] = load[1];
t2 = load[1];
t3 = load[1] >> 32;
u0 = __shfl_down(s0, s);
u1 = __shfl_down(s1, s);
u2 = __shfl_down(s2, s);
u3 = __shfl_down(s3, s);
v0 = __shfl_down(t0, s);
v1 = __shfl_down(t1, s);
v2 = __shfl_down(t2, s);
v3 = __shfl_down(t3, s);
childs = childs >> 1;
}
}
} |
0ccb2c2e3bb0dccffc7aaada5f8bdc4f3228680b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
////////////////////////////////////////////////////////////////////////////////
// Global types and parameters
////////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <helper_cuda.h>
#include <sys/time.h>
#include "binomialOptions_common.h"
#include "realtype.h"
////////////////////////////////////////////////////////////////////////////////
// Overloaded shortcut functions for different precision modes
////////////////////////////////////////////////////////////////////////////////
#ifndef DOUBLE_PRECISION
__device__ inline float expiryCallValue(float S, float X, float vDt, int i)
{
float d = S * __expf(vDt * (2.0f * i - NUM_STEPS)) - X;
return (d > 0.0F) ? d : 0.0F;
}
#else
__device__ inline double expiryCallValue(double S, double X, double vDt, int i)
{
double d = S * exp(vDt * (2.0 * i - NUM_STEPS)) - X;
return (d > 0.0) ? d : 0.0;
}
#endif
////////////////////////////////////////////////////////////////////////////////
// GPU kernel
////////////////////////////////////////////////////////////////////////////////
#define THREADBLOCK_SIZE 128
#define ELEMS_PER_THREAD (NUM_STEPS/THREADBLOCK_SIZE)
#if NUM_STEPS % THREADBLOCK_SIZE
#error Bad constants
#endif
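// Added commentary (not in the original sample): each thread owns
// ELEMS_PER_THREAD consecutive nodes of the binomial lattice, so the backward
// induction below only needs to exchange one boundary value per thread per
// step through the shared call_exchange[] array.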
__global__ void
binomialOptionsKernel(option_data_t *d_OptionData, real *d_CallValue)
{
__shared__ real call_exchange[THREADBLOCK_SIZE + 1];
const int tid = threadIdx.x;
const real S = d_OptionData[blockIdx.x].S;
const real X = d_OptionData[blockIdx.x].X;
const real vDt = d_OptionData[blockIdx.x].vDt;
const real puByDf = d_OptionData[blockIdx.x].puByDf;
const real pdByDf = d_OptionData[blockIdx.x].pdByDf;
real call[ELEMS_PER_THREAD + 1];
#pragma unroll
for(int i = 0; i < ELEMS_PER_THREAD; ++i)
call[i] = expiryCallValue(S, X, vDt, tid * ELEMS_PER_THREAD + i);
if (tid == 0)
call_exchange[THREADBLOCK_SIZE] = expiryCallValue(S, X, vDt, NUM_STEPS);
int final_it = max(0, tid * ELEMS_PER_THREAD - 1);
#pragma unroll 16
for(int i = NUM_STEPS; i > 0; --i) {
call_exchange[tid] = call[0];
__syncthreads();
call[ELEMS_PER_THREAD] = call_exchange[tid + 1];
__syncthreads();
if (i > final_it) {
#pragma unroll
for(int j = 0; j < ELEMS_PER_THREAD; ++j)
call[j] = puByDf * call[j + 1] + pdByDf * call[j];
}
}
if (tid == 0) {
d_CallValue[blockIdx.x] = call[0];
}
}
| 0ccb2c2e3bb0dccffc7aaada5f8bdc4f3228680b.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
////////////////////////////////////////////////////////////////////////////////
// Global types and parameters
////////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <helper_cuda.h>
#include <sys/time.h>
#include "binomialOptions_common.h"
#include "realtype.h"
////////////////////////////////////////////////////////////////////////////////
// Overloaded shortcut functions for different precision modes
////////////////////////////////////////////////////////////////////////////////
#ifndef DOUBLE_PRECISION
__device__ inline float expiryCallValue(float S, float X, float vDt, int i)
{
float d = S * __expf(vDt * (2.0f * i - NUM_STEPS)) - X;
return (d > 0.0F) ? d : 0.0F;
}
#else
__device__ inline double expiryCallValue(double S, double X, double vDt, int i)
{
double d = S * exp(vDt * (2.0 * i - NUM_STEPS)) - X;
return (d > 0.0) ? d : 0.0;
}
#endif
////////////////////////////////////////////////////////////////////////////////
// GPU kernel
////////////////////////////////////////////////////////////////////////////////
#define THREADBLOCK_SIZE 128
#define ELEMS_PER_THREAD (NUM_STEPS/THREADBLOCK_SIZE)
#if NUM_STEPS % THREADBLOCK_SIZE
#error Bad constants
#endif
__global__ void
binomialOptionsKernel(option_data_t *d_OptionData, real *d_CallValue)
{
__shared__ real call_exchange[THREADBLOCK_SIZE + 1];
const int tid = threadIdx.x;
const real S = d_OptionData[blockIdx.x].S;
const real X = d_OptionData[blockIdx.x].X;
const real vDt = d_OptionData[blockIdx.x].vDt;
const real puByDf = d_OptionData[blockIdx.x].puByDf;
const real pdByDf = d_OptionData[blockIdx.x].pdByDf;
real call[ELEMS_PER_THREAD + 1];
#pragma unroll
for(int i = 0; i < ELEMS_PER_THREAD; ++i)
call[i] = expiryCallValue(S, X, vDt, tid * ELEMS_PER_THREAD + i);
if (tid == 0)
call_exchange[THREADBLOCK_SIZE] = expiryCallValue(S, X, vDt, NUM_STEPS);
int final_it = max(0, tid * ELEMS_PER_THREAD - 1);
#pragma unroll 16
for(int i = NUM_STEPS; i > 0; --i) {
call_exchange[tid] = call[0];
__syncthreads();
call[ELEMS_PER_THREAD] = call_exchange[tid + 1];
__syncthreads();
if (i > final_it) {
#pragma unroll
for(int j = 0; j < ELEMS_PER_THREAD; ++j)
call[j] = puByDf * call[j + 1] + pdByDf * call[j];
}
}
if (tid == 0) {
d_CallValue[blockIdx.x] = call[0];
}
}
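// Added usage sketch (not in the original sample file): the kernel reduces one
// option per thread block with THREADBLOCK_SIZE cooperating threads, so a
// plausible launch is one block per option. optN, d_OptionData and d_CallValue
// are illustrative caller-side names, not defined in this file.
static void launchBinomialOptions(option_data_t *d_OptionData, real *d_CallValue, int optN)
{
    if (optN <= 0)
        return;
    binomialOptionsKernel<<<optN, THREADBLOCK_SIZE>>>(d_OptionData, d_CallValue);
    getLastCudaError("binomialOptionsKernel() execution failed.\n");
}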
|
18d797edcf2bfa4f6c297e94756579e957573988.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
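// Added commentary (not in upstream Caffe): each loop iteration handles one
// (image, spatial position) pair; the loss is -log of the probability at the
// labelled channel, clamped at FLT_MIN to avoid -log(0), and counts[] records
// which positions were not ignored so the caller can normalize by the valid count.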
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything, we use it here to avoid having
// to allocate new GPU memory to accumulate intermediate results.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
normalization_, outer_num_, inner_num_, valid_count);
top[0]->mutable_cpu_data()[0] = loss / normalizer;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
// Clear scratch memory to prevent interfering with backward (see #6202).
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
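// Added commentary (not in upstream Caffe): Backward_gpu first copies prob_
// into bottom_diff, and this kernel then subtracts 1 at the ground-truth
// channel, which is exactly the softmax cross-entropy gradient
// dL/dz_c = p_c - [c == label] at every valid spatial position.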
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
normalization_, outer_num_, inner_num_, valid_count);
const Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer;
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
| 18d797edcf2bfa4f6c297e94756579e957573988.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything, we use it here to avoid having
// to allocate new GPU memory to accumulate intermediate results.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
normalization_, outer_num_, inner_num_, valid_count);
top[0]->mutable_cpu_data()[0] = loss / normalizer;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
// Clear scratch memory to prevent interfering with backward (see #6202).
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
Dtype normalizer = LossLayer<Dtype>::GetNormalizer(
normalization_, outer_num_, inner_num_, valid_count);
const Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer;
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
|
2256d69873e7337cd8ca8bf63e4a49e751abbdfa.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Alexander Ocsa <[email protected]>
* Copyright 2018 Felipe Aramburu <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "helper/utils.cuh"
#include <tests/utilities/cudf_test_fixtures.h>
#include <cudf.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <gtest/gtest.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <tuple>
/*
============================================================================
Description : Compute gdf_comparison and apply_stencil of gdf_columns using Thrust on GPU
============================================================================
*/
struct FilterOperationsTest : public GdfTest {};
TEST_F(FilterOperationsTest, usage_example) {
using LeftValueType = int16_t;
using RightValueType = int16_t;
int column_size = 10;
int init_value = 10;
int max_size = 4;
gdf_comparison_operator gdf_operator = GDF_EQUALS;
gdf_column lhs = gen_gdb_column<LeftValueType>(column_size, init_value); // 4, 2, 0
gdf_column rhs = gen_gdb_column<RightValueType>(column_size, 0.01 + max_size - init_value); // 0, 2, 4
gdf_column output = gen_gdb_column<int8_t>(column_size, 0);
gdf_error error = gdf_comparison(&lhs, &rhs, &output, gdf_operator);
EXPECT_TRUE(error == GDF_SUCCESS);
std::cout << "Left" << std::endl;
print_column<LeftValueType>(&lhs);
std::cout << "Right" << std::endl;
print_column<RightValueType>(&rhs);
std::cout << "Output" << std::endl;
print_column<int8_t>(&output);
check_column_for_comparison_operation<LeftValueType, RightValueType>(&lhs, &rhs, &output, gdf_operator);
/// lhs.dtype === rhs.dtype
gdf_apply_stencil(&lhs, &output, &rhs);
check_column_for_stencil_operation<LeftValueType, RightValueType>(&lhs, &output, &rhs);
delete_gdf_column(&lhs);
delete_gdf_column(&rhs);
delete_gdf_column(&output);
}
template <typename LeftValueType, typename RightValueType>
void test_filterops_using_templates(gdf_comparison_operator gdf_operator = GDF_EQUALS)
{
//0, ..., 100,
//100, 10000, 10000, 100000
for (int column_size = 0; column_size < 10; column_size += 1)
{
const int max_size = 8;
for (int init_value = 0; init_value <= 1; init_value++)
{
gdf_column lhs = gen_gdb_column<LeftValueType>(column_size, init_value); // 4, 2, 0
// lhs.null_count = 2;
gdf_column rhs = gen_gdb_column<RightValueType>(column_size, 0.01 + max_size - init_value); // 0, 2, 4
// rhs.null_count = 1;
gdf_column output = gen_gdb_column<int8_t>(column_size, 0);
gdf_error error = gdf_comparison(&lhs, &rhs, &output, gdf_operator);
EXPECT_TRUE(error == GDF_SUCCESS);
check_column_for_comparison_operation<LeftValueType, RightValueType>(&lhs, &rhs, &output, gdf_operator);
if (lhs.dtype == rhs.dtype ) {
gdf_apply_stencil(&lhs, &output, &rhs);
check_column_for_stencil_operation<LeftValueType, RightValueType>(&lhs, &output, &rhs);
}
delete_gdf_column(&lhs);
delete_gdf_column(&rhs);
delete_gdf_column(&output);
}
}
}
TEST_F(FilterOperationsTest, WithInt8AndOthers)
{
test_filterops_using_templates<int8_t, int8_t>();
test_filterops_using_templates<int8_t, int16_t>();
test_filterops_using_templates<int8_t, int32_t>();
test_filterops_using_templates<int8_t, int64_t>();
test_filterops_using_templates<int8_t, float>();
test_filterops_using_templates<int8_t, double>();
}
TEST_F(FilterOperationsTest, WithInt16AndOthers)
{
test_filterops_using_templates<int16_t, int8_t>();
test_filterops_using_templates<int16_t, int16_t>();
test_filterops_using_templates<int16_t, int32_t>();
test_filterops_using_templates<int16_t, int64_t>();
test_filterops_using_templates<int16_t, float>();
test_filterops_using_templates<int16_t, double>();
}
TEST_F(FilterOperationsTest, WithInt32AndOthers)
{
test_filterops_using_templates<int32_t, int8_t>();
test_filterops_using_templates<int32_t, int16_t>();
test_filterops_using_templates<int32_t, int32_t>();
test_filterops_using_templates<int32_t, int64_t>();
test_filterops_using_templates<int32_t, float>();
test_filterops_using_templates<int32_t, double>();
}
TEST_F(FilterOperationsTest, WithInt64AndOthers)
{
test_filterops_using_templates<int64_t, int8_t>();
test_filterops_using_templates<int64_t, int16_t>();
test_filterops_using_templates<int64_t, int32_t>();
test_filterops_using_templates<int64_t, int64_t>();
test_filterops_using_templates<int64_t, float>();
test_filterops_using_templates<int64_t, double>();
}
TEST_F(FilterOperationsTest, WithFloat32AndOthers)
{
test_filterops_using_templates<float, int8_t>();
test_filterops_using_templates<float, int16_t>();
test_filterops_using_templates<float, int32_t>();
test_filterops_using_templates<float, int64_t>();
test_filterops_using_templates<float, float>();
test_filterops_using_templates<float, double>();
}
TEST_F(FilterOperationsTest, WithFloat64AndOthers)
{
test_filterops_using_templates<double, int8_t>();
test_filterops_using_templates<double, int16_t>();
test_filterops_using_templates<double, int32_t>();
test_filterops_using_templates<double, int64_t>();
test_filterops_using_templates<double, float>();
test_filterops_using_templates<double, double>();
}
| 2256d69873e7337cd8ca8bf63e4a49e751abbdfa.cu | /*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Alexander Ocsa <[email protected]>
* Copyright 2018 Felipe Aramburu <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "helper/utils.cuh"
#include <tests/utilities/cudf_test_fixtures.h>
#include <cudf.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <gtest/gtest.h>
#include <cuda_runtime.h>
#include <iostream>
#include <tuple>
/*
============================================================================
Description : Compute gdf_comparison and apply_stencil of gdf_columns using Thrust on GPU
============================================================================
*/
struct FilterOperationsTest : public GdfTest {};
TEST_F(FilterOperationsTest, usage_example) {
using LeftValueType = int16_t;
using RightValueType = int16_t;
int column_size = 10;
int init_value = 10;
int max_size = 4;
gdf_comparison_operator gdf_operator = GDF_EQUALS;
gdf_column lhs = gen_gdb_column<LeftValueType>(column_size, init_value); // 4, 2, 0
gdf_column rhs = gen_gdb_column<RightValueType>(column_size, 0.01 + max_size - init_value); // 0, 2, 4
gdf_column output = gen_gdb_column<int8_t>(column_size, 0);
gdf_error error = gdf_comparison(&lhs, &rhs, &output, gdf_operator);
EXPECT_TRUE(error == GDF_SUCCESS);
std::cout << "Left" << std::endl;
print_column<LeftValueType>(&lhs);
std::cout << "Right" << std::endl;
print_column<RightValueType>(&rhs);
std::cout << "Output" << std::endl;
print_column<int8_t>(&output);
check_column_for_comparison_operation<LeftValueType, RightValueType>(&lhs, &rhs, &output, gdf_operator);
/// lhs.dtype === rhs.dtype
gdf_apply_stencil(&lhs, &output, &rhs);
check_column_for_stencil_operation<LeftValueType, RightValueType>(&lhs, &output, &rhs);
delete_gdf_column(&lhs);
delete_gdf_column(&rhs);
delete_gdf_column(&output);
}
template <typename LeftValueType, typename RightValueType>
void test_filterops_using_templates(gdf_comparison_operator gdf_operator = GDF_EQUALS)
{
//0, ..., 100,
//100, 10000, 10000, 100000
for (int column_size = 0; column_size < 10; column_size += 1)
{
const int max_size = 8;
for (int init_value = 0; init_value <= 1; init_value++)
{
gdf_column lhs = gen_gdb_column<LeftValueType>(column_size, init_value); // 4, 2, 0
// lhs.null_count = 2;
gdf_column rhs = gen_gdb_column<RightValueType>(column_size, 0.01 + max_size - init_value); // 0, 2, 4
// rhs.null_count = 1;
gdf_column output = gen_gdb_column<int8_t>(column_size, 0);
gdf_error error = gdf_comparison(&lhs, &rhs, &output, gdf_operator);
EXPECT_TRUE(error == GDF_SUCCESS);
check_column_for_comparison_operation<LeftValueType, RightValueType>(&lhs, &rhs, &output, gdf_operator);
if (lhs.dtype == rhs.dtype ) {
gdf_apply_stencil(&lhs, &output, &rhs);
check_column_for_stencil_operation<LeftValueType, RightValueType>(&lhs, &output, &rhs);
}
delete_gdf_column(&lhs);
delete_gdf_column(&rhs);
delete_gdf_column(&output);
}
}
}
TEST_F(FilterOperationsTest, WithInt8AndOthers)
{
test_filterops_using_templates<int8_t, int8_t>();
test_filterops_using_templates<int8_t, int16_t>();
test_filterops_using_templates<int8_t, int32_t>();
test_filterops_using_templates<int8_t, int64_t>();
test_filterops_using_templates<int8_t, float>();
test_filterops_using_templates<int8_t, double>();
}
TEST_F(FilterOperationsTest, WithInt16AndOthers)
{
test_filterops_using_templates<int16_t, int8_t>();
test_filterops_using_templates<int16_t, int16_t>();
test_filterops_using_templates<int16_t, int32_t>();
test_filterops_using_templates<int16_t, int64_t>();
test_filterops_using_templates<int16_t, float>();
test_filterops_using_templates<int16_t, double>();
}
TEST_F(FilterOperationsTest, WithInt32AndOthers)
{
test_filterops_using_templates<int32_t, int8_t>();
test_filterops_using_templates<int32_t, int16_t>();
test_filterops_using_templates<int32_t, int32_t>();
test_filterops_using_templates<int32_t, int64_t>();
test_filterops_using_templates<int32_t, float>();
test_filterops_using_templates<int32_t, double>();
}
TEST_F(FilterOperationsTest, WithInt64AndOthers)
{
test_filterops_using_templates<int64_t, int8_t>();
test_filterops_using_templates<int64_t, int16_t>();
test_filterops_using_templates<int64_t, int32_t>();
test_filterops_using_templates<int64_t, int64_t>();
test_filterops_using_templates<int64_t, float>();
test_filterops_using_templates<int64_t, double>();
}
TEST_F(FilterOperationsTest, WithFloat32AndOthers)
{
test_filterops_using_templates<float, int8_t>();
test_filterops_using_templates<float, int16_t>();
test_filterops_using_templates<float, int32_t>();
test_filterops_using_templates<float, int64_t>();
test_filterops_using_templates<float, float>();
test_filterops_using_templates<float, double>();
}
TEST_F(FilterOperationsTest, WithFloat64AndOthers)
{
test_filterops_using_templates<double, int8_t>();
test_filterops_using_templates<double, int16_t>();
test_filterops_using_templates<double, int32_t>();
test_filterops_using_templates<double, int64_t>();
test_filterops_using_templates<double, float>();
test_filterops_using_templates<double, double>();
}
|
23b741467d549687cc0f516dbe41175019ca336d.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <c10/util/Exception.h>
#include <ATen/hip/Exceptions.h>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <TH/THGeneral.h>
#include <hipsparse.h>
#if !defined(_MSC_VER) && defined(__HIPCC__) && CUSPARSE_VERSION >= 10301 // CUDA release >= 10.2 and not windows
#include <hip/library_types.h>
#endif
// LIMITATION (hipsparseSpMM):
// The generic APIs are currently (CUDA 10.1) available for all platforms except Windows.
// Using these APIs in any other systems will result in compile-time or run-time failures.
// Their support will be extended in the next releases.
#if !defined(CUSPARSE_VERSION) || (CUSPARSE_VERSION < 10200)
const char* hipsparseGetErrorString(hipsparseStatus_t status) {
switch(status)
{
case HIPSPARSE_STATUS_SUCCESS:
return "success";
case HIPSPARSE_STATUS_NOT_INITIALIZED:
return "library not initialized";
case HIPSPARSE_STATUS_ALLOC_FAILED:
return "resource allocation failed";
case HIPSPARSE_STATUS_INVALID_VALUE:
return "an invalid numeric value was used as an argument";
case HIPSPARSE_STATUS_ARCH_MISMATCH:
return "an absent device architectural feature is required";
case HIPSPARSE_STATUS_MAPPING_ERROR:
return "an access to GPU memory space failed";
case HIPSPARSE_STATUS_EXECUTION_FAILED:
return "the GPU program failed to execute";
case HIPSPARSE_STATUS_INTERNAL_ERROR:
return "an internal operation failed";
case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "the matrix type is not supported by this function";
case HIPSPARSE_STATUS_ZERO_PIVOT:
return "an entry of the matrix is either structural zero or numerical zero (singular block)";
default:
return "unknown error";
}
}
#endif
namespace at { namespace native { namespace sparse { namespace cuda {
void Xcoo2csr(const int *coorowind, int64_t nnz, int64_t m, int *csrrowptr) {
TORCH_CHECK((m <= INT_MAX) && (nnz <= INT_MAX),
"hipsparseXcoo2csr only supports m, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
int i_m = (int)m;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseXcoo2csr(handle, coorowind, i_nnz, i_m, csrrowptr, HIPSPARSE_INDEX_BASE_ZERO));
}
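// Illustrative worked example (added note, not part of the original file): for a
// row-sorted COO row-index array {0, 0, 1, 2, 2} with nnz = 5 and m = 3, the
// Xcoo2csr wrapper above produces the CSR row-pointer array {0, 2, 3, 5}; entry i
// is the offset of the first nonzero of row i, and the final entry equals nnz.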
hipsparseOperation_t convertTransToCusparseOperation(char trans) {
if (trans == 't') return HIPSPARSE_OPERATION_TRANSPOSE;
else if (trans == 'n') return HIPSPARSE_OPERATION_NON_TRANSPOSE;
else if (trans == 'c') return HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE;
else {
AT_ERROR("trans must be one of: t, n, c");
}
}
#if !defined(_MSC_VER) && defined(__HIPCC__) && CUSPARSE_VERSION >= 10301 // CUDA release >= 10.2 and not windows
template<typename T>
void csrmm2(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
T alpha, T *csrvala, int *csrrowptra, int *csrcolinda,
T *b, int64_t ldb, T beta, T *c, int64_t ldc)
{
static_assert(std::is_same<float, T>::value || std::is_same<double, T>::value, "csrmm2 only supports float and double value types");
constexpr auto cusparse_value_type = std::is_same<float, T>::value ? HIP_R_32F : HIP_R_64F;
if (csrvala == nullptr || b == nullptr || c == nullptr) return;
hipsparseOperation_t opa = convertTransToCusparseOperation(transa);
hipsparseOperation_t opb = convertTransToCusparseOperation(transb);
// hipsparseSpMM actually supports int64_t.
// In order to support int64 here, index pointers csrrowptra, csrcolinda have to be passed as int64_t.
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"At the moment, hipsparseSpMM only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX, ".",
"If you need this, please file an issue on GitHub."
);
int64_t ma = m, ka = k;
if (transa != 'n') std::swap(ma, ka);
hipsparseSpMatDescr_t descA;
TORCH_CUDASPARSE_CHECK(hipsparseCreateCsr(
&descA, /* output */
ma, ka, nnz, /* rows, cols, number of non zero elements */
csrrowptra, /* row offsets of the sparse matrix, size = rows +1 */
csrcolinda, /* column indices of the sparse matrix, size = nnz */
csrvala, /* values of the sparse matrix, size = nnz */
HIPSPARSE_INDEX_32I, /* data type of row offsets index */
HIPSPARSE_INDEX_32I, /* data type of col indices */
HIPSPARSE_INDEX_BASE_ZERO, /* base index of row offsets and col indices */
cusparse_value_type /* data type of values */
));
int64_t kb = k, nb = n;
if (transb != 'n') std::swap(kb, nb);
hipsparseDnMatDescr_t descB;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&descB, /* output */
kb, nb, ldb, /* rows, cols, leading dimension */
b, /* values */
cusparse_value_type, /* data type of values */
HIPSPARSE_ORDER_COL /* memory layout, ONLY column-major is supported now */
));
hipsparseDnMatDescr_t descC;
TORCH_CUDASPARSE_CHECK(hipsparseCreateDnMat(
&descC, /* output */
m, n, ldc, /* rows, cols, leading dimension */
c, /* values */
cusparse_value_type, /* data type of values */
HIPSPARSE_ORDER_COL /* memory layout, ONLY column-major is supported now */
));
auto handle = at::cuda::getCurrentCUDASparseHandle();
// hipsparseSpMM_bufferSize returns the bufferSize that can be used by hipsparseSpMM
size_t bufferSize;
TORCH_CUDASPARSE_CHECK(hipsparseSpMM_bufferSize(
handle, opa, opb,
&alpha,
descA, descB,
&beta,
descC,
cusparse_value_type, /* data type in which the computation is executed */
HIPSPARSE_CSRMM_ALG1, /* default computing algorithm for CSR sparse matrix format */
&bufferSize /* output */
));
auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
auto dataPtr = allocator.allocate(bufferSize);
TORCH_CUDASPARSE_CHECK(hipsparseSpMM(
handle, opa, opb,
&alpha,
descA, descB,
&beta,
descC,
cusparse_value_type, /* data type in which the computation is executed */
HIPSPARSE_CSRMM_ALG1, /* default computing algorithm for CSR sparse matrix format */
dataPtr.get() /* external buffer */
));
TORCH_CUDASPARSE_CHECK(hipsparseDestroySpMat(descA));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(descB));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyDnMat(descC));
// TODO: Proper fix is to create real descriptor classes
}
template void csrmm2<float>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
float alpha, float *csrvala, int *csrrowptra, int *csrcolinda,
float *b, int64_t ldb, float beta, float *c, int64_t ldc);
template void csrmm2<double>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
double alpha, double *csrvala, int *csrrowptra, int *csrcolinda,
double *b, int64_t ldb, double beta, double *c, int64_t ldc);
#else
void adjustLd(char transb, int64_t m, int64_t n, int64_t k, int64_t *ldb, int64_t *ldc)
{
int transb_ = ((transb == 't') || (transb == 'T'));
if(n == 1)
*ldc = m;
if(transb_)
{
if(k == 1)
*ldb = n;
}
else
{
if(n == 1)
*ldb = k;
}
}
void Scsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, float alpha, float *csrvala, int *csrrowptra, int *csrcolinda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
hipsparseOperation_t opa = convertTransToCusparseOperation(transa);
hipsparseOperation_t opb = convertTransToCusparseOperation(transb);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"hipsparseScsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseMatDescr_t desc;
hipsparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(hipsparseScsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyMatDescr(desc));
}
void Dcsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, double alpha, double *csrvala, int *csrrowptra, int *csrcolinda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
hipsparseOperation_t opa = convertTransToCusparseOperation(transa);
hipsparseOperation_t opb = convertTransToCusparseOperation(transb);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"hipsparseDcsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseMatDescr_t desc;
hipsparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(hipsparseDcsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyMatDescr(desc));
// TODO: Proper fix is to create real descriptor classes
}
// T can only be float or double
template<typename T>
void csrmm2(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
T alpha, T *csrvala, int *csrrowptra, int *csrcolinda,
T *b, int64_t ldb, T beta, T *c, int64_t ldc)
{
TORCH_INTERNAL_ASSERT(false, "cusparse csr MM only supports data type of float and double.");
}
template<> void csrmm2<float>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
float alpha, float *csrvala, int *csrrowptra, int *csrcolinda,
float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
Scsrmm2(transa, transb, m, n, k, nnz, alpha, csrvala, csrrowptra, csrcolinda, b, ldb, beta, c, ldc);
}
template<> void csrmm2<double>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
double alpha, double *csrvala, int *csrrowptra, int *csrcolinda,
double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
Dcsrmm2(transa, transb, m, n, k, nnz, alpha, csrvala, csrrowptra, csrcolinda, b, ldb, beta, c, ldc);
}
#endif
/* format conversion */
void CreateIdentityPermutation(int64_t nnz, int *P) {
TORCH_CHECK((nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseCreateIdentityPermutation(handle, i_nnz, P);
}
void Xcsrsort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, const int *csrColInd, size_t *pBufferSizeInBytes)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <=",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseXcsrsort_bufferSizeExt(handle, i_m, i_n, i_nnz, csrRowPtr, csrColInd, pBufferSizeInBytes));
}
void Xcsrsort(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, int *csrColInd, int *P, void *pBuffer)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseMatDescr_t desc;
hipsparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(hipsparseXcsrsort(handle, i_m, i_n, i_nnz, desc, csrRowPtr, csrColInd, P, pBuffer));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyMatDescr(desc));
}
void Xcoosort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcoosort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseXcoosort_bufferSizeExt(handle, i_m, i_n, i_nnz, cooRows, cooCols, pBufferSizeInBytes));
}
void XcoosortByRow(int64_t m, int64_t n, int64_t nnz, int *cooRows, int *cooCols, int *P, void *pBuffer)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"XcoosortByRow only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseXcoosortByRow(handle, i_m, i_n, i_nnz, cooRows, cooCols, P, pBuffer));
}
}}}} // namespace at::native::sparse::cuda
| 23b741467d549687cc0f516dbe41175019ca336d.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Exception.h>
#include <ATen/cuda/Exceptions.h>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <c10/cuda/CUDACachingAllocator.h>
#include <TH/THGeneral.h>
#include <cusparse.h>
#if !defined(_MSC_VER) && defined(__CUDACC__) && CUSPARSE_VERSION >= 10301 // CUDA release >= 10.2 and not windows
#include <library_types.h>
#endif
// LIMITATION (cusparseSpMM):
// The generic APIs are currently (CUDA 10.1) available for all platforms except Windows.
// Using these APIs in any other systems will result in compile-time or run-time failures.
// Their support will be extended in the next releases.
#if !defined(CUSPARSE_VERSION) || (CUSPARSE_VERSION < 10200)
const char* cusparseGetErrorString(cusparseStatus_t status) {
switch(status)
{
case CUSPARSE_STATUS_SUCCESS:
return "success";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "library not initialized";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "resource allocation failed";
case CUSPARSE_STATUS_INVALID_VALUE:
return "an invalid numeric value was used as an argument";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "an absent device architectural feature is required";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "an access to GPU memory space failed";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "the GPU program failed to execute";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "an internal operation failed";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "the matrix type is not supported by this function";
case CUSPARSE_STATUS_ZERO_PIVOT:
return "an entry of the matrix is either structural zero or numerical zero (singular block)";
default:
return "unknown error";
}
}
#endif
namespace at { namespace native { namespace sparse { namespace cuda {
void Xcoo2csr(const int *coorowind, int64_t nnz, int64_t m, int *csrrowptr) {
TORCH_CHECK((m <= INT_MAX) && (nnz <= INT_MAX),
"cusparseXcoo2csr only supports m, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
int i_m = (int)m;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseXcoo2csr(handle, coorowind, i_nnz, i_m, csrrowptr, CUSPARSE_INDEX_BASE_ZERO));
}
cusparseOperation_t convertTransToCusparseOperation(char trans) {
if (trans == 't') return CUSPARSE_OPERATION_TRANSPOSE;
else if (trans == 'n') return CUSPARSE_OPERATION_NON_TRANSPOSE;
else if (trans == 'c') return CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE;
else {
AT_ERROR("trans must be one of: t, n, c");
}
}
#if !defined(_MSC_VER) && defined(__CUDACC__) && CUSPARSE_VERSION >= 10301 // CUDA release >= 10.2 and not windows
template<typename T>
void csrmm2(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
T alpha, T *csrvala, int *csrrowptra, int *csrcolinda,
T *b, int64_t ldb, T beta, T *c, int64_t ldc)
{
static_assert(std::is_same<float, T>::value || std::is_same<double, T>::value, "csrmm2 only supports float and double value types");
constexpr auto cusparse_value_type = std::is_same<float, T>::value ? CUDA_R_32F : CUDA_R_64F;
if (csrvala == nullptr || b == nullptr || c == nullptr) return;
cusparseOperation_t opa = convertTransToCusparseOperation(transa);
cusparseOperation_t opb = convertTransToCusparseOperation(transb);
// cusparseSpMM actually supports int64_t.
// In order to support int64 here, index pointers csrrowptra, csrcolinda have to be passed as int64_t.
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"At the moment, cusparseSpMM only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX, ".",
"If you need this, please file an issue on GitHub."
);
int64_t ma = m, ka = k;
if (transa != 'n') std::swap(ma, ka);
cusparseSpMatDescr_t descA;
TORCH_CUDASPARSE_CHECK(cusparseCreateCsr(
&descA, /* output */
ma, ka, nnz, /* rows, cols, number of non zero elements */
csrrowptra, /* row offsets of the sparse matrix, size = rows +1 */
csrcolinda, /* column indices of the sparse matrix, size = nnz */
csrvala, /* values of the sparse matrix, size = nnz */
CUSPARSE_INDEX_32I, /* data type of row offsets index */
CUSPARSE_INDEX_32I, /* data type of col indices */
CUSPARSE_INDEX_BASE_ZERO, /* base index of row offsets and col indices */
cusparse_value_type /* data type of values */
));
int64_t kb = k, nb = n;
if (transb != 'n') std::swap(kb, nb);
cusparseDnMatDescr_t descB;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&descB, /* output */
kb, nb, ldb, /* rows, cols, leading dimension */
b, /* values */
cusparse_value_type, /* data type of values */
CUSPARSE_ORDER_COL /* memory layout, ONLY column-major is supported now */
));
cusparseDnMatDescr_t descC;
TORCH_CUDASPARSE_CHECK(cusparseCreateDnMat(
&descC, /* output */
m, n, ldc, /* rows, cols, leading dimension */
c, /* values */
cusparse_value_type, /* data type of values */
CUSPARSE_ORDER_COL /* memory layout, ONLY column-major is supported now */
));
auto handle = at::cuda::getCurrentCUDASparseHandle();
// cusparseSpMM_bufferSize returns the bufferSize that can be used by cusparseSpMM
size_t bufferSize;
TORCH_CUDASPARSE_CHECK(cusparseSpMM_bufferSize(
handle, opa, opb,
&alpha,
descA, descB,
&beta,
descC,
cusparse_value_type, /* data type in which the computation is executed */
CUSPARSE_CSRMM_ALG1, /* default computing algorithm for CSR sparse matrix format */
&bufferSize /* output */
));
auto& allocator = *c10::cuda::CUDACachingAllocator::get();
auto dataPtr = allocator.allocate(bufferSize);
TORCH_CUDASPARSE_CHECK(cusparseSpMM(
handle, opa, opb,
&alpha,
descA, descB,
&beta,
descC,
cusparse_value_type, /* data type in which the computation is executed */
CUSPARSE_CSRMM_ALG1, /* default computing algorithm for CSR sparse matrix format */
dataPtr.get() /* external buffer */
));
TORCH_CUDASPARSE_CHECK(cusparseDestroySpMat(descA));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(descB));
TORCH_CUDASPARSE_CHECK(cusparseDestroyDnMat(descC));
// TODO: Proper fix is to create real descriptor classes
}
template void csrmm2<float>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
float alpha, float *csrvala, int *csrrowptra, int *csrcolinda,
float *b, int64_t ldb, float beta, float *c, int64_t ldc);
template void csrmm2<double>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
double alpha, double *csrvala, int *csrrowptra, int *csrcolinda,
double *b, int64_t ldb, double beta, double *c, int64_t ldc);
#else
void adjustLd(char transb, int64_t m, int64_t n, int64_t k, int64_t *ldb, int64_t *ldc)
{
int transb_ = ((transb == 't') || (transb == 'T'));
if(n == 1)
*ldc = m;
if(transb_)
{
if(k == 1)
*ldb = n;
}
else
{
if(n == 1)
*ldb = k;
}
}
void Scsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, float alpha, float *csrvala, int *csrrowptra, int *csrcolinda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
cusparseOperation_t opa = convertTransToCusparseOperation(transa);
cusparseOperation_t opb = convertTransToCusparseOperation(transb);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"cusparseScsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = at::cuda::getCurrentCUDASparseHandle();
cusparseMatDescr_t desc;
cusparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(cusparseScsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
TORCH_CUDASPARSE_CHECK(cusparseDestroyMatDescr(desc));
}
void Dcsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, double alpha, double *csrvala, int *csrrowptra, int *csrcolinda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
cusparseOperation_t opa = convertTransToCusparseOperation(transa);
cusparseOperation_t opb = convertTransToCusparseOperation(transb);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"cusparseDcsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = at::cuda::getCurrentCUDASparseHandle();
cusparseMatDescr_t desc;
cusparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(cusparseDcsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
TORCH_CUDASPARSE_CHECK(cusparseDestroyMatDescr(desc));
// TODO: Proper fix is to create real descriptor classes
}
// T can only be float or double
template<typename T>
void csrmm2(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
T alpha, T *csrvala, int *csrrowptra, int *csrcolinda,
T *b, int64_t ldb, T beta, T *c, int64_t ldc)
{
TORCH_INTERNAL_ASSERT(false, "cusparse csr MM only supports data type of float and double.");
}
template<> void csrmm2<float>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
float alpha, float *csrvala, int *csrrowptra, int *csrcolinda,
float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
Scsrmm2(transa, transb, m, n, k, nnz, alpha, csrvala, csrrowptra, csrcolinda, b, ldb, beta, c, ldc);
}
template<> void csrmm2<double>(
char transa, char transb,
int64_t m, int64_t n, int64_t k, int64_t nnz,
double alpha, double *csrvala, int *csrrowptra, int *csrcolinda,
double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
Dcsrmm2(transa, transb, m, n, k, nnz, alpha, csrvala, csrrowptra, csrcolinda, b, ldb, beta, c, ldc);
}
#endif
/* format conversion */
void CreateIdentityPermutation(int64_t nnz, int *P) {
TORCH_CHECK((nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
cusparseCreateIdentityPermutation(handle, i_nnz, P);
}
void Xcsrsort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, const int *csrColInd, size_t *pBufferSizeInBytes)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <=",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseXcsrsort_bufferSizeExt(handle, i_m, i_n, i_nnz, csrRowPtr, csrColInd, pBufferSizeInBytes));
}
void Xcsrsort(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, int *csrColInd, int *P, void *pBuffer)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
cusparseMatDescr_t desc;
cusparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(cusparseXcsrsort(handle, i_m, i_n, i_nnz, desc, csrRowPtr, csrColInd, P, pBuffer));
TORCH_CUDASPARSE_CHECK(cusparseDestroyMatDescr(desc));
}
void Xcoosort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcoosort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseXcoosort_bufferSizeExt(handle, i_m, i_n, i_nnz, cooRows, cooCols, pBufferSizeInBytes));
}
void XcoosortByRow(int64_t m, int64_t n, int64_t nnz, int *cooRows, int *cooCols, int *P, void *pBuffer)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"XcoosortByRow only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseXcoosortByRow(handle, i_m, i_n, i_nnz, cooRows, cooCols, P, pBuffer));
}
}}}} // namespace at::native::sparse::cuda
|
ee2e3ef00aebcbd0459531d32ab0b2c96162f511.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if !defined(WITH_NV_JETSON) && !defined(PADDLE_WITH_HIP)
#include "paddle/phi/kernels/decode_jpeg_kernel.h"
#include "paddle/phi/backends/dynload/nvjpeg.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/stream.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
static hipStream_t nvjpeg_stream = nullptr;
static nvjpegHandle_t nvjpeg_handle = nullptr;
void InitNvjpegImage(nvjpegImage_t* img) {
for (int c = 0; c < NVJPEG_MAX_COMPONENT; c++) {
img->channel[c] = nullptr;
img->pitch[c] = 0;
}
}
template <typename T, typename Context>
void DecodeJpegKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::string& mode,
DenseTensor* out) {
// Create nvJPEG handle
if (nvjpeg_handle == nullptr) {
nvjpegStatus_t create_status =
phi::dynload::nvjpegCreateSimple(&nvjpeg_handle);
PADDLE_ENFORCE_EQ(
create_status,
NVJPEG_STATUS_SUCCESS,
errors::Fatal("nvjpegCreateSimple failed: ", create_status));
}
nvjpegJpegState_t nvjpeg_state;
nvjpegStatus_t state_status =
phi::dynload::nvjpegJpegStateCreate(nvjpeg_handle, &nvjpeg_state);
PADDLE_ENFORCE_EQ(
state_status,
NVJPEG_STATUS_SUCCESS,
errors::Fatal("nvjpegJpegStateCreate failed: ", state_status));
int components;
nvjpegChromaSubsampling_t subsampling;
int widths[NVJPEG_MAX_COMPONENT];
int heights[NVJPEG_MAX_COMPONENT];
auto* x_data = x.data<T>();
nvjpegStatus_t info_status =
phi::dynload::nvjpegGetImageInfo(nvjpeg_handle,
x_data,
(std::size_t)x.numel(),
&components,
&subsampling,
widths,
heights);
PADDLE_ENFORCE_EQ(info_status,
NVJPEG_STATUS_SUCCESS,
errors::Fatal("nvjpegGetImageInfo failed: ", info_status));
int width = widths[0];
int height = heights[0];
nvjpegOutputFormat_t output_format;
int output_components;
if (mode == "unchanged") {
if (components == 1) {
output_format = NVJPEG_OUTPUT_Y;
output_components = 1;
} else if (components == 3) {
output_format = NVJPEG_OUTPUT_RGB;
output_components = 3;
} else {
phi::dynload::nvjpegJpegStateDestroy(nvjpeg_state);
PADDLE_THROW(errors::Fatal(
"The provided mode is not supported for JPEG files on GPU"));
}
} else if (mode == "gray") {
output_format = NVJPEG_OUTPUT_Y;
output_components = 1;
} else if (mode == "rgb") {
output_format = NVJPEG_OUTPUT_RGB;
output_components = 3;
} else {
phi::dynload::nvjpegJpegStateDestroy(nvjpeg_state);
PADDLE_THROW(errors::Fatal(
"The provided mode is not supported for JPEG files on GPU"));
}
nvjpegImage_t out_image;
InitNvjpegImage(&out_image);
// create nvjpeg stream
if (nvjpeg_stream == nullptr) {
hipStreamCreateWithFlags(&nvjpeg_stream, hipStreamNonBlocking);
}
int sz = widths[0] * heights[0];
std::vector<int64_t> out_shape = {output_components, height, width};
out->Resize(phi::make_ddim(out_shape));
T* data = dev_ctx.template Alloc<T>(out);
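// Explanatory note (added; derived from the loop below, not in the original source):
// the output tensor is planar CHW, so channel c is written starting at
// data + c * sz (sz = height * width) and nvjpeg uses a row pitch of `width`
// bytes per plane.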
for (int c = 0; c < output_components; c++) {
out_image.channel[c] = data + c * sz;
out_image.pitch[c] = width;
}
nvjpegStatus_t decode_status = phi::dynload::nvjpegDecode(nvjpeg_handle,
nvjpeg_state,
x_data,
x.numel(),
output_format,
&out_image,
nvjpeg_stream);
}
} // namespace phi
PD_REGISTER_KERNEL(decode_jpeg, // cuda_only
GPU,
ALL_LAYOUT,
phi::DecodeJpegKernel,
uint8_t) {
kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
}
#endif
| ee2e3ef00aebcbd0459531d32ab0b2c96162f511.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if !defined(WITH_NV_JETSON) && !defined(PADDLE_WITH_HIP)
#include "paddle/phi/kernels/decode_jpeg_kernel.h"
#include "paddle/phi/backends/dynload/nvjpeg.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/stream.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
static cudaStream_t nvjpeg_stream = nullptr;
static nvjpegHandle_t nvjpeg_handle = nullptr;
void InitNvjpegImage(nvjpegImage_t* img) {
for (int c = 0; c < NVJPEG_MAX_COMPONENT; c++) {
img->channel[c] = nullptr;
img->pitch[c] = 0;
}
}
template <typename T, typename Context>
void DecodeJpegKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::string& mode,
DenseTensor* out) {
// Create nvJPEG handle
if (nvjpeg_handle == nullptr) {
nvjpegStatus_t create_status =
phi::dynload::nvjpegCreateSimple(&nvjpeg_handle);
PADDLE_ENFORCE_EQ(
create_status,
NVJPEG_STATUS_SUCCESS,
errors::Fatal("nvjpegCreateSimple failed: ", create_status));
}
nvjpegJpegState_t nvjpeg_state;
nvjpegStatus_t state_status =
phi::dynload::nvjpegJpegStateCreate(nvjpeg_handle, &nvjpeg_state);
PADDLE_ENFORCE_EQ(
state_status,
NVJPEG_STATUS_SUCCESS,
errors::Fatal("nvjpegJpegStateCreate failed: ", state_status));
int components;
nvjpegChromaSubsampling_t subsampling;
int widths[NVJPEG_MAX_COMPONENT];
int heights[NVJPEG_MAX_COMPONENT];
auto* x_data = x.data<T>();
nvjpegStatus_t info_status =
phi::dynload::nvjpegGetImageInfo(nvjpeg_handle,
x_data,
(std::size_t)x.numel(),
&components,
&subsampling,
widths,
heights);
PADDLE_ENFORCE_EQ(info_status,
NVJPEG_STATUS_SUCCESS,
errors::Fatal("nvjpegGetImageInfo failed: ", info_status));
int width = widths[0];
int height = heights[0];
nvjpegOutputFormat_t output_format;
int output_components;
if (mode == "unchanged") {
if (components == 1) {
output_format = NVJPEG_OUTPUT_Y;
output_components = 1;
} else if (components == 3) {
output_format = NVJPEG_OUTPUT_RGB;
output_components = 3;
} else {
phi::dynload::nvjpegJpegStateDestroy(nvjpeg_state);
PADDLE_THROW(errors::Fatal(
"The provided mode is not supported for JPEG files on GPU"));
}
} else if (mode == "gray") {
output_format = NVJPEG_OUTPUT_Y;
output_components = 1;
} else if (mode == "rgb") {
output_format = NVJPEG_OUTPUT_RGB;
output_components = 3;
} else {
phi::dynload::nvjpegJpegStateDestroy(nvjpeg_state);
PADDLE_THROW(errors::Fatal(
"The provided mode is not supported for JPEG files on GPU"));
}
nvjpegImage_t out_image;
InitNvjpegImage(&out_image);
// create nvjpeg stream
if (nvjpeg_stream == nullptr) {
cudaStreamCreateWithFlags(&nvjpeg_stream, cudaStreamNonBlocking);
}
int sz = widths[0] * heights[0];
std::vector<int64_t> out_shape = {output_components, height, width};
out->Resize(phi::make_ddim(out_shape));
T* data = dev_ctx.template Alloc<T>(out);
for (int c = 0; c < output_components; c++) {
out_image.channel[c] = data + c * sz;
out_image.pitch[c] = width;
}
nvjpegStatus_t decode_status = phi::dynload::nvjpegDecode(nvjpeg_handle,
nvjpeg_state,
x_data,
x.numel(),
output_format,
&out_image,
nvjpeg_stream);
}
} // namespace phi
PD_REGISTER_KERNEL(decode_jpeg, // cuda_only
GPU,
ALL_LAYOUT,
phi::DecodeJpegKernel,
uint8_t) {
kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
}
#endif
|
7e3ddc285cdd77c85b61cbf090803edb6dfebebe.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "kernel.h"
#include <hip/hip_runtime.h>
#define THREADBLOCK_SIZE 512
__launch_bounds__(THREADBLOCK_SIZE) __global__ void TritonGatherKernel(
const int8_t** __restrict input_ptr_buffer,
const size_t* __restrict byte_size_buffer,
const size_t* __restrict byte_size_offset_buffer,
int8_t* __restrict output_buffer)
{
int request_idx = blockIdx.x;
int lane_id = threadIdx.x;
const int8_t* request_input_buffer = input_ptr_buffer[request_idx];
int byte_size = byte_size_buffer[request_idx];
int byte_size_offset = byte_size_offset_buffer[request_idx];
int8_t* output_buffer_with_offset = output_buffer + byte_size_offset;
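// Explanatory note (added; not in the original source): the branch below takes a
// vectorized path when the request size is a multiple of 4 bytes and both the
// source and destination pointers are 4-byte aligned, copying with 32-bit
// loads/stores; otherwise it falls back to byte-wise copies through __ldg.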
if (((byte_size % 4) == 0) && (((uint64_t)request_input_buffer % 4) == 0) &&
(((uint64_t)output_buffer_with_offset % 4) == 0)) {
int32_t* input_4 = (int32_t*)request_input_buffer;
int32_t* output_4 = (int32_t*)output_buffer_with_offset;
int element_count = byte_size / 4;
for (int elem_id = lane_id; elem_id < element_count;
elem_id += THREADBLOCK_SIZE) {
output_4[elem_id] = input_4[elem_id];
}
} else {
for (int elem_id = lane_id; elem_id < byte_size;
elem_id += THREADBLOCK_SIZE) {
output_buffer_with_offset[elem_id] =
__ldg(request_input_buffer + elem_id);
}
}
}
#ifdef __cplusplus
extern "C" {
#endif
hipError_t
RunGatherKernel(
const int8_t** input_ptr_buffer, const size_t* byte_size_buffer,
const size_t* byte_size_offset_buffer, int8_t* output_buffer,
size_t request_count, hipStream_t stream)
{
hipLaunchKernelGGL(( TritonGatherKernel), dim3(request_count), dim3(THREADBLOCK_SIZE), 0, stream,
input_ptr_buffer, byte_size_buffer, byte_size_offset_buffer,
output_buffer);
return hipGetLastError();
}
#ifdef __cplusplus
}
#endif
| 7e3ddc285cdd77c85b61cbf090803edb6dfebebe.cu | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "kernel.h"
#include <cuda.h>
#define THREADBLOCK_SIZE 512
__launch_bounds__(THREADBLOCK_SIZE) __global__ void TritonGatherKernel(
const int8_t** __restrict input_ptr_buffer,
const size_t* __restrict byte_size_buffer,
const size_t* __restrict byte_size_offset_buffer,
int8_t* __restrict output_buffer)
{
int request_idx = blockIdx.x;
int lane_id = threadIdx.x;
const int8_t* request_input_buffer = input_ptr_buffer[request_idx];
int byte_size = byte_size_buffer[request_idx];
int byte_size_offset = byte_size_offset_buffer[request_idx];
int8_t* output_buffer_with_offset = output_buffer + byte_size_offset;
if (((byte_size % 4) == 0) && (((uint64_t)request_input_buffer % 4) == 0) &&
(((uint64_t)output_buffer_with_offset % 4) == 0)) {
int32_t* input_4 = (int32_t*)request_input_buffer;
int32_t* output_4 = (int32_t*)output_buffer_with_offset;
int element_count = byte_size / 4;
for (int elem_id = lane_id; elem_id < element_count;
elem_id += THREADBLOCK_SIZE) {
output_4[elem_id] = input_4[elem_id];
}
} else {
for (int elem_id = lane_id; elem_id < byte_size;
elem_id += THREADBLOCK_SIZE) {
output_buffer_with_offset[elem_id] =
__ldg(request_input_buffer + elem_id);
}
}
}
#ifdef __cplusplus
extern "C" {
#endif
cudaError_t
RunGatherKernel(
const int8_t** input_ptr_buffer, const size_t* byte_size_buffer,
const size_t* byte_size_offset_buffer, int8_t* output_buffer,
size_t request_count, cudaStream_t stream)
{
TritonGatherKernel<<<request_count, THREADBLOCK_SIZE, 0, stream>>>(
input_ptr_buffer, byte_size_buffer, byte_size_offset_buffer,
output_buffer);
return cudaGetLastError();
}
#ifdef __cplusplus
}
#endif
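// Illustrative host-side usage sketch (added for clarity; not part of the original
// file). The helper name, the three-request setup, and all variable names are
// hypothetical; it only shows how the pointer/size/offset arrays consumed by
// RunGatherKernel might be staged on the device. Error handling is omitted.
#include <cuda_runtime.h>

static cudaError_t
ExampleGatherThreeRequests(
    const int8_t* d_src0, size_t n0, const int8_t* d_src1, size_t n1,
    const int8_t* d_src2, size_t n2, int8_t* d_output, cudaStream_t stream)
{
  // Host-side descriptors: source pointer, byte size and output offset per request.
  const int8_t* h_ptrs[3] = {d_src0, d_src1, d_src2};
  size_t h_sizes[3] = {n0, n1, n2};
  size_t h_offsets[3] = {0, n0, n0 + n1};

  const int8_t** d_ptrs = nullptr;
  size_t* d_sizes = nullptr;
  size_t* d_offsets = nullptr;
  cudaMalloc((void**)&d_ptrs, sizeof(h_ptrs));
  cudaMalloc((void**)&d_sizes, sizeof(h_sizes));
  cudaMalloc((void**)&d_offsets, sizeof(h_offsets));
  cudaMemcpyAsync(d_ptrs, h_ptrs, sizeof(h_ptrs), cudaMemcpyHostToDevice, stream);
  cudaMemcpyAsync(d_sizes, h_sizes, sizeof(h_sizes), cudaMemcpyHostToDevice, stream);
  cudaMemcpyAsync(d_offsets, h_offsets, sizeof(h_offsets), cudaMemcpyHostToDevice, stream);

  // One thread block per request copies each source buffer to its offset in d_output.
  cudaError_t err = RunGatherKernel(
      d_ptrs, d_sizes, d_offsets, d_output, 3 /* request_count */, stream);

  // Wait for the gather to finish before releasing the descriptor buffers.
  cudaStreamSynchronize(stream);
  cudaFree(d_ptrs);
  cudaFree(d_sizes);
  cudaFree(d_offsets);
  return err;
}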
|
ade88fafb97cce22c25522a0c47a07b9f4f32bb1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cosineKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE*sizeof(float));
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE*sizeof(float));
float *outN = NULL;
hipMalloc(&outN, XSIZE*YSIZE*sizeof(float));
float *outD1 = NULL;
hipMalloc(&outD1, XSIZE*YSIZE*sizeof(float));
float *outD2 = NULL;
hipMalloc(&outD2, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
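// Note (added; not in the original source): the two loops above round iXSIZE and
// iYSIZE up to the next multiples of BLOCKX and BLOCKY, i.e.
// iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX, so the grid below covers the
// whole matrix even when the block shape does not divide it evenly.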
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((cosineKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,outN,outD1,outD2,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((cosineKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,outN,outD1,outD2,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((cosineKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,outN,outD1,outD2,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ade88fafb97cce22c25522a0c47a07b9f4f32bb1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cosineKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE*sizeof(float));
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE*sizeof(float));
float *outN = NULL;
cudaMalloc(&outN, XSIZE*YSIZE*sizeof(float));
float *outD1 = NULL;
cudaMalloc(&outD1, XSIZE*YSIZE*sizeof(float));
float *outD2 = NULL;
cudaMalloc(&outD2, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cosineKernel<<<gridBlock,threadBlock>>>(a,b,outN,outD1,outD2,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cosineKernel<<<gridBlock,threadBlock>>>(a,b,outN,outD1,outD2,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cosineKernel<<<gridBlock,threadBlock>>>(a,b,outN,outD1,outD2,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
05e7ef6b74d7671bceaf770c7b1c7794c79509de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <reduce.h>
__device__ double update(double old,double opOutput,double *extraParams) {
return opOutput + old;
}
/**
An op on the device
@param d1 the first operator
@param d2 the second operator
*/
__device__ double op(double d1,double d2,double *extraParams) {
return fabsf(d1);
}
__device__ double op(double d1,double *extraParams) {
return d1;
}
__device__ double postProcess(double reduction,int n,int xOffset,double *dx,int incx,double *params,double *result) {
return reduction;
}
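// Worked example (added; not in the original source): for dx = {1.0, -2.0, 3.0},
// op() maps each element to its absolute value, update() accumulates the running
// sum (1 + 2 + 3 = 6), and postProcess() returns the reduction unchanged, so the
// strided kernel below computes the L1 norm, 6.0.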
extern "C"
__global__ void norm1_strided_double(int n, int xOffset,double *dx,int incx,double *params,double *result) {
transform(n,xOffset,dx,incx,params,result);
}
| 05e7ef6b74d7671bceaf770c7b1c7794c79509de.cu | #include <reduce.h>
__device__ double update(double old,double opOutput,double *extraParams) {
return opOutput + old;
}
/**
An op on the device
@param d1 the first operator
@param d2 the second operator
*/
__device__ double op(double d1,double d2,double *extraParams) {
return fabsf(d1);
}
__device__ double op(double d1,double *extraParams) {
return d1;
}
__device__ double postProcess(double reduction,int n,int xOffset,double *dx,int incx,double *params,double *result) {
return reduction;
}
extern "C"
__global__ void norm1_strided_double(int n, int xOffset,double *dx,int incx,double *params,double *result) {
transform(n,xOffset,dx,incx,params,result);
}
|
4cad8158eed32e6f9b166843a0d6bb2de5372db4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
#include <nvtext/normalize.hpp>
#include <text/utilities/tokenize_ops.cuh>
#include <thrust/for_each.h>
namespace nvtext
{
namespace detail
{
namespace
{
/**
* @brief Normalize spaces in a strings column.
*
* Repeated whitespace (code-point <= ' ') is replaced with a single space.
* Also, whitespace is trimmed from the beginning and end of each string.
*
* This functor can be called to compute the output size in bytes
* of each string and then called again to fill in the allocated buffer.
*/
struct normalize_spaces_fn
{
cudf::column_device_view const d_strings; // strings to normalize
int32_t const* d_offsets{}; // offsets into d_buffer
char* d_buffer{}; // output buffer for characters
__device__ int32_t operator()(cudf::size_type idx)
{
if( d_strings.is_null(idx) )
return 0;
cudf::string_view single_space(" ",1);
auto const d_str = d_strings.element<cudf::string_view>(idx);
char* buffer = d_offsets ? d_buffer + d_offsets[idx] : nullptr;
char* optr = buffer; // running output pointer
int32_t nbytes = 0; // holds the number of bytes per output string
// create tokenizer for this string with whitespace delimiter (default)
characters_tokenizer tokenizer(d_str);
// this will retrieve tokens automatically skipping runs of whitespace
while( tokenizer.next_token() )
{
auto token_pos = tokenizer.token_byte_positions();
nbytes += token_pos.second - token_pos.first + 1; // token size plus a single space
if( optr )
{
cudf::string_view token( d_str.data() + token_pos.first, token_pos.second - token_pos.first );
if( optr != buffer ) // prepend space unless we are at the beginning
optr = cudf::strings::detail::copy_string(optr,single_space);
// write token to output buffer
optr = cudf::strings::detail::copy_string(optr,token); // copy token to output
}
}
return (nbytes>0) ? nbytes-1:0; // remove trailing space
}
};
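// Illustrative example (added; not part of the original file): for the input string
// " hello \t world ", the size-only pass returns 11 bytes ("hello" + ' ' + "world"),
// and the second pass, given d_offsets and d_buffer, writes "hello world".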
} // namespace
// details API
std::unique_ptr<cudf::column> normalize_spaces( cudf::strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0 )
{
cudf::size_type strings_count = strings.size();
if( strings_count == 0 )
return cudf::make_empty_column(cudf::data_type{cudf::STRING});
// create device column
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// copy bitmask
rmm::device_buffer null_mask = copy_bitmask( strings.parent(), stream, mr );
// create offsets by calculating size of each string for output
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
normalize_spaces_fn{d_strings} ); // this does size-only calc
auto offsets_column = cudf::strings::detail::make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build the chars column
cudf::size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = cudf::strings::detail::create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto d_chars = chars_column->mutable_view().data<char>();
// copy tokens to the chars buffer
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<cudf::size_type>(0), strings_count,
normalize_spaces_fn{d_strings, d_offsets, d_chars} );
chars_column->set_null_count(0); // reset null count for child column
//
return cudf::make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
strings.null_count(), std::move(null_mask), stream, mr);
}
} // namespace detail
// external APIs
std::unique_ptr<cudf::column> normalize_spaces( cudf::strings_column_view const& strings,
rmm::mr::device_memory_resource* mr )
{
return detail::normalize_spaces( strings, mr );
}
} // namespace nvtext
| 4cad8158eed32e6f9b166843a0d6bb2de5372db4.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
#include <nvtext/normalize.hpp>
#include <text/utilities/tokenize_ops.cuh>
#include <thrust/for_each.h>
namespace nvtext
{
namespace detail
{
namespace
{
/**
* @brief Normalize spaces in a strings column.
*
* Repeated whitespace (code-point <= ' ') is replaced with a single space.
* Also, whitespace is trimmed from the beginning and end of each string.
*
* This functor can be called to compute the output size in bytes
* of each string and then called again to fill in the allocated buffer.
*/
struct normalize_spaces_fn
{
cudf::column_device_view const d_strings; // strings to normalize
int32_t const* d_offsets{}; // offsets into d_buffer
char* d_buffer{}; // output buffer for characters
__device__ int32_t operator()(cudf::size_type idx)
{
if( d_strings.is_null(idx) )
return 0;
cudf::string_view single_space(" ",1);
auto const d_str = d_strings.element<cudf::string_view>(idx);
char* buffer = d_offsets ? d_buffer + d_offsets[idx] : nullptr;
char* optr = buffer; // running output pointer
int32_t nbytes = 0; // holds the number of bytes per output string
// create tokenizer for this string with whitespace delimiter (default)
characters_tokenizer tokenizer(d_str);
// this will retrieve tokens automatically skipping runs of whitespace
while( tokenizer.next_token() )
{
auto token_pos = tokenizer.token_byte_positions();
nbytes += token_pos.second - token_pos.first + 1; // token size plus a single space
if( optr )
{
cudf::string_view token( d_str.data() + token_pos.first, token_pos.second - token_pos.first );
if( optr != buffer ) // prepend space unless we are at the beginning
optr = cudf::strings::detail::copy_string(optr,single_space);
// write token to output buffer
optr = cudf::strings::detail::copy_string(optr,token); // copy token to output
}
}
return (nbytes>0) ? nbytes-1:0; // remove trailing space
}
};
} // namespace
// details API
std::unique_ptr<cudf::column> normalize_spaces( cudf::strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0 )
{
cudf::size_type strings_count = strings.size();
if( strings_count == 0 )
return cudf::make_empty_column(cudf::data_type{cudf::STRING});
// create device column
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// copy bitmask
rmm::device_buffer null_mask = copy_bitmask( strings.parent(), stream, mr );
// create offsets by calculating size of each string for output
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
normalize_spaces_fn{d_strings} ); // this does size-only calc
auto offsets_column = cudf::strings::detail::make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build the chars column
cudf::size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = cudf::strings::detail::create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto d_chars = chars_column->mutable_view().data<char>();
// copy tokens to the chars buffer
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<cudf::size_type>(0), strings_count,
normalize_spaces_fn{d_strings, d_offsets, d_chars} );
chars_column->set_null_count(0); // reset null count for child column
//
return cudf::make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
strings.null_count(), std::move(null_mask), stream, mr);
}
} // namespace detail
// external APIs
std::unique_ptr<cudf::column> normalize_spaces( cudf::strings_column_view const& strings,
rmm::mr::device_memory_resource* mr )
{
return detail::normalize_spaces( strings, mr );
}
} // namespace nvtext
|
e5c036aa12a929895298334a8a8e22f1abcc9987.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* .cuda.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include ".cuda.h"
namespace lh2core
{
// path tracing buffers and global variables
__constant__ CoreInstanceDesc* instanceDescriptors;
__constant__ CUDAMaterial* materials;
__constant__ CoreLightTri* areaLights;
__constant__ CorePointLight* pointLights;
__constant__ CoreSpotLight* spotLights;
__constant__ CoreDirectionalLight* directionalLights;
__constant__ int4 lightCounts; // area, point, spot, directional
__constant__ uchar4* argb32;
__constant__ float4* argb128;
__constant__ uchar4* nrm32;
__constant__ float3* skyPixels;
__constant__ int skywidth;
__constant__ int skyheight;
__constant__ float4* debugData;
__constant__ mat4 worldToSky;
// path tracer settings
__constant__ __device__ float geometryEpsilon;
__constant__ __device__ float clampValue;
// access
__host__ void SetInstanceDescriptors( CoreInstanceDesc* p ) { hipMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) ); }
__host__ void SetMaterialList( CUDAMaterial* p ) { hipMemcpyToSymbol( materials, &p, sizeof( void* ) ); }
__host__ void SetAreaLights( CoreLightTri* p ) { hipMemcpyToSymbol( areaLights, &p, sizeof( void* ) ); }
__host__ void SetPointLights( CorePointLight* p ) { hipMemcpyToSymbol( pointLights, &p, sizeof( void* ) ); }
__host__ void SetSpotLights( CoreSpotLight* p ) { hipMemcpyToSymbol( spotLights, &p, sizeof( void* ) ); }
__host__ void SetDirectionalLights( CoreDirectionalLight* p ) { hipMemcpyToSymbol( directionalLights, &p, sizeof( void* ) ); }
__host__ void SetLightCounts( int area, int point, int spot, int directional )
{
const int4 counts = make_int4( area, point, spot, directional );
hipMemcpyToSymbol( lightCounts, &counts, sizeof( int4 ) );
}
__host__ void SetARGB32Pixels( uint* p ) { hipMemcpyToSymbol( argb32, &p, sizeof( void* ) ); }
__host__ void SetARGB128Pixels( float4* p ) { hipMemcpyToSymbol( argb128, &p, sizeof( void* ) ); }
__host__ void SetNRM32Pixels( uint* p ) { hipMemcpyToSymbol( nrm32, &p, sizeof( void* ) ); }
__host__ void SetSkyPixels( float3* p ) { hipMemcpyToSymbol( skyPixels, &p, sizeof( void* ) ); }
__host__ void SetSkySize( int w, int h ) { hipMemcpyToSymbol( skywidth, &w, sizeof( int ) ); hipMemcpyToSymbol( skyheight, &h, sizeof( int ) ); }
__host__ void SetWorldToSky( const mat4& worldToLight ) { hipMemcpyToSymbol( worldToSky, &worldToLight, sizeof( worldToSky ) ); }
__host__ void SetDebugData( float4* p ) { hipMemcpyToSymbol( debugData, &p, sizeof( void* ) ); }
// access
__host__ void SetGeometryEpsilon( float e ) { hipMemcpyToSymbol( geometryEpsilon, &e, sizeof( float ) ); }
__host__ void SetClampValue( float c ) { hipMemcpyToSymbol( clampValue, &c, sizeof( float ) ); }
// BDPT
/////////////////////////////////////////////////
/* LH2_DEVFUNC void copyPathState(const BiPathState orgin, BiPathState& target)
{
memcpy(&target, &orgin, sizeof(BiPathState));
} */
__global__ void InitIndexForConstructionLight_Kernel( int pathCount, uint* construcLightBuffer )
{
int jobIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (jobIndex >= pathCount) return;
construcLightBuffer[jobIndex] = jobIndex;
}
__host__ void InitIndexForConstructionLight( int pathCount, uint* construcLightBuffer )
{
const dim3 gridDim( NEXTMULTIPLEOF( pathCount, 256 ) / 256, 1 ), blockDim( 256, 1 );
InitIndexForConstructionLight_Kernel << <gridDim.x, 256 >> > (pathCount, construcLightBuffer);
}
///////////////////////////////////////////////////
// counters for persistent threads
static __device__ Counters* counters;
__global__ void InitCountersForExtend_Kernel( int pathCount )
{
if (threadIdx.x != 0) return;
counters->constructionLightPos = pathCount; // remaining active paths
counters->constructionEyePos = 0;
counters->extendEyePath = 0;
counters->extendLightPath = 0;
counters->randomWalkRays = 0;
counters->visibilityRays = 0;
}
__host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel << <1, 32 >> > (pathCount); }
__global__ void InitCountersForPixels_Kernel()
{
if (threadIdx.x != 0) return;
counters->contribution_count = 0;
}
__host__ void InitCountersForPixels() { InitCountersForPixels_Kernel << <1, 32 >> > (); }
__host__ void SetCounters( Counters* p ) { hipMemcpyToSymbol( counters, &p, sizeof( void* ) ); }
// functional blocks
#include "tools_shared.h"
#include "sampling_shared.h"
#include "material_shared.h"
#include "lights_shared.h"
#include "finalize_shared.h"
#include "bsdf.h"
#include "constructionLightPos.h"
#include "constructionEyePos.h"
#include "extendEyePath.h"
#include "extendLightPath.h"
#include "connectionPath.h"
#include "finalizeContribution.h"
} // namespace lh2core
// EOF
| e5c036aa12a929895298334a8a8e22f1abcc9987.cu | /* .cuda.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include ".cuda.h"
namespace lh2core
{
// path tracing buffers and global variables
__constant__ CoreInstanceDesc* instanceDescriptors;
__constant__ CUDAMaterial* materials;
__constant__ CoreLightTri* areaLights;
__constant__ CorePointLight* pointLights;
__constant__ CoreSpotLight* spotLights;
__constant__ CoreDirectionalLight* directionalLights;
__constant__ int4 lightCounts; // area, point, spot, directional
__constant__ uchar4* argb32;
__constant__ float4* argb128;
__constant__ uchar4* nrm32;
__constant__ float3* skyPixels;
__constant__ int skywidth;
__constant__ int skyheight;
__constant__ float4* debugData;
__constant__ mat4 worldToSky;
// path tracer settings
__constant__ __device__ float geometryEpsilon;
__constant__ __device__ float clampValue;
// access
__host__ void SetInstanceDescriptors( CoreInstanceDesc* p ) { cudaMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) ); }
__host__ void SetMaterialList( CUDAMaterial* p ) { cudaMemcpyToSymbol( materials, &p, sizeof( void* ) ); }
__host__ void SetAreaLights( CoreLightTri* p ) { cudaMemcpyToSymbol( areaLights, &p, sizeof( void* ) ); }
__host__ void SetPointLights( CorePointLight* p ) { cudaMemcpyToSymbol( pointLights, &p, sizeof( void* ) ); }
__host__ void SetSpotLights( CoreSpotLight* p ) { cudaMemcpyToSymbol( spotLights, &p, sizeof( void* ) ); }
__host__ void SetDirectionalLights( CoreDirectionalLight* p ) { cudaMemcpyToSymbol( directionalLights, &p, sizeof( void* ) ); }
__host__ void SetLightCounts( int area, int point, int spot, int directional )
{
const int4 counts = make_int4( area, point, spot, directional );
cudaMemcpyToSymbol( lightCounts, &counts, sizeof( int4 ) );
}
__host__ void SetARGB32Pixels( uint* p ) { cudaMemcpyToSymbol( argb32, &p, sizeof( void* ) ); }
__host__ void SetARGB128Pixels( float4* p ) { cudaMemcpyToSymbol( argb128, &p, sizeof( void* ) ); }
__host__ void SetNRM32Pixels( uint* p ) { cudaMemcpyToSymbol( nrm32, &p, sizeof( void* ) ); }
__host__ void SetSkyPixels( float3* p ) { cudaMemcpyToSymbol( skyPixels, &p, sizeof( void* ) ); }
__host__ void SetSkySize( int w, int h ) { cudaMemcpyToSymbol( skywidth, &w, sizeof( int ) ); cudaMemcpyToSymbol( skyheight, &h, sizeof( int ) ); }
__host__ void SetWorldToSky( const mat4& worldToLight ) { cudaMemcpyToSymbol( worldToSky, &worldToLight, sizeof( worldToSky ) ); }
__host__ void SetDebugData( float4* p ) { cudaMemcpyToSymbol( debugData, &p, sizeof( void* ) ); }
// access
__host__ void SetGeometryEpsilon( float e ) { cudaMemcpyToSymbol( geometryEpsilon, &e, sizeof( float ) ); }
__host__ void SetClampValue( float c ) { cudaMemcpyToSymbol( clampValue, &c, sizeof( float ) ); }
// BDPT
/////////////////////////////////////////////////
/* LH2_DEVFUNC void copyPathState(const BiPathState orgin, BiPathState& target)
{
memcpy(&target, &orgin, sizeof(BiPathState));
} */
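// one thread per path: fills the construction-light index buffer with the identity mapping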
__global__ void InitIndexForConstructionLight_Kernel( int pathCount, uint* construcLightBuffer )
{
int jobIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (jobIndex >= pathCount) return;
construcLightBuffer[jobIndex] = jobIndex;
}
__host__ void InitIndexForConstructionLight( int pathCount, uint* construcLightBuffer )
{
const dim3 gridDim( NEXTMULTIPLEOF( pathCount, 256 ) / 256, 1 ), blockDim( 256, 1 );
InitIndexForConstructionLight_Kernel << <gridDim.x, 256 >> > (pathCount, construcLightBuffer);
}
///////////////////////////////////////////////////
// counters for persistent threads
static __device__ Counters* counters;
__global__ void InitCountersForExtend_Kernel( int pathCount )
{
if (threadIdx.x != 0) return;
counters->constructionLightPos = pathCount; // remaining active paths
counters->constructionEyePos = 0;
counters->extendEyePath = 0;
counters->extendLightPath = 0;
counters->randomWalkRays = 0;
counters->visibilityRays = 0;
}
__host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel << <1, 32 >> > (pathCount); }
__global__ void InitCountersForPixels_Kernel()
{
if (threadIdx.x != 0) return;
counters->contribution_count = 0;
}
__host__ void InitCountersForPixels() { InitCountersForPixels_Kernel << <1, 32 >> > (); }
__host__ void SetCounters( Counters* p ) { cudaMemcpyToSymbol( counters, &p, sizeof( void* ) ); }
// functional blocks
#include "tools_shared.h"
#include "sampling_shared.h"
#include "material_shared.h"
#include "lights_shared.h"
#include "finalize_shared.h"
#include "bsdf.h"
#include "constructionLightPos.h"
#include "constructionEyePos.h"
#include "extendEyePath.h"
#include "extendLightPath.h"
#include "connectionPath.h"
#include "finalizeContribution.h"
} // namespace lh2core
// EOF
|
65aa0902611ede84a974f3e83976b02a1b6c946e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _DELTA_KERNEL_H_
#define _DELTA_KERNEL_H_
#include <stdio.h>
#define DELTA 1.0
#define INF 1e20
#define GN 262144
#define BUCKETSIZE 4096*8 //was 4096*8
#define NUMBUCKETS 1024
#define DOWN 0
#define UP 1
#define RIGHT 2
#define LEFT 3
//#define EMULATION
#ifdef EMULATION
#define DEBUG(x...) printf(x)
#else
#define DEBUG(x...)
#endif
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
texture<float, 2, hipReadModeElementType> mytex0;
texture<float, 2, hipReadModeElementType> mytex1;
texture<float, 2, hipReadModeElementType> mytex2;
texture<float, 2, hipReadModeElementType> mytex3;
__device__ void
demptyKernel(){
}
/*
__global__ void
deltaKernel( float* g_idata, float* g_odata)
{
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
// const unsigned int num_threads = blockDim.x;
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// read in input data from global memory
sdata[tid] = tex2D(tex,2,2);
__syncthreads();
// perform some computations
sdata[tid] = (float) sdata[tid];
__syncthreads();
// write data to global memory
g_odata[tid] = sdata[tid];
}
*/
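// labelKernel: expands every node of bucket i along its four grid neighbours (edge costs come
// from the per-direction textures), records improved tentative distances for light edges
// (cost <= DELTA) in R/dR, then reduces each group of four candidates to its minimum.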
__global__ void
labelKernel (int i, int* B,int* BCount,int* BPos, int tw,int th, int* RLoc,int* R, float* dR, float* d,int* vBucketMap){
//ver se no precisa colocar 4 BiCount em algum lugar...
//todo: try to increase speed using shared memory for RLoc... think more about it (maybe RLoc is too big for shared memory)
const unsigned int tid = threadIdx.x;
const unsigned int num_threads = blockDim.x;
int BiCount = BPos[i];
int node, row, col, pos,index;
float cost,f1,f2,fmin;
//cleaning R
//4 times because each node can be reached from up, down, left and right directions (and more 4 times because for each node 4 more are open)
// DEBUG("BiCount %d lastpos %d\n",BiCount,4*(BiCount-1)+3);
for(int k=0; (num_threads*k + tid) < 16*BiCount;k++){
index = 4*(num_threads*k + tid);
R[index ]=-1;
R[index+1]=-1;
R[index+2]=-1;
R[index+3]=-1;
dR[index ]=INF;
dR[index+1]=INF;
dR[index+2]=INF;
dR[index+3]=INF;
}
__syncthreads();
const int dx[4]={0,0,1,-1};
const int dy[4]={1,-1,0,0};
/* //down direction
dx[DOWN]= 0;
dy[DOWN]= 1;
//up direction
dx[UP]= 0;
dy[UP]= -1;
//right direction
dx[RIGHT]= 1;
dy[RIGHT]= 0;
//left direction
dx[LEFT]= -1;
dy[LEFT]= 0;*/
for(int k=0; num_threads*k + tid < BiCount;k++){
node = B [ BUCKETSIZE*i + num_threads*k + tid];
if(node!=-1){
DEBUG("(tid %d) node %d(from B[%d] pos %d)\n",tid,node,i,num_threads*k + tid);
for(int j=0;j<4;j++){
//TRY TO OPTIMIZE THIS PART REMOVING THE IF AND SETTING IMPOSSIBLE EDGE VALUES TO INFINITY
switch(j){
case 0:
cost = tex2D(mytex0,node%tw,node/tw);
break;
case 1:
cost = tex2D(mytex1,node%tw,node/tw);
break;
case 2:
cost = tex2D(mytex2,node%tw,node/tw);
break;
case 3:
cost = tex2D(mytex3,node%tw,node/tw);
break;
}
// cost = tex2D(mytex[j],node%tw,node/tw);//todo: change texture
row = node/tw + dy[j];
col = node%tw + dx[j];
if( (row>=0) && (row < th) && (col >= 0) && (col < tw) ){
RLoc [ row*tw + col ] = 4*(num_threads*k + tid)+j;
DEBUG("(tid %d)Connecting node %d to be processed by %d\n",tid,row*tw+col,RLoc [ row*tw + col ]);
}
}
}
}
__syncthreads();
//copy Edges to R
for(int k=0; num_threads*k + tid < BiCount;k++){
node = B [ BUCKETSIZE*i + num_threads*k + tid];
if(node!=-1){
for(int j=0;j<4;j++){
switch(j){
case 0:
cost = tex2D(mytex0,node%tw,node/tw);
break;
case 1:
cost = tex2D(mytex1,node%tw,node/tw);
break;
case 2:
cost = tex2D(mytex2,node%tw,node/tw);
break;
case 3:
cost = tex2D(mytex3,node%tw,node/tw);
break;
}
row = node/tw + dy[j];
col = node%tw + dx[j];
DEBUG("Pre-candidate in R %d (d=%f)\n",row*tw + col,d[node]+cost,dR [4*RLoc[row*tw + col]+j]);
if( (row>=0) && (row < th) && (col >= 0) && (col < tw) ){
DEBUG("Candidate in R %d (d=%f)\n",row*tw + col,d[node]+cost,dR [4*RLoc[row*tw + col]+j]);
if((cost<=DELTA)&&( d[node]+cost < d[row*tw + col])){
R [4*RLoc[row*tw + col]+j] = row*tw + col;
dR[4*RLoc[row*tw + col]+j] = d[node]+cost;
DEBUG("New node in R %d (d=%f,e=%f) in pos %d\n",R [4*RLoc[row*tw + col]+j],dR [4*RLoc[row*tw + col]+j],cost,4*RLoc[row*tw + col]+j);
vBucketMap[node]=-1;
}
}
}
}
}
__syncthreads();
//gathering data to find the minimum cost way to get to node n
//TODO: OPTIMIZE IN SUCH A WAY IT WON'T BE NEEDED TO GO THROUGH THE 4 EDGES, since they store the same value
for(int k=0; (num_threads*k + tid) < 16*BiCount;k++){
f1 = fminf( dR[4*(num_threads*k + tid) ], dR[4*(num_threads*k + tid)+1] );
f2 = fminf( dR[4*(num_threads*k + tid)+2], dR[4*(num_threads*k + tid)+3] );
fmin = fminf(f1,f2);
dR[4*(num_threads*k + tid) ]=fmin;
dR[4*(num_threads*k + tid)+1]=fmin;
dR[4*(num_threads*k + tid)+2]=fmin;
dR[4*(num_threads*k + tid)+3]=fmin;
}
__syncthreads();
// demptyKernel();
// __syncthreads();
}
__global__ void
copyB2SKernel(int i, int* B, int* BCount,int* BPos, int* S, int* SCount){
//TODO: optimize this code
//there's an optimized way of doing this, which is by only
//storing SCount = Scount+ BCount, as output
//and controlling with local variables thread positions
const unsigned int tid = threadIdx.x;
const unsigned int num_threads = blockDim.x;
int pos;
int BiCount = BCount[i];
for(int k=0; num_threads*k + tid < BiCount;k++){
if(B[i*BUCKETSIZE+num_threads*k+tid]!=-1){
pos = atomicAdd(&SCount[0],1);
S[pos] = B[i*BUCKETSIZE+num_threads*k+tid];
}
}
__syncthreads();
BCount[i]=0;
BPos[i]=0;
__syncthreads();
// DEBUG("(tid %d) SCount %d\n",tid,SCount[0]);
}
//Parallel relax edges
__global__ void
relaxKernel( int RCount, int* B,int* BCount,int* BPos, int* RLoc,int* R,float* dR,float* d, int* vBucketLoc, int* vBucketMap, float* deb){
const unsigned int tid = threadIdx.x;
const unsigned int num_threads = blockDim.x;
int v,bn,bn_old, index;
float x;
// int myAdd=0;
deb[20]= (float)RCount;
for(int k=0; num_threads*k + tid < RCount;k++){
index = num_threads*k + tid;
deb[2*index]= R[index];
deb[2*index+1]= dR[index];
}
// DEBUG("relaxing RCount %d\n",RCount);
//remove node from old bucket
// RCount = RCount /4;
for(int k=0; num_threads*k + tid < RCount;k++){
index = num_threads*k + tid;
// deb[index]= R[index];
if(R[index]!=-1){
x = dR[index];
v = R[index];
if(x<d[v]){
bn_old = vBucketMap[v];
if (bn_old != -1) {
int oldIndex = bn_old*BUCKETSIZE+vBucketLoc[v];
//
B[oldIndex] = -1;//GN;
int oldc = atomicSub(&BCount[bn_old],1);
// printf("Removing %d from %d(%d)\n",v,bn_old,oldc);
}
}
}
}
__syncthreads();
for(int k=0; num_threads*k + tid < RCount;k++){
if(R[num_threads*k + tid]!=-1){
//deb[0]= (float) (BUCKETSIZE);
x = dR[num_threads*k + tid];
v = R[num_threads*k + tid];
if(x < d[v]){
bn = (int) (dR[num_threads*k + tid]/DELTA);
// printf("Bn %d\n",bn);
atomicAdd(&BCount[bn],1);
int pos = atomicAdd(&BPos[bn],1);
DEBUG("Pos %d BCount[%d] %d node %d (x=%f)\n",pos,bn,BPos[bn],v,x);
B[bn*BUCKETSIZE+pos] = v;
d[v] = x;
vBucketLoc[v] = pos;
vBucketMap[v] = bn;
RLoc[v]=-1;
//only debug info
// for(int i=0;i<BPos[bn];i++){
// DEBUG("B(%d)=%d ",i,B[bn*BUCKETSIZE+i]);
// }
// DEBUG("\n");
}
}
}
__syncthreads();
}
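// labelHeavyKernel: builds candidates in R/dR for the heavy edges (cost > DELTA) leaving the
// settled nodes collected in S (currently only the down direction is handled, see the TODOs).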
__global__ void
labelHeavyKernel (int i, int* S,int* SCount,int tw, int* RLoc,int* R, float* dR, float* d,int* vBucketMap){
//todo: try to increase speed using shared memory for RLoc... think more about it (maybe RLoc is too big for shared memory)
const unsigned int tid = threadIdx.x;
const unsigned int num_threads = blockDim.x;
int mySCount = SCount[0];
DEBUG("Heavy mySCount %d\n",mySCount);
for(int k=0; num_threads*k + tid < mySCount;k++){
int node = S [ num_threads*k + tid];
//TODO FIX HEAVY EDGES TEX...
float downCost = tex2D(mytex0,node%tw,node/tw);//todo: repeat for left,right and down
int downRow = node/tw +1;
int downCol = node%tw;
int down = downRow*tw+downCol;
DEBUG("Node %d %f\n", node,down);
if(down<512*512){
RLoc[ down ] = num_threads*k + tid;
R[num_threads*k + tid]=-1;
}
// dR[num_threads*k + tid]=-1.0;
}
DEBUG("done (i=%d) %d\n",i,tid);
__syncthreads();
//copy Edges to R
for(int k=0; num_threads*k + tid < mySCount;k++){
int node = S [ num_threads*k + tid];
//TODO FIX HEAVY EDGES TEX...
float downCost = tex2D(mytex0,node%tw,node/tw);//todo: repeat for left,right and down
//float edgeCost = up;
int downRow = node/tw +1;
int downCol = node%tw;
int down = downRow*tw+downCol;
if(downCost>DELTA){
if(down<512*512){
R[RLoc[down]] = down;
dR[RLoc[down]] = d[node]+downCost;
DEBUG("Heavy new node in R %d (d=%f) from node %d\n",down,dR[RLoc[down]],node);
vBucketMap[node]=-1;
}
}
}
__syncthreads();
//todo: empty S here in order to be faster
}
__global__ void
emptyKernel(){
}
// B is the bucket i vector
// RLoc[n] stores the position of node n in R (so that if more than one attempt to update
// the distance to node n is made at the same time, it can be shifted to 0,1,2 or 3 in the position of R)
__global__ void
labelBisKernel( int i, int* B,int * Bi,int* R, int * RLoc,float* dR, int* S, int BSize,int* BCount, int* BiCount, int tw , float* d,int* vBucketLoc, int* vBucketMap) {
int k;
int v,bn, bn_old;
float x;
int pos;
// BCount[i]--;
// *BiCount = 1000;
//This is the labelling part
//Firstly, we will mark the position for node n
//If n appears more than once while expanding Bi, and two threads try to
//set different positions for the same node,
//it is guaranteed that only one of them will succeed,
//hence, after the labelling part, each node of Bi will
//be set to only one place in R
const unsigned int tid = threadIdx.x;
const unsigned int num_threads = blockDim.x;
// extern __shared__
__shared__ int Btemp[512];
// for(int k=0;num_threads * k < BiCount; k++){
// if(tid<BiCount)//tex2D(tex,2,2))
// int Btemptid;
//Btemp[0]=tex2D(tex,0,0);
// Btemp[tid]=tex2D(tex,node%tw,node/tw);//node%tw,node/tw);//B[tid];
// __syncthreads();
// }
// RLoc[tid]=-1;
//labelling part finishes here
//now we are set to copy edges to R
//for(k=0;k<10;k++){
int controlVar = 1;
//while(BCount[0]!=0){
//for(int g=0;g<B2;g++){
//printf("Tid %d Bcount[%d] %d\n",tid,i,BCount[i]);
// controlVar=0;
if( tid < BCount[i]){
// printf("Tid %d Bcount[%d] %d\n",tid,i,BCount[i]);
while(BCount[i]!=0){
// printf("Tid %d Inside\n",tid,i,BCount[i]);
int node = B[i*BUCKETSIZE+tid];
// Btemptid=tex2D(tex,node%tw,node/tw);
// Btemp[tid]=tex2D(tex,0,0);
// __syncthreads();
// if(((node%tw)<1) && ((node%tw)>=0) && ((node/tw)<1) && ((node/tw)>=0))
int b = node/tw;
int a = node%tw;
//WARNING TEX IS WRONG HERE
Btemp[tid]=tex2D(mytex0,a,b);
//printf("(tid %d)Tex %d\n",tid,Btemp[tid]);
//__syncthreads();
//printf("Tid %d Still alive\n",tid);
RLoc[Btemp[tid]]=tid;
// __syncthreads();
//printf("Tid %d Still alive\n",tid);
R[RLoc[Btemp[tid]]] = Btemp[tid];
dR[RLoc[Btemp[tid]]] = d[node]+1.1;
// __syncthreads();
//copy B[i] to S
//duplicates are allowed... they will cause no problem when creating heavy R
//two threads might be doing the same thing at the same time, which might cause
//an access conflict, but that's all
S[tid] = B[i*BUCKETSIZE+tid];
//TODO: check if clear Bi is correct
atomicSub(&BCount[i],1);
// BCount[i]=0;
controlVar=0;
//Parallel relax light edges
v = R[tid];
bn = (int) (dR[tid]/DELTA);
bn_old = vBucketMap[v];
// __syncthreads();
// d[tid]=0.0+bn;
// d[tid] = __int2float_rn(bn_old)+20;
// printf("Hello world (%d) bn=%d bn_old=%d\n",tid,bn,bn_old);
//remove v from old bucket
if (bn_old != -1) {
B[bn_old*BSize+vBucketLoc[v]] = GN;
BCount[bn_old]--;
}
//insert v in new bucket
//TODO: fix BCount
// int BCount = 100;
x = dR[tid];//+BCount[i]+i;
pos = BCount[i] + RLoc[v];
//printf("(tid %d) bn %d pos %d totpos %d v%d\n",tid,bn,pos,bn*BUCKETSIZE+pos,v);
B[bn*BUCKETSIZE+pos] = v;
d[v] = x;
vBucketLoc[v] = pos;
vBucketMap[v] = bn;
RLoc[v]=-1;
atomicAdd(&BCount[bn],1);
// BCount[bn]++;
// __syncthreads();
}
}
// }
}
#endif // #ifndef _MEMORY_KERNEL_H_
| 65aa0902611ede84a974f3e83976b02a1b6c946e.cu | #ifndef _DELTA_KERNEL_H_
#define _DELTA_KERNEL_H_
#include <stdio.h>
#define DELTA 1.0
#define INF 1e20
#define GN 262144
#define BUCKETSIZE 4096*8 //was 4096*8
#define NUMBUCKETS 1024
#define DOWN 0
#define UP 1
#define RIGHT 2
#define LEFT 3
//#define EMULATION
#ifdef EMULATION
#define DEBUG(x...) printf(x)
#else
#define DEBUG(x...)
#endif
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
texture<float, 2, cudaReadModeElementType> mytex0;
texture<float, 2, cudaReadModeElementType> mytex1;
texture<float, 2, cudaReadModeElementType> mytex2;
texture<float, 2, cudaReadModeElementType> mytex3;
__device__ void
demptyKernel(){
}
/*
__global__ void
deltaKernel( float* g_idata, float* g_odata)
{
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
// const unsigned int num_threads = blockDim.x;
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// read in input data from global memory
sdata[tid] = tex2D(tex,2,2);
__syncthreads();
// perform some computations
sdata[tid] = (float) sdata[tid];
__syncthreads();
// write data to global memory
g_odata[tid] = sdata[tid];
}
*/
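// labelKernel: expands every node of bucket i along its four grid neighbours (edge costs come
// from the per-direction textures), records improved tentative distances for light edges
// (cost <= DELTA) in R/dR, then reduces each group of four candidates to its minimum.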
__global__ void
labelKernel (int i, int* B,int* BCount,int* BPos, int tw,int th, int* RLoc,int* R, float* dR, float* d,int* vBucketMap){
//check whether 4*BiCount needs to be stored somewhere...
//todo: try to increase speed using shared memory for RLoc... think more about it (maybe RLoc is too big for shared memory)
const unsigned int tid = threadIdx.x;
const unsigned int num_threads = blockDim.x;
int BiCount = BPos[i];
int node, row, col, pos,index;
float cost,f1,f2,fmin;
//cleaning R
//4 times because each node can be reached from up, down, left and right directions (and more 4 times because for each node 4 more are open)
// DEBUG("BiCount %d lastpos %d\n",BiCount,4*(BiCount-1)+3);
for(int k=0; (num_threads*k + tid) < 16*BiCount;k++){
index = 4*(num_threads*k + tid);
R[index ]=-1;
R[index+1]=-1;
R[index+2]=-1;
R[index+3]=-1;
dR[index ]=INF;
dR[index+1]=INF;
dR[index+2]=INF;
dR[index+3]=INF;
}
__syncthreads();
const int dx[4]={0,0,1,-1};
const int dy[4]={1,-1,0,0};
/* //down direction
dx[DOWN]= 0;
dy[DOWN]= 1;
//up direction
dx[UP]= 0;
dy[UP]= -1;
//right direction
dx[RIGHT]= 1;
dy[RIGHT]= 0;
//left direction
dx[LEFT]= -1;
dy[LEFT]= 0;*/
for(int k=0; num_threads*k + tid < BiCount;k++){
node = B [ BUCKETSIZE*i + num_threads*k + tid];
if(node!=-1){
DEBUG("(tid %d) node %d(from B[%d] pos %d)\n",tid,node,i,num_threads*k + tid);
for(int j=0;j<4;j++){
//TRY TO OPTIMIZE THIS PART REMOVING THE IF AND SETTING IMPOSSIBLE EDGE VALUES TO INFINITY
switch(j){
case 0:
cost = tex2D(mytex0,node%tw,node/tw);
break;
case 1:
cost = tex2D(mytex1,node%tw,node/tw);
break;
case 2:
cost = tex2D(mytex2,node%tw,node/tw);
break;
case 3:
cost = tex2D(mytex3,node%tw,node/tw);
break;
}
// cost = tex2D(mytex[j],node%tw,node/tw);//todo: change texture
row = node/tw + dy[j];
col = node%tw + dx[j];
if( (row>=0) && (row < th) && (col >= 0) && (col < tw) ){
RLoc [ row*tw + col ] = 4*(num_threads*k + tid)+j;
DEBUG("(tid %d)Connecting node %d to be processed by %d\n",tid,row*tw+col,RLoc [ row*tw + col ]);
}
}
}
}
__syncthreads();
//copy Edges to R
for(int k=0; num_threads*k + tid < BiCount;k++){
node = B [ BUCKETSIZE*i + num_threads*k + tid];
if(node!=-1){
for(int j=0;j<4;j++){
switch(j){
case 0:
cost = tex2D(mytex0,node%tw,node/tw);
break;
case 1:
cost = tex2D(mytex1,node%tw,node/tw);
break;
case 2:
cost = tex2D(mytex2,node%tw,node/tw);
break;
case 3:
cost = tex2D(mytex3,node%tw,node/tw);
break;
}
row = node/tw + dy[j];
col = node%tw + dx[j];
DEBUG("Pre-candidate in R %d (d=%f)\n",row*tw + col,d[node]+cost,dR [4*RLoc[row*tw + col]+j]);
if( (row>=0) && (row < th) && (col >= 0) && (col < tw) ){
DEBUG("Candidate in R %d (d=%f)\n",row*tw + col,d[node]+cost,dR [4*RLoc[row*tw + col]+j]);
if((cost<=DELTA)&&( d[node]+cost < d[row*tw + col])){
R [4*RLoc[row*tw + col]+j] = row*tw + col;
dR[4*RLoc[row*tw + col]+j] = d[node]+cost;
DEBUG("New node in R %d (d=%f,e=%f) in pos %d\n",R [4*RLoc[row*tw + col]+j],dR [4*RLoc[row*tw + col]+j],cost,4*RLoc[row*tw + col]+j);
vBucketMap[node]=-1;
}
}
}
}
}
__syncthreads();
//gathering data to find the minimum cost way to get to node n
//TODO: OPTIMIZE IN SUCH A WAY IT WON'T BE NEEDED TO GO THROUGH THE 4 EDGES, since they store the same value
for(int k=0; (num_threads*k + tid) < 16*BiCount;k++){
f1 = fminf( dR[4*(num_threads*k + tid) ], dR[4*(num_threads*k + tid)+1] );
f2 = fminf( dR[4*(num_threads*k + tid)+2], dR[4*(num_threads*k + tid)+3] );
fmin = fminf(f1,f2);
dR[4*(num_threads*k + tid) ]=fmin;
dR[4*(num_threads*k + tid)+1]=fmin;
dR[4*(num_threads*k + tid)+2]=fmin;
dR[4*(num_threads*k + tid)+3]=fmin;
}
__syncthreads();
// demptyKernel();
// __syncthreads();
}
__global__ void
copyB2SKernel(int i, int* B, int* BCount,int* BPos, int* S, int* SCount){
//TODO: optimize this code
//there's an optimized way of doing this, which is by only
//storing SCount = Scount+ BCount, as output
//and controlling with local variables thread positions
const unsigned int tid = threadIdx.x;
const unsigned int num_threads = blockDim.x;
int pos;
int BiCount = BCount[i];
for(int k=0; num_threads*k + tid < BiCount;k++){
if(B[i*BUCKETSIZE+num_threads*k+tid]!=-1){
pos = atomicAdd(&SCount[0],1);
S[pos] = B[i*BUCKETSIZE+num_threads*k+tid];
}
}
__syncthreads();
BCount[i]=0;
BPos[i]=0;
__syncthreads();
// DEBUG("(tid %d) SCount %d\n",tid,SCount[0]);
}
//Parallel relax edges
__global__ void
relaxKernel( int RCount, int* B,int* BCount,int* BPos, int* RLoc,int* R,float* dR,float* d, int* vBucketLoc, int* vBucketMap, float* deb){
const unsigned int tid = threadIdx.x;
const unsigned int num_threads = blockDim.x;
int v,bn,bn_old, index;
float x;
// int myAdd=0;
deb[20]= (float)RCount;
for(int k=0; num_threads*k + tid < RCount;k++){
index = num_threads*k + tid;
deb[2*index]= R[index];
deb[2*index+1]= dR[index];
}
// DEBUG("relaxing RCount %d\n",RCount);
//remove node from old bucket
// RCount = RCount /4;
for(int k=0; num_threads*k + tid < RCount;k++){
index = num_threads*k + tid;
// deb[index]= R[index];
if(R[index]!=-1){
x = dR[index];
v = R[index];
if(x<d[v]){
bn_old = vBucketMap[v];
if (bn_old != -1) {
int oldIndex = bn_old*BUCKETSIZE+vBucketLoc[v];
//
B[oldIndex] = -1;//GN;
int oldc = atomicSub(&BCount[bn_old],1);
// printf("Removing %d from %d(%d)\n",v,bn_old,oldc);
}
}
}
}
__syncthreads();
for(int k=0; num_threads*k + tid < RCount;k++){
if(R[num_threads*k + tid]!=-1){
//deb[0]= (float) (BUCKETSIZE);
x = dR[num_threads*k + tid];
v = R[num_threads*k + tid];
if(x < d[v]){
bn = (int) (dR[num_threads*k + tid]/DELTA);
// printf("Bn %d\n",bn);
atomicAdd(&BCount[bn],1);
int pos = atomicAdd(&BPos[bn],1);
DEBUG("Pos %d BCount[%d] %d node %d (x=%f)\n",pos,bn,BPos[bn],v,x);
B[bn*BUCKETSIZE+pos] = v;
d[v] = x;
vBucketLoc[v] = pos;
vBucketMap[v] = bn;
RLoc[v]=-1;
//only debug info
// for(int i=0;i<BPos[bn];i++){
// DEBUG("B(%d)=%d ",i,B[bn*BUCKETSIZE+i]);
// }
// DEBUG("\n");
}
}
}
__syncthreads();
}
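// labelHeavyKernel: builds candidates in R/dR for the heavy edges (cost > DELTA) leaving the
// settled nodes collected in S (currently only the down direction is handled, see the TODOs).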
__global__ void
labelHeavyKernel (int i, int* S,int* SCount,int tw, int* RLoc,int* R, float* dR, float* d,int* vBucketMap){
//todo: try to increase speed using shared memory for RLoc... think more about it (maybe RLoc is too big for shared memory)
const unsigned int tid = threadIdx.x;
const unsigned int num_threads = blockDim.x;
int mySCount = SCount[0];
DEBUG("Heavy mySCount %d\n",mySCount);
for(int k=0; num_threads*k + tid < mySCount;k++){
int node = S [ num_threads*k + tid];
//TODO FIX HEAVY EDGES TEX...
float downCost = tex2D(mytex0,node%tw,node/tw);//todo: repeat for left,right and down
int downRow = node/tw +1;
int downCol = node%tw;
int down = downRow*tw+downCol;
DEBUG("Node %d %f\n", node,down);
if(down<512*512){
RLoc[ down ] = num_threads*k + tid;
R[num_threads*k + tid]=-1;
}
// dR[num_threads*k + tid]=-1.0;
}
DEBUG("done (i=%d) %d\n",i,tid);
__syncthreads();
//copy Edges to R
for(int k=0; num_threads*k + tid < mySCount;k++){
int node = S [ num_threads*k + tid];
//TODO FIX HEAVY EDGES TEX...
float downCost = tex2D(mytex0,node%tw,node/tw);//todo: repeat for left,right and down
//float edgeCost = up;
int downRow = node/tw +1;
int downCol = node%tw;
int down = downRow*tw+downCol;
if(downCost>DELTA){
if(down<512*512){
R[RLoc[down]] = down;
dR[RLoc[down]] = d[node]+downCost;
DEBUG("Heavy new node in R %d (d=%f) from node %d\n",down,dR[RLoc[down]],node);
vBucketMap[node]=-1;
}
}
}
__syncthreads();
//todo: empty S here in order to be faster
}
__global__ void
emptyKernel(){
}
// B is the bucket i vector
// RLoc[n] stores the position of node n in R (so that if more than one attempt to update
// the distance to node n is made at the same time, it can be shifted to 0,1,2 or 3 in the position of R)
__global__ void
labelBisKernel( int i, int* B,int * Bi,int* R, int * RLoc,float* dR, int* S, int BSize,int* BCount, int* BiCount, int tw , float* d,int* vBucketLoc, int* vBucketMap) {
int k;
int v,bn, bn_old;
float x;
int pos;
// BCount[i]--;
// *BiCount = 1000;
//This is the labelling part
//Firstly, we will mark the position for node n
//If n appears more than once while expanding Bi, and two threads try to
//set different positions for the same node,
//it is guaranteed that only one of them will succeed,
//hence, after the labelling part, each node of Bi will
//be set to only one place in R
const unsigned int tid = threadIdx.x;
const unsigned int num_threads = blockDim.x;
// extern __shared__
__shared__ int Btemp[512];
// for(int k=0;num_threads * k < BiCount; k++){
// if(tid<BiCount)//tex2D(tex,2,2))
// int Btemptid;
//Btemp[0]=tex2D(tex,0,0);
// Btemp[tid]=tex2D(tex,node%tw,node/tw);//node%tw,node/tw);//B[tid];
// __syncthreads();
// }
// RLoc[tid]=-1;
//labelling part finishes here
//now we are set to copy edges to R
//for(k=0;k<10;k++){
int controlVar = 1;
//while(BCount[0]!=0){
//for(int g=0;g<B2;g++){
//printf("Tid %d Bcount[%d] %d\n",tid,i,BCount[i]);
// controlVar=0;
if( tid < BCount[i]){
// printf("Tid %d Bcount[%d] %d\n",tid,i,BCount[i]);
while(BCount[i]!=0){
// printf("Tid %d Inside\n",tid,i,BCount[i]);
int node = B[i*BUCKETSIZE+tid];
// Btemptid=tex2D(tex,node%tw,node/tw);
// Btemp[tid]=tex2D(tex,0,0);
// __syncthreads();
// if(((node%tw)<1) && ((node%tw)>=0) && ((node/tw)<1) && ((node/tw)>=0))
int b = node/tw;
int a = node%tw;
//WARNING TEX IS WRONG HERE
Btemp[tid]=tex2D(mytex0,a,b);
//printf("(tid %d)Tex %d\n",tid,Btemp[tid]);
//__syncthreads();
//printf("Tid %d Still alive\n",tid);
RLoc[Btemp[tid]]=tid;
// __syncthreads();
//printf("Tid %d Still alive\n",tid);
R[RLoc[Btemp[tid]]] = Btemp[tid];
dR[RLoc[Btemp[tid]]] = d[node]+1.1;
// __syncthreads();
//copy B[i] to S
//duplicates are allowed... they will cause no problem when creating heavy R
//two threads might be doing the same thing at the same time, which might cause
//an access conflict, but that's all
S[tid] = B[i*BUCKETSIZE+tid];
//TODO: check if clear Bi is correct
atomicSub(&BCount[i],1);
// BCount[i]=0;
controlVar=0;
//Parallel relax light edges
v = R[tid];
bn = (int) (dR[tid]/DELTA);
bn_old = vBucketMap[v];
// __syncthreads();
// d[tid]=0.0+bn;
// d[tid] = __int2float_rn(bn_old)+20;
// printf("Hello world (%d) bn=%d bn_old=%d\n",tid,bn,bn_old);
//remove v from old bucket
if (bn_old != -1) {
B[bn_old*BSize+vBucketLoc[v]] = GN;
BCount[bn_old]--;
}
//insert v in new bucket
//TODO: fix BCount
// int BCount = 100;
x = dR[tid];//+BCount[i]+i;
pos = BCount[i] + RLoc[v];
//printf("(tid %d) bn %d pos %d totpos %d v%d\n",tid,bn,pos,bn*BUCKETSIZE+pos,v);
B[bn*BUCKETSIZE+pos] = v;
d[v] = x;
vBucketLoc[v] = pos;
vBucketMap[v] = bn;
RLoc[v]=-1;
atomicAdd(&BCount[bn],1);
// BCount[bn]++;
// __syncthreads();
}
}
// }
}
#endif // #ifndef _MEMORY_KERNEL_H_
|
faaa7b5070a5bb24c5749f028cdea4655db90f10.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
struct tanhupdateOutput_functor
{
__device__ void operator()(float *output, const float *input) const
{
*output = tanh(*input);
}
};
void THNN_CudaTanh_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output)
{
THAssert(THCudaTensor_checkGPU(state, 2, input, output));
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_pointwiseApply2(state, output, input, tanhupdateOutput_functor());
}
struct tanhupdateGradInput_functor
{
__device__ void operator()(float *gradInput, const float *output, const float *gradOutput) const
{
*gradInput = *gradOutput * (1 - *output * *output);
}
};
void THNN_CudaTanh_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *output)
{
THAssert(THCudaTensor_checkGPU(state, 3, output, gradOutput, gradInput));
THCudaTensor_resizeAs(state, gradInput, output);
THCudaTensor_pointwiseApply3(state, gradInput, output, gradOutput, tanhupdateGradInput_functor());
}
| faaa7b5070a5bb24c5749f028cdea4655db90f10.cu | #include "THCUNN.h"
struct tanhupdateOutput_functor
{
__device__ void operator()(float *output, const float *input) const
{
*output = tanh(*input);
}
};
void THNN_CudaTanh_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output)
{
THAssert(THCudaTensor_checkGPU(state, 2, input, output));
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_pointwiseApply2(state, output, input, tanhupdateOutput_functor());
}
struct tanhupdateGradInput_functor
{
__device__ void operator()(float *gradInput, const float *output, const float *gradOutput) const
{
*gradInput = *gradOutput * (1 - *output * *output);
}
};
void THNN_CudaTanh_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *output)
{
THAssert(THCudaTensor_checkGPU(state, 3, output, gradOutput, gradInput));
THCudaTensor_resizeAs(state, gradInput, output);
THCudaTensor_pointwiseApply3(state, gradInput, output, gradOutput, tanhupdateGradInput_functor());
}
|
775745281d1fc3ef914fb4371bbd7ab4ed95bb02.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 775745281d1fc3ef914fb4371bbd7ab4ed95bb02.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
1c29079c198930b4be6666d58ff40c2e36f8e3d2.hip | // !!! This is a file automatically generated by hipify!!!
// author: Felice Pantaleo, CERN, 2018
#include <cassert>
#include <iostream>
#include <new>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "HeterogeneousCore/CUDAUtilities/interface/GPUSimpleVector.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/exitSansCUDADevices.h"
__global__ void vector_pushback(GPU::SimpleVector<int> *foo) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
foo->push_back(index);
}
__global__ void vector_reset(GPU::SimpleVector<int> *foo) { foo->reset(); }
__global__ void vector_emplace_back(GPU::SimpleVector<int> *foo) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
foo->emplace_back(index);
}
int main() {
exitSansCUDADevices();
auto maxN = 10000;
GPU::SimpleVector<int> *obj_ptr = nullptr;
GPU::SimpleVector<int> *d_obj_ptr = nullptr;
GPU::SimpleVector<int> *tmp_obj_ptr = nullptr;
int *data_ptr = nullptr;
int *d_data_ptr = nullptr;
cudaCheck(hipHostMalloc(&obj_ptr, sizeof(GPU::SimpleVector<int>)));
cudaCheck(hipHostMalloc(&data_ptr, maxN * sizeof(int)));
cudaCheck(hipMalloc(&d_data_ptr, maxN * sizeof(int)));
auto v = GPU::make_SimpleVector(obj_ptr, maxN, data_ptr);
cudaCheck(hipHostMalloc(&tmp_obj_ptr, sizeof(GPU::SimpleVector<int>)));
GPU::make_SimpleVector(tmp_obj_ptr, maxN, d_data_ptr);
assert(tmp_obj_ptr->size() == 0);
assert(tmp_obj_ptr->capacity() == static_cast<int>(maxN));
cudaCheck(hipMalloc(&d_obj_ptr, sizeof(GPU::SimpleVector<int>)));
// ... and copy the object to the device.
cudaCheck(hipMemcpy(d_obj_ptr, tmp_obj_ptr, sizeof(GPU::SimpleVector<int>), hipMemcpyDefault));
int numBlocks = 5;
int numThreadsPerBlock = 256;
hipLaunchKernelGGL(( vector_pushback), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, d_obj_ptr);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(obj_ptr, d_obj_ptr, sizeof(GPU::SimpleVector<int>), hipMemcpyDefault));
assert(obj_ptr->size() == (numBlocks * numThreadsPerBlock < maxN ? numBlocks * numThreadsPerBlock : maxN));
hipLaunchKernelGGL(( vector_reset), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, d_obj_ptr);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(obj_ptr, d_obj_ptr, sizeof(GPU::SimpleVector<int>), hipMemcpyDefault));
assert(obj_ptr->size() == 0);
hipLaunchKernelGGL(( vector_emplace_back), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, d_obj_ptr);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(obj_ptr, d_obj_ptr, sizeof(GPU::SimpleVector<int>), hipMemcpyDefault));
assert(obj_ptr->size() == (numBlocks * numThreadsPerBlock < maxN ? numBlocks * numThreadsPerBlock : maxN));
cudaCheck(hipMemcpy(data_ptr, d_data_ptr, obj_ptr->size() * sizeof(int), hipMemcpyDefault));
cudaCheck(hipHostFree(obj_ptr));
cudaCheck(hipHostFree(data_ptr));
cudaCheck(hipHostFree(tmp_obj_ptr));
cudaCheck(hipFree(d_data_ptr));
cudaCheck(hipFree(d_obj_ptr));
std::cout << "TEST PASSED" << std::endl;
return 0;
}
| 1c29079c198930b4be6666d58ff40c2e36f8e3d2.cu | // author: Felice Pantaleo, CERN, 2018
#include <cassert>
#include <iostream>
#include <new>
#include <cuda.h>
#include <cuda_runtime.h>
#include "HeterogeneousCore/CUDAUtilities/interface/GPUSimpleVector.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/exitSansCUDADevices.h"
__global__ void vector_pushback(GPU::SimpleVector<int> *foo) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
foo->push_back(index);
}
__global__ void vector_reset(GPU::SimpleVector<int> *foo) { foo->reset(); }
__global__ void vector_emplace_back(GPU::SimpleVector<int> *foo) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
foo->emplace_back(index);
}
int main() {
exitSansCUDADevices();
auto maxN = 10000;
GPU::SimpleVector<int> *obj_ptr = nullptr;
GPU::SimpleVector<int> *d_obj_ptr = nullptr;
GPU::SimpleVector<int> *tmp_obj_ptr = nullptr;
int *data_ptr = nullptr;
int *d_data_ptr = nullptr;
cudaCheck(cudaMallocHost(&obj_ptr, sizeof(GPU::SimpleVector<int>)));
cudaCheck(cudaMallocHost(&data_ptr, maxN * sizeof(int)));
cudaCheck(cudaMalloc(&d_data_ptr, maxN * sizeof(int)));
auto v = GPU::make_SimpleVector(obj_ptr, maxN, data_ptr);
cudaCheck(cudaMallocHost(&tmp_obj_ptr, sizeof(GPU::SimpleVector<int>)));
GPU::make_SimpleVector(tmp_obj_ptr, maxN, d_data_ptr);
assert(tmp_obj_ptr->size() == 0);
assert(tmp_obj_ptr->capacity() == static_cast<int>(maxN));
cudaCheck(cudaMalloc(&d_obj_ptr, sizeof(GPU::SimpleVector<int>)));
// ... and copy the object to the device.
cudaCheck(cudaMemcpy(d_obj_ptr, tmp_obj_ptr, sizeof(GPU::SimpleVector<int>), cudaMemcpyDefault));
int numBlocks = 5;
int numThreadsPerBlock = 256;
vector_pushback<<<numBlocks, numThreadsPerBlock>>>(d_obj_ptr);
cudaCheck(cudaGetLastError());
cudaCheck(cudaDeviceSynchronize());
cudaCheck(cudaMemcpy(obj_ptr, d_obj_ptr, sizeof(GPU::SimpleVector<int>), cudaMemcpyDefault));
assert(obj_ptr->size() == (numBlocks * numThreadsPerBlock < maxN ? numBlocks * numThreadsPerBlock : maxN));
vector_reset<<<numBlocks, numThreadsPerBlock>>>(d_obj_ptr);
cudaCheck(cudaGetLastError());
cudaCheck(cudaDeviceSynchronize());
cudaCheck(cudaMemcpy(obj_ptr, d_obj_ptr, sizeof(GPU::SimpleVector<int>), cudaMemcpyDefault));
assert(obj_ptr->size() == 0);
vector_emplace_back<<<numBlocks, numThreadsPerBlock>>>(d_obj_ptr);
cudaCheck(cudaGetLastError());
cudaCheck(cudaDeviceSynchronize());
cudaCheck(cudaMemcpy(obj_ptr, d_obj_ptr, sizeof(GPU::SimpleVector<int>), cudaMemcpyDefault));
assert(obj_ptr->size() == (numBlocks * numThreadsPerBlock < maxN ? numBlocks * numThreadsPerBlock : maxN));
cudaCheck(cudaMemcpy(data_ptr, d_data_ptr, obj_ptr->size() * sizeof(int), cudaMemcpyDefault));
cudaCheck(cudaFreeHost(obj_ptr));
cudaCheck(cudaFreeHost(data_ptr));
cudaCheck(cudaFreeHost(tmp_obj_ptr));
cudaCheck(cudaFree(d_data_ptr));
cudaCheck(cudaFree(d_obj_ptr));
std::cout << "TEST PASSED" << std::endl;
return 0;
}
|
2d1748581b98c598bbae5e5ba7e2f1f805863718.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "ripplingcompute.h"
__global__ void kernelFillImage ( uchar4* ptrDevImageGL, int w, int h, float t ) {
int tid = Indice2D::tid ();
int nbThreads = Indice2D::nbThread ();
int s = tid;
int i, j;
while ( s < h * w ) {
Indice2D::pixelIJ ( s, w, i, j );
computeRippling ( ptrDevImageGL[s], w, i, j, t );
s += nbThreads;
}
}
void launchKernelFillImage ( uchar4* ptrDevImageGL, int w, int h, float t ) {
dim3 dg = dim3 ( 16, 1, 1 );
dim3 db = dim3 ( 32, 1, 1 );
hipLaunchKernelGGL(( kernelFillImage), dim3(dg), dim3(db), 0, 0, ptrDevImageGL, w, h, t);
}
| 2d1748581b98c598bbae5e5ba7e2f1f805863718.cu | #include "Indice2D.h"
#include "ripplingcompute.h"
__global__ void kernelFillImage ( uchar4* ptrDevImageGL, int w, int h, float t ) {
int tid = Indice2D::tid ();
int nbThreads = Indice2D::nbThread ();
int s = tid;
int i, j;
while ( s < h * w ) {
Indice2D::pixelIJ ( s, w, i, j );
computeRippling ( ptrDevImageGL[s], w, i, j, t );
s += nbThreads;
}
}
void launchKernelFillImage ( uchar4* ptrDevImageGL, int w, int h, float t ) {
dim3 dg = dim3 ( 16, 1, 1 );
dim3 db = dim3 ( 32, 1, 1 );
kernelFillImage<<<dg, db>>>(ptrDevImageGL, w, h, t);
}
|
aad921cac922a78e60cf136371bf1f9eb049ac7c.hip | // !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include<stdio.h>
#include<stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include "Point.cpp"
#include "stopwatch.hpp"
using namespace std;
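// kernelFoo: each 4x4 thread block evaluates one (u,v) sample of the bicubic B-spline surface by
// repeated linear interpolation over its local 4x4 control net, writing the surface point to
// opx/opy/opz and the (unnormalised) normal, built from the u/v partial derivatives, to ofx/ofy/ofz.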
__global__ void kernelFoo(float *CPx,float *CPy,float *CPz,float *u_knots,float *v_knots,float *opx,float *opy,float *opz,float *ofx,float *ofy,float *ofz)
{
volatile __shared__ float u[6],v[6],uf,vf;
volatile __shared__ float Px[4][4],P1x[4][4],Pux[4][4],Pvx[4][4];
volatile __shared__ float Py[4][4],P1y[4][4],Puy[4][4],Pvy[4][4];
volatile __shared__ float Pz[4][4],P1z[4][4],Puz[4][4],Pvz[4][4];
volatile __shared__ int iff,jff;
int i = threadIdx.x;
int j = threadIdx.y;
int ib = blockIdx.x;
int jb = blockIdx.y;
if(i==0&&j==0)
{
uf = ib*4.0/100.0 + 0.5*4.0/100.0;
vf = jb*4.0/100.0 + 0.5*4.0/100.0;
for(int ii = 0;ii<9;ii++) if(u_knots[ii]<=uf&&uf<u_knots[ii+1]) iff = ii;
for(int jj = 0;jj<9;jj++) if(v_knots[jj]<=vf&&vf<v_knots[jj+1]) jff = jj;
for(int ii = 0;ii<6;ii++) u[ii] = u_knots[ii + iff - 2];
for(int jj = 0;jj<6;jj++) v[jj] = v_knots[jj + jff - 2];
}
__syncthreads();
int ii = i + iff - 2;
int jj = j + jff - 2;
Px[i][j] = CPx[7*jj+ii];
Py[i][j] = CPy[7*jj+ii];
Pz[i][j] = CPz[7*jj+ii];
__syncthreads();
for(int k = 0; k<=2; k++)
{
if(i<=2-k&&j<=2-k)
{
float uo = (uf-u[i+k])/(u[i+3]-u[i+k]);
float vo = (vf-v[j+k])/(v[j+3]-v[j+k]);
P1x[i][j] = Px[i][j]*(1.0-uo)*(1.0-vo) + Px[i+1][j]*uo*(1-vo) + Px[i][j+1]*vo*(1-uo)+Px[i+1][j+1]*uo*vo;
P1y[i][j] = Py[i][j]*(1.0-uo)*(1.0-vo) + Py[i+1][j]*uo*(1-vo) + Py[i][j+1]*vo*(1-uo)+Py[i+1][j+1]*uo*vo;
P1z[i][j] = Pz[i][j]*(1.0-uo)*(1.0-vo) + Pz[i+1][j]*uo*(1-vo) + Pz[i][j+1]*vo*(1-uo)+Pz[i+1][j+1]*uo*vo;
if(k==2)
{
Pux[i][j] = Px[i][j]*(-1)*(1-vo) + Px[i+1][j]*(1)*(1-vo) + Px[i][j+1]*vo*(-1)+Px[i+1][j+1]*(1)*vo;
Puy[i][j] = Py[i][j]*(-1)*(1-vo) + Py[i+1][j]*(1)*(1-vo) + Py[i][j+1]*vo*(-1)+Py[i+1][j+1]*(1)*vo;
Puz[i][j] = Pz[i][j]*(-1)*(1-vo) + Pz[i+1][j]*(1)*(1-vo) + Pz[i][j+1]*vo*(-1)+Pz[i+1][j+1]*(1)*vo;
Pvx[i][j] = Px[i][j]*(1-uo)*(-1) + Px[i+1][j]*uo*(-1) + Px[i][j+1]*1*(1-uo)+Px[i+1][j+1]*uo*1;
Pvy[i][j] = Py[i][j]*(1-uo)*(-1) + Py[i+1][j]*uo*(-1) + Py[i][j+1]*1*(1-uo)+Py[i+1][j+1]*uo*1;
Pvz[i][j] = Pz[i][j]*(1-uo)*(-1) + Pz[i+1][j]*uo*(-1) + Pz[i][j+1]*1*(1-uo)+Pz[i+1][j+1]*uo*1;
}
}
__syncthreads();
if(i<=2-k&&j<=2-k) Px[i][j] = P1x[i][j];
if(i<=2-k&&j<=2-k) Py[i][j] = P1y[i][j];
if(i<=2-k&&j<=2-k) Pz[i][j] = P1z[i][j];
__syncthreads();
}
// Point and first order derivatives
if(i==0&&j==0)
{
float Sx = Px[0][0];
float Sy = Py[0][0];
float Sz = Pz[0][0];
float Sux = 3.0*Pux[0][0];
float Suy = 3.0*Puy[0][0];
float Suz = 3.0*Puz[0][0];
float Svx = 3.0*Pvx[0][0];
float Svy = 3.0*Pvy[0][0];
float Svz = 3.0*Pvz[0][0];
opx[jb*100 + ib] = Sx;
opy[jb*100 + ib] = Sy;
opz[jb*100 + ib] = Sz;
ofx[jb*100 + ib] = Suy*Svz-Suz*Svy;//xNormal
ofy[jb*100 + ib] = Suz*Svx-Sux*Svz;//yNormal
ofz[jb*100 + ib] = Sux*Svy-Suy*Svx;//zNormal
}
}
void seqkernelFoo(Point CP[49],float u_knots[9],float v_knots[9],Point *op);
int main()
{
//Choosing the correct interval of knots and control points for the following value
Point op[10000];// Points from the sequential reference computation
Point gridop[100][100],gridof[100][100];// Point and normal offset
Point P[49] = {//Grid of control points
Point(0,0,0), Point(10,0,0), Point(20,0,0), Point(30,0,0), Point(40,0,0),Point(50,0,0),Point(60,0,0),
Point(0,10,0),Point(10,10,10),Point(20,10,30),Point(30,10,25),Point(40,10,15),Point(50,10,15),Point(60,10,5),
Point(0,20,5),Point(10,20,20),Point(20,20,40),Point(30,20,45),Point(40,20,35),Point(50,20,30),Point(60,20,15),
Point(0,30,15),Point(10,30,20),Point(20,30,35),Point(30,30,40),Point(40,30,45),Point(50,30,35),Point(60,30,25),
Point(0,40,10),Point(10,40,30),Point(20,40,35),Point(30,40,35),Point(40,40,50),Point(50,40,40),Point(60,40,20),
Point(0,50,5),Point(10,50,15),Point(20,50,15),Point(30,50,25),Point(40,50,30),Point(50,50,25),Point(60,50,15),
Point(0,60,0),Point(10,60,5),Point(20,60,10),Point(30,60,15),Point(40,60,20),Point(50,60,15),Point(60,60,5),
};
float Px[49],Py[49],Pz[49];// Input control points
float opx[10000],opy[10000],opz[10000];// Point on the fabric surface
float ofx[10000],ofy[10000],ofz[10000];// Offset Point for yarn
for(int i = 0;i<49;i++)
{
Px[i] = P[i].x;
Py[i] = P[i].y;
Pz[i] = P[i].z;
}
float u[9] = {0,0,0,1,2,3,4,4,4};
float v[9] = {0,0,0,1,2,3,4,4,4};
// int sizeP = 49 * sizeof(Point);
int sizePx = 49 * sizeof(float);
// int sizeop = 10000 * sizeof(Point);
int sizeopx = 10000 * sizeof(float);
int sizeofx = 10000 * sizeof(float);
int sizek = 9 * sizeof(float);
// Point *cuda_P; = hipMalloc(&cuda_P,sizeP);
float *cuda_Px; hipMalloc(&cuda_Px,sizePx);
float *cuda_Py; hipMalloc(&cuda_Py,sizePx);
float *cuda_Pz; hipMalloc(&cuda_Pz,sizePx);
// Point *cuda_op; = (Point *) hipMalloc(sizeop);
float *cuda_opx; hipMalloc(&cuda_opx,sizeopx);
float *cuda_opy; hipMalloc(&cuda_opy,sizeopx);
float *cuda_opz; hipMalloc(&cuda_opz,sizeopx);
float *cuda_ofx; hipMalloc(&cuda_ofx,sizeofx);
float *cuda_ofy; hipMalloc(&cuda_ofy,sizeofx);
float *cuda_ofz; hipMalloc(&cuda_ofz,sizeofx);
float *cuda_u_knots; hipMalloc(&cuda_u_knots,sizek);
float *cuda_v_knots; hipMalloc(&cuda_v_knots,sizek);
dim3 DimGrid(100, 100); // 10000 thread blocks
dim3 DimBlock(4, 4); // 16 threads per block
//defining variables for timing
hipEvent_t startEvent_inc, stopEvent_inc;
hipEventCreate(&startEvent_inc);
hipEventCreate(&stopEvent_inc);
float elapsedTime_inc,seqTime;
hipEventRecord(startEvent_inc, 0); // starting timing for inclusive
// hipMemcpy(cuda_P, P, sizeP, hipMemcpyHostToDevice);
hipMemcpy(cuda_Px, Px, sizePx, hipMemcpyHostToDevice);
hipMemcpy(cuda_Py, Py, sizePx, hipMemcpyHostToDevice);
hipMemcpy(cuda_Pz, Pz, sizePx, hipMemcpyHostToDevice);
hipMemcpy(cuda_u_knots, u, sizek, hipMemcpyHostToDevice);
hipMemcpy(cuda_v_knots, v, sizek, hipMemcpyHostToDevice);
//hipLaunchKernelGGL(( kernelFoo), dim3(DimGrid),dim3(DimBlock), 0, 0, cuda_P,cuda_u_knots,cuda_v_knots,cuda_op);
hipLaunchKernelGGL(( kernelFoo), dim3(DimGrid),dim3(DimBlock), 0, 0, cuda_Px,cuda_Py,cuda_Pz,cuda_u_knots,cuda_v_knots,cuda_opx,cuda_opy,cuda_opz,cuda_ofx,cuda_ofy,cuda_ofz);
// hipMemcpy(op, cuda_op,sizeop, hipMemcpyDeviceToHost);
hipMemcpy(opx, cuda_opx,sizeopx, hipMemcpyDeviceToHost);
hipMemcpy(opy, cuda_opy,sizeopx, hipMemcpyDeviceToHost);
hipMemcpy(opz, cuda_opz,sizeopx, hipMemcpyDeviceToHost);
hipMemcpy(ofx, cuda_ofx,sizeofx, hipMemcpyDeviceToHost);
hipMemcpy(ofy, cuda_ofy,sizeofx, hipMemcpyDeviceToHost);
hipMemcpy(ofz, cuda_ofz,sizeofx, hipMemcpyDeviceToHost);
hipEventRecord(stopEvent_inc, 0); //ending timing for inclusive
hipEventSynchronize(stopEvent_inc);
hipEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);
stopwatch<std::milli, float> sw;
// Start the timer
sw.start();
seqkernelFoo(P,u,v,op);
sw.stop();
seqTime = sw.count();
printf("Cuda time (ms)= %f\n",elapsedTime_inc);
printf("Sequential time (ms) = %f\n",seqTime);
float tol = 1e-4;
for(int i = 0;i<10000;i++)
{
float tolx = fabs(op[i].x - opx[i]);
float toly = fabs(op[i].y - opy[i]);
float tolz = fabs(op[i].z - opz[i]);
if(tolx>tol||toly>tol||tolz>tol)
{ printf("Tolerence problem detected : ");
printf("i = %i\t",i);
printf("%f %f %f\t",op[i].x,op[i].y,op[i].z);
printf("%f %f %f\n",opx[i],opy[i],opz[i]);
}
}
for(int i = 0;i<100;i++)
{
for(int j = 0;j<100;j++)
{
gridop[i][j] = Point(opx[100*j+i],opy[100*j+i],opz[100*j+i]);
gridof[i][j] = Point(ofx[100*j+i],ofy[100*j+i],ofz[100*j+i]);
}
}
FILE* fp = fopen("op.data","w");
for(int i = 0;i<100;i+=10)
{
for(int j = 0;j<100;j+=10)
{
fprintf(fp,"%f\t%f\t%f\n",gridop[i][j].x,gridop[i][j].y,gridop[i][j].z);
} fprintf(fp,"\n");
}
fclose(fp);
float d1;
fp = fopen("fabric.data","w");
for(int i = 0;i<100;i++)// Y-direction Yarns
{
for(int j = 0;j<100;j++)
{
if(j%2==0) d1 = -1;//Alternating yarns
else d1 = 1;
if(i%2==0) d1 = -d1;
Point yP = gridop[i][j] + d1*gridof[i][j]/norm(gridof[i][j]);//yarnPoint
fprintf(fp,"%f\t%f\t%f\n",yP.x,yP.y,yP.z);
} fprintf(fp,"\n\n");
}fprintf(fp,"\n\n");
for(int j = 0;j<100;j++)// X-direction Yarns
{
for(int i = 0;i<100;i++)
{
if(i%2==0) d1 = 1;//Alternating yarns
else d1 = -1;
if(j%2==0) d1 = -d1;
Point yP = gridop[i][j] + d1*gridof[i][j]/norm(gridof[i][j]);//yarnPoint
fprintf(fp,"%f\t%f\t%f\n",yP.x,yP.y,yP.z);
} fprintf(fp,"\n\n");
}fprintf(fp,"\n\n");
fclose(fp);
return 0;
}
void seqkernelFoo(Point CP[49],float u_knots[9],float v_knots[9],Point *op)
{
float u[6],v[6],uf,vf;int iff,jff;
Point P[4][4],P1[4][4],Pu[4][4],Pv[4][4];
for(int ib = 0;ib<100;ib++)
{
for(int jb = 0;jb<100;jb++)
{
uf = ib*4.0/100.0 + 0.5*4.0/100.0;
vf = jb*4.0/100.0 + 0.5*4.0/100.0;
for(int ii = 0;ii<8;ii++) if((u_knots[ii]<=uf)&&(uf<u_knots[ii+1])) iff = ii;
for(int jj = 0;jj<8;jj++) if((v_knots[jj]<=vf)&&(vf<v_knots[jj+1])) jff = jj;
for(int ii = 0;ii<6;ii++) u[ii] = u_knots[ii + iff - 2];
for(int jj = 0;jj<6;jj++) v[jj] = v_knots[jj + jff - 2];
for(int i = 0;i<4;i++)
{
for(int j = 0;j<4;j++)
{
int ii = i + iff - 2;
int jj = j + jff - 2;
P[i][j] = CP[7*jj+ii];
}
}
for(int k = 0; k<=2; k++)
{
for(int i = 0;i<=2-k;i++)
{
for(int j = 0;j<=2-k;j++)
{
float uo = (uf-u[i+k])/(u[i+3]-u[i+k]);
float vo = (vf-v[j+k])/(v[j+3]-v[j+k]);
P1[i][j] = P[i][j]*(1.0-uo)*(1.0-vo) + P[i+1][j]*uo*(1-vo) + P[i][j+1]*vo*(1-uo)+P[i+1][j+1]*uo*vo;
if(k==2)
{
Pu[i][j] = P[i][j]*(-1)*(1-vo) + P[i+1][j]*(1)*(1-vo) + P[i][j+1]*vo*(-1)+P[i+1][j+1]*(1)*vo;
Pv[i][j] = P[i][j]*(1-uo)*(-1) + P[i+1][j]*uo*(-1) + P[i][j+1]*1*(1-uo)+P[i+1][j+1]*uo*1;
}
}
}
for(int i = 0;i<=2-k;i++){
for(int j = 0;j<=2-k;j++){
P[i][j] = P1[i][j];}}
}
// Point and first order derivatives
Point S = P[0][0];
Point Su = 3.0*Pu[0][0];
Point Sv = 3.0*Pv[0][0];
op[jb*100+ib] = S;
}
}
}
| aad921cac922a78e60cf136371bf1f9eb049ac7c.cu | #include<iostream>
#include<stdio.h>
#include<stdlib.h>
#include <cuda.h>
#include <math.h>
#include "Point.cpp"
#include "stopwatch.hpp"
using namespace std;
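// kernelFoo: each 4x4 thread block evaluates one (u,v) sample of the bicubic B-spline surface by
// repeated linear interpolation over its local 4x4 control net, writing the surface point to
// opx/opy/opz and the (unnormalised) normal, built from the u/v partial derivatives, to ofx/ofy/ofz.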
__global__ void kernelFoo(float *CPx,float *CPy,float *CPz,float *u_knots,float *v_knots,float *opx,float *opy,float *opz,float *ofx,float *ofy,float *ofz)
{
volatile __shared__ float u[6],v[6],uf,vf;
volatile __shared__ float Px[4][4],P1x[4][4],Pux[4][4],Pvx[4][4];
volatile __shared__ float Py[4][4],P1y[4][4],Puy[4][4],Pvy[4][4];
volatile __shared__ float Pz[4][4],P1z[4][4],Puz[4][4],Pvz[4][4];
volatile __shared__ int iff,jff;
int i = threadIdx.x;
int j = threadIdx.y;
int ib = blockIdx.x;
int jb = blockIdx.y;
if(i==0&&j==0)
{
uf = ib*4.0/100.0 + 0.5*4.0/100.0;
vf = jb*4.0/100.0 + 0.5*4.0/100.0;
for(int ii = 0;ii<9;ii++) if(u_knots[ii]<=uf&&uf<u_knots[ii+1]) iff = ii;
for(int jj = 0;jj<9;jj++) if(v_knots[jj]<=vf&&vf<v_knots[jj+1]) jff = jj;
for(int ii = 0;ii<6;ii++) u[ii] = u_knots[ii + iff - 2];
for(int jj = 0;jj<6;jj++) v[jj] = v_knots[jj + jff - 2];
}
__syncthreads();
int ii = i + iff - 2;
int jj = j + jff - 2;
Px[i][j] = CPx[7*jj+ii];
Py[i][j] = CPy[7*jj+ii];
Pz[i][j] = CPz[7*jj+ii];
__syncthreads();
for(int k = 0; k<=2; k++)
{
if(i<=2-k&&j<=2-k)
{
float uo = (uf-u[i+k])/(u[i+3]-u[i+k]);
float vo = (vf-v[j+k])/(v[j+3]-v[j+k]);
P1x[i][j] = Px[i][j]*(1.0-uo)*(1.0-vo) + Px[i+1][j]*uo*(1-vo) + Px[i][j+1]*vo*(1-uo)+Px[i+1][j+1]*uo*vo;
P1y[i][j] = Py[i][j]*(1.0-uo)*(1.0-vo) + Py[i+1][j]*uo*(1-vo) + Py[i][j+1]*vo*(1-uo)+Py[i+1][j+1]*uo*vo;
P1z[i][j] = Pz[i][j]*(1.0-uo)*(1.0-vo) + Pz[i+1][j]*uo*(1-vo) + Pz[i][j+1]*vo*(1-uo)+Pz[i+1][j+1]*uo*vo;
if(k==2)
{
Pux[i][j] = Px[i][j]*(-1)*(1-vo) + Px[i+1][j]*(1)*(1-vo) + Px[i][j+1]*vo*(-1)+Px[i+1][j+1]*(1)*vo;
Puy[i][j] = Py[i][j]*(-1)*(1-vo) + Py[i+1][j]*(1)*(1-vo) + Py[i][j+1]*vo*(-1)+Py[i+1][j+1]*(1)*vo;
Puz[i][j] = Pz[i][j]*(-1)*(1-vo) + Pz[i+1][j]*(1)*(1-vo) + Pz[i][j+1]*vo*(-1)+Pz[i+1][j+1]*(1)*vo;
Pvx[i][j] = Px[i][j]*(1-uo)*(-1) + Px[i+1][j]*uo*(-1) + Px[i][j+1]*1*(1-uo)+Px[i+1][j+1]*uo*1;
Pvy[i][j] = Py[i][j]*(1-uo)*(-1) + Py[i+1][j]*uo*(-1) + Py[i][j+1]*1*(1-uo)+Py[i+1][j+1]*uo*1;
Pvz[i][j] = Pz[i][j]*(1-uo)*(-1) + Pz[i+1][j]*uo*(-1) + Pz[i][j+1]*1*(1-uo)+Pz[i+1][j+1]*uo*1;
}
}
__syncthreads();
if(i<=2-k&&j<=2-k) Px[i][j] = P1x[i][j];
if(i<=2-k&&j<=2-k) Py[i][j] = P1y[i][j];
if(i<=2-k&&j<=2-k) Pz[i][j] = P1z[i][j];
__syncthreads();
}
// Point and first order derivatives
if(i==0&&j==0)
{
float Sx = Px[0][0];
float Sy = Py[0][0];
float Sz = Pz[0][0];
float Sux = 3.0*Pux[0][0];
float Suy = 3.0*Puy[0][0];
float Suz = 3.0*Puz[0][0];
float Svx = 3.0*Pvx[0][0];
float Svy = 3.0*Pvy[0][0];
float Svz = 3.0*Pvz[0][0];
opx[jb*100 + ib] = Sx;
opy[jb*100 + ib] = Sy;
opz[jb*100 + ib] = Sz;
ofx[jb*100 + ib] = Suy*Svz-Suz*Svy;//xNormal
ofy[jb*100 + ib] = Suz*Svx-Sux*Svz;//yNormal
ofz[jb*100 + ib] = Sux*Svy-Suy*Svx;//zNormal
}
}
void seqkernelFoo(Point CP[49],float u_knots[9],float v_knots[9],Point *op);
int main()
{
//Choosing the correct interval of knots and control points for the following value
Point op[10000];// Points from the sequential reference computation
Point gridop[100][100],gridof[100][100];// Point and normal offset
Point P[49] = {//Grid of control points
Point(0,0,0), Point(10,0,0), Point(20,0,0), Point(30,0,0), Point(40,0,0),Point(50,0,0),Point(60,0,0),
Point(0,10,0),Point(10,10,10),Point(20,10,30),Point(30,10,25),Point(40,10,15),Point(50,10,15),Point(60,10,5),
Point(0,20,5),Point(10,20,20),Point(20,20,40),Point(30,20,45),Point(40,20,35),Point(50,20,30),Point(60,20,15),
Point(0,30,15),Point(10,30,20),Point(20,30,35),Point(30,30,40),Point(40,30,45),Point(50,30,35),Point(60,30,25),
Point(0,40,10),Point(10,40,30),Point(20,40,35),Point(30,40,35),Point(40,40,50),Point(50,40,40),Point(60,40,20),
Point(0,50,5),Point(10,50,15),Point(20,50,15),Point(30,50,25),Point(40,50,30),Point(50,50,25),Point(60,50,15),
Point(0,60,0),Point(10,60,5),Point(20,60,10),Point(30,60,15),Point(40,60,20),Point(50,60,15),Point(60,60,5),
};
float Px[49],Py[49],Pz[49];// Input control points
float opx[10000],opy[10000],opz[10000];// Point on the fabric surface
float ofx[10000],ofy[10000],ofz[10000];// Offset Point for yarn
for(int i = 0;i<49;i++)
{
Px[i] = P[i].x;
Py[i] = P[i].y;
Pz[i] = P[i].z;
}
float u[9] = {0,0,0,1,2,3,4,4,4};
float v[9] = {0,0,0,1,2,3,4,4,4};
// int sizeP = 49 * sizeof(Point);
int sizePx = 49 * sizeof(float);
// int sizeop = 10000 * sizeof(Point);
int sizeopx = 10000 * sizeof(float);
int sizeofx = 10000 * sizeof(float);
int sizek = 9 * sizeof(float);
// Point *cuda_P; = cudaMalloc(&cuda_P,sizeP);
float *cuda_Px; cudaMalloc(&cuda_Px,sizePx);
float *cuda_Py; cudaMalloc(&cuda_Py,sizePx);
float *cuda_Pz; cudaMalloc(&cuda_Pz,sizePx);
// Point *cuda_op; = (Point *) cudaMalloc(sizeop);
float *cuda_opx; cudaMalloc(&cuda_opx,sizeopx);
float *cuda_opy; cudaMalloc(&cuda_opy,sizeopx);
float *cuda_opz; cudaMalloc(&cuda_opz,sizeopx);
float *cuda_ofx; cudaMalloc(&cuda_ofx,sizeofx);
float *cuda_ofy; cudaMalloc(&cuda_ofy,sizeofx);
float *cuda_ofz; cudaMalloc(&cuda_ofz,sizeofx);
float *cuda_u_knots; cudaMalloc(&cuda_u_knots,sizek);
float *cuda_v_knots; cudaMalloc(&cuda_v_knots,sizek);
dim3 DimGrid(100, 100); // 10000 thread blocks
dim3 DimBlock(4, 4); // 16 threads per block
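	// Launch geometry: the 100x100 grid gives one block per (uf, vf) sample, and each
	// 4x4 block holds the 16 threads that stage the 4x4 control patch for that sample.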
//defining variables for timing
cudaEvent_t startEvent_inc, stopEvent_inc;
cudaEventCreate(&startEvent_inc);
cudaEventCreate(&stopEvent_inc);
float elapsedTime_inc,seqTime;
cudaEventRecord(startEvent_inc, 0); // starting timing for inclusive
// cudaMemcpy(cuda_P, P, sizeP, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_Px, Px, sizePx, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_Py, Py, sizePx, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_Pz, Pz, sizePx, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_u_knots, u, sizek, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_v_knots, v, sizek, cudaMemcpyHostToDevice);
// kernelFoo<<<DimGrid,DimBlock>>>(cuda_P,cuda_u_knots,cuda_v_knots,cuda_op);
kernelFoo<<<DimGrid,DimBlock>>>(cuda_Px,cuda_Py,cuda_Pz,cuda_u_knots,cuda_v_knots,cuda_opx,cuda_opy,cuda_opz,cuda_ofx,cuda_ofy,cuda_ofz);
// cudaMemcpy(op, cuda_op,sizeop, cudaMemcpyDeviceToHost);
cudaMemcpy(opx, cuda_opx,sizeopx, cudaMemcpyDeviceToHost);
cudaMemcpy(opy, cuda_opy,sizeopx, cudaMemcpyDeviceToHost);
cudaMemcpy(opz, cuda_opz,sizeopx, cudaMemcpyDeviceToHost);
cudaMemcpy(ofx, cuda_ofx,sizeofx, cudaMemcpyDeviceToHost);
cudaMemcpy(ofy, cuda_ofy,sizeofx, cudaMemcpyDeviceToHost);
cudaMemcpy(ofz, cuda_ofz,sizeofx, cudaMemcpyDeviceToHost);
cudaEventRecord(stopEvent_inc, 0); //ending timing for inclusive
cudaEventSynchronize(stopEvent_inc);
cudaEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);
stopwatch<std::milli, float> sw;
// Start the timer
sw.start();
seqkernelFoo(P,u,v,op);
sw.stop();
seqTime = sw.count();
printf("Cuda time (ms)= %f\n",elapsedTime_inc);
printf("Sequential time (ms) = %f\n",seqTime);
float tol = 1e-4;
for(int i = 0;i<10000;i++)
{
float tolx = fabs(op[i].x - opx[i]);
float toly = fabs(op[i].y - opy[i]);
float tolz = fabs(op[i].z - opz[i]);
if(tolx>tol||toly>tol||tolz>tol)
			{	printf("Tolerance problem detected : ");
printf("i = %i\t",i);
printf("%f %f %f\t",op[i].x,op[i].y,op[i].z);
printf("%f %f %f\n",opx[i],opy[i],opz[i]);
}
}
for(int i = 0;i<100;i++)
{
for(int j = 0;j<100;j++)
{
gridop[i][j] = Point(opx[100*j+i],opy[100*j+i],opz[100*j+i]);
gridof[i][j] = Point(ofx[100*j+i],ofy[100*j+i],ofz[100*j+i]);
}
}
FILE* fp = fopen("op.data","w");
for(int i = 0;i<100;i+=10)
{
for(int j = 0;j<100;j+=10)
{
fprintf(fp,"%f\t%f\t%f\n",gridop[i][j].x,gridop[i][j].y,gridop[i][j].z);
} fprintf(fp,"\n");
}
fclose(fp);
float d1;
fp = fopen("fabric.data","w");
for(int i = 0;i<100;i++)// Y-direction Yarns
{
for(int j = 0;j<100;j++)
{
if(j%2==0) d1 = -1;//Alternating yarns
else d1 = 1;
if(i%2==0) d1 = -d1;
Point yP = gridop[i][j] + d1*gridof[i][j]/norm(gridof[i][j]);//yarnPoint
fprintf(fp,"%f\t%f\t%f\n",yP.x,yP.y,yP.z);
} fprintf(fp,"\n\n");
}fprintf(fp,"\n\n");
for(int j = 0;j<100;j++)// X-direction Yarns
{
for(int i = 0;i<100;i++)
{
if(i%2==0) d1 = 1;//Alternating yarns
else d1 = -1;
if(j%2==0) d1 = -d1;
Point yP = gridop[i][j] + d1*gridof[i][j]/norm(gridof[i][j]);//yarnPoint
fprintf(fp,"%f\t%f\t%f\n",yP.x,yP.y,yP.z);
} fprintf(fp,"\n\n");
}fprintf(fp,"\n\n");
fclose(fp);
return 0;
}
void seqkernelFoo(Point CP[49],float u_knots[9],float v_knots[9],Point *op)
{
float u[6],v[6],uf,vf;int iff,jff;
Point P[4][4],P1[4][4],Pu[4][4],Pv[4][4];
for(int ib = 0;ib<100;ib++)
{
for(int jb = 0;jb<100;jb++)
{
uf = ib*4.0/100.0 + 0.5*4.0/100.0;
vf = jb*4.0/100.0 + 0.5*4.0/100.0;
for(int ii = 0;ii<8;ii++) if((u_knots[ii]<=uf)&&(uf<u_knots[ii+1])) iff = ii;
for(int jj = 0;jj<8;jj++) if((v_knots[jj]<=vf)&&(vf<v_knots[jj+1])) jff = jj;
for(int ii = 0;ii<6;ii++) u[ii] = u_knots[ii + iff - 2];
for(int jj = 0;jj<6;jj++) v[jj] = v_knots[jj + jff - 2];
for(int i = 0;i<4;i++)
{
for(int j = 0;j<4;j++)
{
int ii = i + iff - 2;
int jj = j + jff - 2;
P[i][j] = CP[7*jj+ii];
}
}
for(int k = 0; k<=2; k++)
{
for(int i = 0;i<=2-k;i++)
{
for(int j = 0;j<=2-k;j++)
{
float uo = (uf-u[i+k])/(u[i+3]-u[i+k]);
float vo = (vf-v[j+k])/(v[j+3]-v[j+k]);
P1[i][j] = P[i][j]*(1.0-uo)*(1.0-vo) + P[i+1][j]*uo*(1-vo) + P[i][j+1]*vo*(1-uo)+P[i+1][j+1]*uo*vo;
if(k==2)
{
Pu[i][j] = P[i][j]*(-1)*(1-vo) + P[i+1][j]*(1)*(1-vo) + P[i][j+1]*vo*(-1)+P[i+1][j+1]*(1)*vo;
Pv[i][j] = P[i][j]*(1-uo)*(-1) + P[i+1][j]*uo*(-1) + P[i][j+1]*1*(1-uo)+P[i+1][j+1]*uo*1;
}
}
}
for(int i = 0;i<=2-k;i++){
for(int j = 0;j<=2-k;j++){
P[i][j] = P1[i][j];}}
}
	// Point and first-order derivatives
Point S = P[0][0];
Point Su = 3.0*Pu[0][0];
Point Sv = 3.0*Pv[0][0];
op[jb*100+ib] = S;
}
}
}
|
dd7525f62037433392ba3f3c51aa233b3c978e2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<cuda.h>
char huffcodes[128][80];
typedef struct
{
char ch;
int freq;
}cnode;
typedef struct node
{
char id[20];
cnode c;
struct node *left;
struct node *right;
int weight;
}tnode;
typedef struct qnode
{
tnode *x;
struct qnode *next;
}qnode;
tnode *createnode()
{
tnode *newnode;
	newnode = (tnode*)malloc(sizeof(tnode));
newnode->left=NULL;
newnode->right=NULL;
return newnode;
}
__global__ void assignHuffcode(tnode *root,char *bin,int j)
{
if(root!=NULL)
{
if(root->left==NULL&&root->right==NULL)
{
int i;
for(i=0;i<=j;i++)
{
huffcodes[(int)root->id[0]][i]=bin[i];
}
}
++j;
bin[j]='0';
assignHuffcode(root->left,bin,j);
bin[j]='1';
assignHuffcode(root->right,bin,j);
}
}
__global__ void merge(cnode *s,int low,int mid, int high)
{
int i=low,j=mid+1,k=0;
cnode t[high-low+1];
while(i<=mid&&j<=high)
{
if(s[i].freq<s[j].freq)
t[k++]=s[i++];
else
t[k++]=s[j++];
}
while(i<=mid)
{
t[k++]=s[i++];
}
while(j<=high)
{
t[k++]=s[j++];
}
for(i=low;i<=high;i++)
s[i]=t[i-low];
}
__global__ void mergeSort(cnode *s,int low,int high)
{
if(low<high)
{
int mid=(low+high)/2;
mergeSort(s,low,mid);
mergeSort(s,mid+1,high);
merge(s,low,mid,high);
}
}
qnode *insert(qnode *qn,tnode *tn)
{
qnode *newnode;
	newnode = (qnode*)malloc(sizeof(qnode));
newnode->next=qn;
newnode->x=tn;
qn=newnode;
return qn;
}
qnode *delpnode(qnode *qn,tnode *t1)
{
qnode *delNode;
if(qn->x==t1)
{
delNode=qn;
qn=qn->next;
free(delNode);
return qn;
}
qnode *i1;
i1=qn;
while(i1->next->x!=t1)
{
i1=i1->next;
}
delNode=i1->next;
i1->next=i1->next->next;
free(delNode);
return qn;
}
tnode *min(qnode *qn)
{
qnode *i,*j;
i=qn;
j=qn;
while(j!=NULL)
{
if(j->x->weight<i->x->weight)
i=j;
j=j->next;
}
return i->x;
}
int binTochar(char *buffer)
{
int i,sum=0;
for(i=0;i<8;i++)
{
sum=sum*2+((int)buffer[i]-48);
}
return sum;
}
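// For example, binTochar("10000001") returns 129: the 8-character buffer is read as a
// big-endian string of '0'/'1' digits and folded into an int with sum = sum*2 + bit.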
__global__ void decToBin(char *bits,char ch1)
{
strcpy(bits,"00000000");
int num=ch1,i=7,m;
if(num<0)
num+=256;
while(num!=0)
{
m=num%2;
bits[i--]=m+48;
num/=2;
}
}
__global__ void treeOfCf(FILE *cf,tnode *root,char *buffer1,int *buffer1Index,int *cfChar)
{
if(root==NULL)
return;
if(root->left==NULL)
{
buffer1[(*buffer1Index)++]='1';
if(*buffer1Index==8)
{
fprintf(cf,"%c",binTochar(buffer1));
(*cfChar)++;
strcpy(buffer1,"");
*buffer1Index=0;
}
char s[9];
decToBin(s,root->c.ch);
int i;
for(i=0;i<8;i++)
{
buffer1[(*buffer1Index)++]=s[i];
if(*buffer1Index==8)
{
fprintf(cf,"%c",binTochar(buffer1));
(*cfChar)++;
strcpy(buffer1,"");
*buffer1Index=0;
}
}
}
else
{
buffer1[(*buffer1Index)++]='0';
if(*buffer1Index==8)
{
fprintf(cf,"%c",binTochar(buffer1));
(*cfChar)++;
strcpy(buffer1,"");
*buffer1Index=0;
}
treeOfCf(cf,root->left,buffer1,buffer1Index,cfChar);
treeOfCf(cf,root->right,buffer1,buffer1Index,cfChar);
}
}
__global__ void freeAllocatedMemory(tnode *root)
{
if(root!=NULL)
{
freeAllocatedMemory(root->left);
freeAllocatedMemory(root->right);
		free(root);
}
}
int main()
{
int n=128,m=0,i;
char ch,file[30],outputFileC[30];
cnode s[n];
for(i=0;i<n;i++)
{
s[i].ch=(char)i;
s[i].freq=0;
}
FILE *fp,*compressedFile;
printf("Enter the file name to be compressed:");
scanf("%s",file);
fp=fopen(file,"r");
if(fp==NULL)
{
printf("Couldn't open input file\n");
return 1;
}
//counting frequencies
printf("Enter the file name for compressed file:");
scanf("%s",outputFileC);
while(fread(&ch,sizeof(char),1,fp))
{
(s[(int)ch].freq)++;
m++;
}
mergeSort(s,0,n-1);
int j=0;
while(s[j].freq==0)
j++;
qnode *qu;
qu=NULL;
tnode *temp;
for(i=j;i<n;i++)
{
		temp = (tnode*)malloc(sizeof(tnode));
temp->id[0]=s[i].ch;
temp->c=s[i];
temp->left=NULL;
temp->right=NULL;
temp->weight=s[i].freq;
qu=insert(qu,temp);
}
qnode *q1;
q1=qu;
i=0;
while(q1!=NULL)
{
i++;
q1=q1->next;
}
tnode *root;
tnode *m1;
tnode *m2;
i=0;
while(i!=n-j-1)
{
m1=min(qu);
qu=delpnode(qu,m1);
m2=min(qu);
qu=delpnode(qu,m2);
tnode *parent;
		parent = (tnode*)malloc(sizeof(tnode));
parent->weight = m1->weight+m2->weight;
strcpy(parent->id,"mid-node");
if(m1->weight<m2->weight)
{
parent->left=m1;
parent->right=m2;
}
else
{
parent->left=m2;
parent->right=m1;
}
qu=insert(qu,parent);
i++;
}
root=qu->x;
char bin[100];
strcpy(bin,"0");
assignHuffcode(root,bin,-1);
compressedFile=fopen(outputFileC,"wb");
char buffer1[9]="";
int buffer1Index=0;
int charCount=0;
treeOfCf(compressedFile,root,buffer1,&buffer1Index,&charCount);
fseek(fp,0,SEEK_SET);
char readedCh;
char code[80];
int codeIndex=0;
fread(&readedCh,sizeof(char),1,fp);
strcpy(code,huffcodes[(int)readedCh]);
int codeLen=strlen(code);
for(;buffer1Index<8;)
{
buffer1[buffer1Index++]=code[codeIndex++];
if(codeIndex==codeLen)
{
codeIndex=0;
fread(&readedCh,sizeof(char),1,fp);
strcpy(code,huffcodes[(int)readedCh]);
codeLen=strlen(code);
}
}
fprintf(compressedFile,"%c",binTochar(buffer1));
char buffer[9]="";
int bufferIndex=0;
int flag=0;
int buf;
while(1)
{
for(bufferIndex=0;bufferIndex<8;)
{
if(codeIndex==codeLen)
{
if(fread(&readedCh,sizeof(char),1,fp)==0)
{
flag=1;
for(i=0;i<bufferIndex;i++)
fprintf(compressedFile,"%c",buffer[i]);
break;
}
strcpy(code,huffcodes[(int)readedCh]);
codeLen=strlen(code);
codeIndex=0;
}
buffer[bufferIndex++]=code[codeIndex++];
}
if(flag==1)
break;
buf=binTochar(buffer);
fprintf(compressedFile,"%c",(char)buf);
strcpy(buffer,"");
charCount++;
}
fprintf(compressedFile,"&%d",charCount);
fclose(fp);
fclose(compressedFile);
freeAllocatedMemory(root);
	free(qu);
printf("File successfully compressed");
return 0;
}
| dd7525f62037433392ba3f3c51aa233b3c978e2b.cu | #include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<cuda.h>
char huffcodes[128][80];
typedef struct
{
char ch;
int freq;
}cnode;
typedef struct node
{
char id[20];
cnode c;
struct node *left;
struct node *right;
int weight;
}tnode;
typedef struct qnode
{
tnode *x;
struct qnode *next;
}qnode;
tnode *createnode()
{
tnode *newnode;
	newnode = (tnode*)malloc(sizeof(tnode));
newnode->left=NULL;
newnode->right=NULL;
return newnode;
}
__global__ void assignHuffcode(tnode *root,char *bin,int j)
{
if(root!=NULL)
{
if(root->left==NULL&&root->right==NULL)
{
int i;
for(i=0;i<=j;i++)
{
huffcodes[(int)root->id[0]][i]=bin[i];
}
}
++j;
bin[j]='0';
assignHuffcode(root->left,bin,j);
bin[j]='1';
assignHuffcode(root->right,bin,j);
}
}
__global__ void merge(cnode *s,int low,int mid, int high)
{
int i=low,j=mid+1,k=0;
cnode t[high-low+1];
while(i<=mid&&j<=high)
{
if(s[i].freq<s[j].freq)
t[k++]=s[i++];
else
t[k++]=s[j++];
}
while(i<=mid)
{
t[k++]=s[i++];
}
while(j<=high)
{
t[k++]=s[j++];
}
for(i=low;i<=high;i++)
s[i]=t[i-low];
}
__global__ void mergeSort(cnode *s,int low,int high)
{
if(low<high)
{
int mid=(low+high)/2;
mergeSort(s,low,mid);
mergeSort(s,mid+1,high);
merge(s,low,mid,high);
}
}
qnode *insert(qnode *qn,tnode *tn)
{
qnode *newnode;
	newnode = (qnode*)malloc(sizeof(qnode));
newnode->next=qn;
newnode->x=tn;
qn=newnode;
return qn;
}
qnode *delpnode(qnode *qn,tnode *t1)
{
qnode *delNode;
if(qn->x==t1)
{
delNode=qn;
qn=qn->next;
free(delNode);
return qn;
}
qnode *i1;
i1=qn;
while(i1->next->x!=t1)
{
i1=i1->next;
}
delNode=i1->next;
i1->next=i1->next->next;
free(delNode);
return qn;
}
tnode *min(qnode *qn)
{
qnode *i,*j;
i=qn;
j=qn;
while(j!=NULL)
{
if(j->x->weight<i->x->weight)
i=j;
j=j->next;
}
return i->x;
}
int binTochar(char *buffer)
{
int i,sum=0;
for(i=0;i<8;i++)
{
sum=sum*2+((int)buffer[i]-48);
}
return sum;
}
__global__ void decToBin(char *bits,char ch1)
{
strcpy(bits,"00000000");
int num=ch1,i=7,m;
if(num<0)
num+=256;
while(num!=0)
{
m=num%2;
bits[i--]=m+48;
num/=2;
}
}
__global__ void treeOfCf(FILE *cf,tnode *root,char *buffer1,int *buffer1Index,int *cfChar)
{
if(root==NULL)
return;
if(root->left==NULL)
{
buffer1[(*buffer1Index)++]='1';
if(*buffer1Index==8)
{
fprintf(cf,"%c",binTochar(buffer1));
(*cfChar)++;
strcpy(buffer1,"");
*buffer1Index=0;
}
char s[9];
decToBin(s,root->c.ch);
int i;
for(i=0;i<8;i++)
{
buffer1[(*buffer1Index)++]=s[i];
if(*buffer1Index==8)
{
fprintf(cf,"%c",binTochar(buffer1));
(*cfChar)++;
strcpy(buffer1,"");
*buffer1Index=0;
}
}
}
else
{
buffer1[(*buffer1Index)++]='0';
if(*buffer1Index==8)
{
fprintf(cf,"%c",binTochar(buffer1));
(*cfChar)++;
strcpy(buffer1,"");
*buffer1Index=0;
}
treeOfCf(cf,root->left,buffer1,buffer1Index,cfChar);
treeOfCf(cf,root->right,buffer1,buffer1Index,cfChar);
}
}
__global__ void freeAllocatedMemory(tnode *root)
{
if(root!=NULL)
{
freeAllocatedMemory(root->left);
freeAllocatedMemory(root->right);
		free(root);
}
}
int main()
{
int n=128,m=0,i;
char ch,file[30],outputFileC[30];
cnode s[n];
for(i=0;i<n;i++)
{
s[i].ch=(char)i;
s[i].freq=0;
}
FILE *fp,*compressedFile;
printf("Enter the file name to be compressed:");
scanf("%s",file);
fp=fopen(file,"r");
if(fp==NULL)
{
printf("Couldn't open input file\n");
return 1;
}
//counting frequencies
printf("Enter the file name for compressed file:");
scanf("%s",outputFileC);
while(fread(&ch,sizeof(char),1,fp))
{
(s[(int)ch].freq)++;
m++;
}
mergeSort(s,0,n-1);
int j=0;
while(s[j].freq==0)
j++;
qnode *qu;
qu=NULL;
tnode *temp;
for(i=j;i<n;i++)
{
		temp = (tnode*)malloc(sizeof(tnode));
temp->id[0]=s[i].ch;
temp->c=s[i];
temp->left=NULL;
temp->right=NULL;
temp->weight=s[i].freq;
qu=insert(qu,temp);
}
qnode *q1;
q1=qu;
i=0;
while(q1!=NULL)
{
i++;
q1=q1->next;
}
tnode *root;
tnode *m1;
tnode *m2;
i=0;
while(i!=n-j-1)
{
m1=min(qu);
qu=delpnode(qu,m1);
m2=min(qu);
qu=delpnode(qu,m2);
tnode *parent;
		parent = (tnode*)malloc(sizeof(tnode));
parent->weight = m1->weight+m2->weight;
strcpy(parent->id,"mid-node");
if(m1->weight<m2->weight)
{
parent->left=m1;
parent->right=m2;
}
else
{
parent->left=m2;
parent->right=m1;
}
qu=insert(qu,parent);
i++;
}
root=qu->x;
char bin[100];
strcpy(bin,"0");
assignHuffcode(root,bin,-1);
compressedFile=fopen(outputFileC,"wb");
char buffer1[9]="";
int buffer1Index=0;
int charCount=0;
treeOfCf(compressedFile,root,buffer1,&buffer1Index,&charCount);
fseek(fp,0,SEEK_SET);
char readedCh;
char code[80];
int codeIndex=0;
fread(&readedCh,sizeof(char),1,fp);
strcpy(code,huffcodes[(int)readedCh]);
int codeLen=strlen(code);
for(;buffer1Index<8;)
{
buffer1[buffer1Index++]=code[codeIndex++];
if(codeIndex==codeLen)
{
codeIndex=0;
fread(&readedCh,sizeof(char),1,fp);
strcpy(code,huffcodes[(int)readedCh]);
codeLen=strlen(code);
}
}
fprintf(compressedFile,"%c",binTochar(buffer1));
char buffer[9]="";
int bufferIndex=0;
int flag=0;
int buf;
while(1)
{
for(bufferIndex=0;bufferIndex<8;)
{
if(codeIndex==codeLen)
{
if(fread(&readedCh,sizeof(char),1,fp)==0)
{
flag=1;
for(i=0;i<bufferIndex;i++)
fprintf(compressedFile,"%c",buffer[i]);
break;
}
strcpy(code,huffcodes[(int)readedCh]);
codeLen=strlen(code);
codeIndex=0;
}
buffer[bufferIndex++]=code[codeIndex++];
}
if(flag==1)
break;
buf=binTochar(buffer);
fprintf(compressedFile,"%c",(char)buf);
strcpy(buffer,"");
charCount++;
}
fprintf(compressedFile,"&%d",charCount);
fclose(fp);
fclose(compressedFile);
freeAllocatedMemory(root);
	free(qu);
printf("File successfully compressed");
return 0;
}
|
eb76355d0df49ece072906fde1b81f6e5f87d739.hip | // !!! This is a file automatically generated by hipify!!!
// Compressed sparse row format
// Rows transmit to columns
#include <math.h>
#include "hip/hip_runtime.h"
#include "bfs_csr_kernel.h"
__global__ void breadth_first_search_csr_gpu(unsigned int* cum_row_indexes, unsigned int* column_indexes,
int* matrix_data, unsigned int* in_infections,
unsigned int* out_infections, unsigned int rows) {
unsigned int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < rows) {
if (in_infections[row] == 1) {
out_infections[row] = 1;
unsigned int row_start = cum_row_indexes[row];
unsigned int row_end = cum_row_indexes[row+1];
for (int i = row_start; i < row_end; i++) {
int timesteps_to_transmission = matrix_data[i];
if (timesteps_to_transmission != 0) {
if (timesteps_to_transmission == 1) {
out_infections[column_indexes[i]] = 1;
}
matrix_data[i] -= 1;
}
}
}
}
}
void internal_breadth_first_search_csr_gpu(unsigned int* cum_row_indexes, unsigned int* column_indexes,
int* matrix_data, unsigned int* in_infections,
unsigned int* out_infections, unsigned int rows) {
// declare the number of blocks per grid and the number of threads per block
// use 1 to 512 threads per block
dim3 threadsPerBlock(rows);
dim3 blocksPerGrid(1);
if (rows > 512) {
threadsPerBlock.x = 512;
blocksPerGrid.x = ceil(double(rows)/double(threadsPerBlock.x));
}
hipLaunchKernelGGL(( breadth_first_search_csr_gpu), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, cum_row_indexes,
column_indexes, matrix_data, in_infections, out_infections, rows);
}
| eb76355d0df49ece072906fde1b81f6e5f87d739.cu | // Compressed sparse row format
// Rows transmit to columns
#include <math.h>
#include "cuda_runtime.h"
#include "bfs_csr_kernel.h"
__global__ void breadth_first_search_csr_gpu(unsigned int* cum_row_indexes, unsigned int* column_indexes,
int* matrix_data, unsigned int* in_infections,
unsigned int* out_infections, unsigned int rows) {
unsigned int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < rows) {
if (in_infections[row] == 1) {
out_infections[row] = 1;
unsigned int row_start = cum_row_indexes[row];
unsigned int row_end = cum_row_indexes[row+1];
for (int i = row_start; i < row_end; i++) {
int timesteps_to_transmission = matrix_data[i];
if (timesteps_to_transmission != 0) {
if (timesteps_to_transmission == 1) {
out_infections[column_indexes[i]] = 1;
}
matrix_data[i] -= 1;
}
}
}
}
}
void internal_breadth_first_search_csr_gpu(unsigned int* cum_row_indexes, unsigned int* column_indexes,
int* matrix_data, unsigned int* in_infections,
unsigned int* out_infections, unsigned int rows) {
// declare the number of blocks per grid and the number of threads per block
// use 1 to 512 threads per block
dim3 threadsPerBlock(rows);
dim3 blocksPerGrid(1);
if (rows > 512) {
threadsPerBlock.x = 512;
blocksPerGrid.x = ceil(double(rows)/double(threadsPerBlock.x));
}
breadth_first_search_csr_gpu<<<blocksPerGrid,threadsPerBlock>>>(cum_row_indexes,
column_indexes, matrix_data, in_infections, out_infections, rows);
}
|
5d77922920026eed513a5473581261e7615515ba.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <float.h>
#include "nccl.h"
#include "test_utilities.h"
#include <roctracer/roctx.h>
void showUsage(const char* bin) {
printf("\n"
"Usage: %s <type> <op> <n_min> <n_max> [delta] [gpus] [gpu0 [gpu1 [...]]]\n"
"Where:\n"
#ifdef CUDA_HAS_HALF
" type = [char|int|half|float|double|int64|uint64]\n"
#else
" type = [char|int|float|double|int64|uint64]\n"
#endif
" op = [sum|prod|max|min]\n"
" n_min > 0\n"
" n_max >= n_min\n"
" delta > 0\n\n", bin);
return;
}
int main(int argc, char* argv[]) {
int nvis = 0;
CUDACHECK(hipGetDeviceCount(&nvis));
if (nvis == 0) {
printf("No GPUs found\n");
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
ncclDataType_t type;
ncclRedOp_t op;
int n_min;
int n_max;
int delta;
int gpus;
int* list = NULL;
if (argc < 5) {
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
type = strToType(argv[1]);
if (type == nccl_NUM_TYPES) {
printf("Invalid <type> '%s'\n", argv[1]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
op = strToOp(argv[2]);
if (op == nccl_NUM_OPS) {
printf("Invalid <op> '%s'\n", argv[2]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
n_min = strToPosInt(argv[3]);
if (n_min < 1) {
printf("Invalid <n_min> '%s'\n", argv[3]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
n_max = strToPosInt(argv[4]);
if (n_max < n_min) {
printf("Invalid <n_max> '%s'\n", argv[4]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
if (argc > 5) {
delta = strToPosInt(argv[5]);
if (delta < 1) {
printf("Invalid <delta> '%s'\n", argv[5]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
} else {
delta = (n_max == n_min) ? 1 : (n_max - n_min+9) / 10;
}
if (argc > 6) {
gpus = strToPosInt(argv[6]);
if (gpus < 1) {
printf("Invalid <gpus> '%s'\n", argv[6]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
} else {
gpus = nvis;
}
list = (int*)malloc(gpus*sizeof(int));
if (argc > 7 && argc != 7+gpus) {
printf("If given, GPU list must be fully specified.\n");
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
for(int g=0; g<gpus; ++g) {
if(argc > 7) {
list[g] = strToNonNeg(argv[7+g]);
if (list[g] < 0) {
printf("Invalid GPU%d '%s'\n", g, argv[7+g]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
} else if (list[g] >= nvis) {
printf("GPU%d (%d) exceeds visible devices (%d)\n", g, list[g], nvis);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
} else {
list[g] = g % nvis;
}
}
size_t word = wordSize(type);
size_t max_size = n_max * word;
void* refout;
CUDACHECK(hipHostMalloc(&refout, max_size));
void** input;
void* output; // always goes on rank 0
double* maxError;
ncclComm_t* comm;
hipStream_t* stream;
input = (void**)malloc(gpus*sizeof(void*));
comm = (ncclComm_t*)malloc(gpus*sizeof(ncclComm_t));
stream = (hipStream_t*)malloc(gpus*sizeof(hipStream_t));
for(int g=0; g<gpus; ++g) {
char busid[32] = {0};
CUDACHECK(hipDeviceGetPCIBusId(busid, 32, list[g]));
printf("# Rank %d using device %d [%s]\n", g, list[g], busid);
CUDACHECK(hipSetDevice(list[g]));
CUDACHECK(hipStreamCreate(&stream[g]));
CUDACHECK(hipMalloc(&input[g], max_size));
makeRandom(input[g], n_max, type, 42+g);
if (g == 0) {
CUDACHECK(hipMalloc(&output, max_size));
CUDACHECK(hipHostMalloc(&maxError, sizeof(double)));
CUDACHECK(hipMemcpy(refout, input[g], max_size, hipMemcpyDeviceToHost));
} else {
accVec(refout, input[g], n_max, type, op);
}
}
NCCLCHECK(ncclCommInitAll(comm, gpus, list));
printf(" BYTES ERROR MSEC BW\n");
for(int n=n_min; n<=n_max; n+=delta) {
size_t bytes = word * n;
CUDACHECK(hipSetDevice(list[0]));
CUDACHECK(hipMemsetAsync(output, 0, bytes, stream[0]));
for(int g=0; g<gpus; ++g)
CUDACHECK(hipStreamSynchronize(stream[0]));
auto start = std::chrono::high_resolution_clock::now();
for(int g=0; g<gpus; ++g) {
CUDACHECK(hipSetDevice(list[g]));
NCCLCHECK(ncclReduce(input[g], output, n, type, op, 0, comm[g], stream[g]));
}
for(int g=0; g<gpus; ++g) {
CUDACHECK(hipSetDevice(list[g]));
CUDACHECK(hipStreamSynchronize(stream[g]));
}
auto stop = std::chrono::high_resolution_clock::now();
double ms = std::chrono::duration_cast<std::chrono::duration<double>>
(stop - start).count() * 1000.0;
CUDACHECK(hipSetDevice(list[0]));
maxDiff(maxError, output, refout, n, type, stream[0]);
CUDACHECK(hipStreamSynchronize(stream[0]));
double mb = (double)bytes * 1.e-6;
double algbw = mb / ms;
printf("%12lu %5.0le %10.3lf %6.2lf\n",
n*word, *maxError, ms, algbw);
}
for(int g=0; g<gpus; ++g) {
CUDACHECK(hipSetDevice(list[g]));
CUDACHECK(hipStreamDestroy(stream[g]));
ncclCommDestroy(comm[g]);
CUDACHECK(hipFree(input[g]));
if(g == 0) {
CUDACHECK(hipFree(output));
CUDACHECK(hipHostFree(maxError));
}
}
free(input);
free(comm);
free(stream);
CUDACHECK(hipHostFree(refout));
exit(EXIT_SUCCESS);
}
| 5d77922920026eed513a5473581261e7615515ba.cu | /*************************************************************************
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <float.h>
#include "nccl.h"
#include "test_utilities.h"
#include <nvToolsExt.h>
void showUsage(const char* bin) {
printf("\n"
"Usage: %s <type> <op> <n_min> <n_max> [delta] [gpus] [gpu0 [gpu1 [...]]]\n"
"Where:\n"
#ifdef CUDA_HAS_HALF
" type = [char|int|half|float|double|int64|uint64]\n"
#else
" type = [char|int|float|double|int64|uint64]\n"
#endif
" op = [sum|prod|max|min]\n"
" n_min > 0\n"
" n_max >= n_min\n"
" delta > 0\n\n", bin);
return;
}
int main(int argc, char* argv[]) {
int nvis = 0;
CUDACHECK(cudaGetDeviceCount(&nvis));
if (nvis == 0) {
printf("No GPUs found\n");
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
ncclDataType_t type;
ncclRedOp_t op;
int n_min;
int n_max;
int delta;
int gpus;
int* list = NULL;
if (argc < 5) {
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
type = strToType(argv[1]);
if (type == nccl_NUM_TYPES) {
printf("Invalid <type> '%s'\n", argv[1]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
op = strToOp(argv[2]);
if (op == nccl_NUM_OPS) {
printf("Invalid <op> '%s'\n", argv[2]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
n_min = strToPosInt(argv[3]);
if (n_min < 1) {
printf("Invalid <n_min> '%s'\n", argv[3]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
n_max = strToPosInt(argv[4]);
if (n_max < n_min) {
printf("Invalid <n_max> '%s'\n", argv[4]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
if (argc > 5) {
delta = strToPosInt(argv[5]);
if (delta < 1) {
printf("Invalid <delta> '%s'\n", argv[5]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
} else {
delta = (n_max == n_min) ? 1 : (n_max - n_min+9) / 10;
}
if (argc > 6) {
gpus = strToPosInt(argv[6]);
if (gpus < 1) {
printf("Invalid <gpus> '%s'\n", argv[6]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
} else {
gpus = nvis;
}
list = (int*)malloc(gpus*sizeof(int));
if (argc > 7 && argc != 7+gpus) {
printf("If given, GPU list must be fully specified.\n");
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
for(int g=0; g<gpus; ++g) {
if(argc > 7) {
list[g] = strToNonNeg(argv[7+g]);
if (list[g] < 0) {
printf("Invalid GPU%d '%s'\n", g, argv[7+g]);
showUsage(argv[0]);
exit(EXIT_FAILURE);
} else if (list[g] >= nvis) {
printf("GPU%d (%d) exceeds visible devices (%d)\n", g, list[g], nvis);
showUsage(argv[0]);
exit(EXIT_FAILURE);
}
} else {
list[g] = g % nvis;
}
}
size_t word = wordSize(type);
size_t max_size = n_max * word;
void* refout;
CUDACHECK(cudaMallocHost(&refout, max_size));
void** input;
void* output; // always goes on rank 0
double* maxError;
ncclComm_t* comm;
cudaStream_t* stream;
input = (void**)malloc(gpus*sizeof(void*));
comm = (ncclComm_t*)malloc(gpus*sizeof(ncclComm_t));
stream = (cudaStream_t*)malloc(gpus*sizeof(cudaStream_t));
for(int g=0; g<gpus; ++g) {
char busid[32] = {0};
CUDACHECK(cudaDeviceGetPCIBusId(busid, 32, list[g]));
printf("# Rank %d using device %d [%s]\n", g, list[g], busid);
CUDACHECK(cudaSetDevice(list[g]));
CUDACHECK(cudaStreamCreate(&stream[g]));
CUDACHECK(cudaMalloc(&input[g], max_size));
makeRandom(input[g], n_max, type, 42+g);
if (g == 0) {
CUDACHECK(cudaMalloc(&output, max_size));
CUDACHECK(cudaMallocHost(&maxError, sizeof(double)));
CUDACHECK(cudaMemcpy(refout, input[g], max_size, cudaMemcpyDeviceToHost));
} else {
accVec(refout, input[g], n_max, type, op);
}
}
NCCLCHECK(ncclCommInitAll(comm, gpus, list));
printf(" BYTES ERROR MSEC BW\n");
for(int n=n_min; n<=n_max; n+=delta) {
size_t bytes = word * n;
CUDACHECK(cudaSetDevice(list[0]));
CUDACHECK(cudaMemsetAsync(output, 0, bytes, stream[0]));
for(int g=0; g<gpus; ++g)
CUDACHECK(cudaStreamSynchronize(stream[0]));
auto start = std::chrono::high_resolution_clock::now();
for(int g=0; g<gpus; ++g) {
CUDACHECK(cudaSetDevice(list[g]));
NCCLCHECK(ncclReduce(input[g], output, n, type, op, 0, comm[g], stream[g]));
}
for(int g=0; g<gpus; ++g) {
CUDACHECK(cudaSetDevice(list[g]));
CUDACHECK(cudaStreamSynchronize(stream[g]));
}
auto stop = std::chrono::high_resolution_clock::now();
double ms = std::chrono::duration_cast<std::chrono::duration<double>>
(stop - start).count() * 1000.0;
CUDACHECK(cudaSetDevice(list[0]));
maxDiff(maxError, output, refout, n, type, stream[0]);
CUDACHECK(cudaStreamSynchronize(stream[0]));
double mb = (double)bytes * 1.e-6;
double algbw = mb / ms;
printf("%12lu %5.0le %10.3lf %6.2lf\n",
n*word, *maxError, ms, algbw);
}
for(int g=0; g<gpus; ++g) {
CUDACHECK(cudaSetDevice(list[g]));
CUDACHECK(cudaStreamDestroy(stream[g]));
ncclCommDestroy(comm[g]);
CUDACHECK(cudaFree(input[g]));
if(g == 0) {
CUDACHECK(cudaFree(output));
CUDACHECK(cudaFreeHost(maxError));
}
}
free(input);
free(comm);
free(stream);
CUDACHECK(cudaFreeHost(refout));
exit(EXIT_SUCCESS);
}
|
bruteforcekernel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <dacrt/dacrt.h>
#include <util/cutimer.h>
#define TRI_SHARED_MEMORY_SPACE 256
extern "C"
__global__
void segmentedBruteForce(RayArray rays, TriangleArray triangles,
int* buffered_ray_ids, int ray_buffer_occupied,
int* buffered_tri_ids, int tri_buffer_occupied,
int* ray_segment_sizes, int* tri_segment_sizes,
int* ray_segment_start, // this would be an exclusive scan array that gives us where each segment starts
int* tri_segment_start, // the same applies here
int num_segments, // data not required.!! REMOVE
float* maxts,
int* hitids,
int num_threads_launched, // DEBUG INFO - Not required REMOVE
int num_blocks_launched // DEBUG INFO - Not required REMOVE
) {
	// we have to find a way to count the number of segments this block of threads will handle,
	// then split the working-set size so that each thread group within the block works only on
	// its own copy of shared memory, and update accordingly;
	// this ensures we don't lose parallelism at all
__shared__ float3 v0[TRI_SHARED_MEMORY_SPACE];
__shared__ float3 v1[TRI_SHARED_MEMORY_SPACE];
__shared__ float3 v2[TRI_SHARED_MEMORY_SPACE];
__shared__ int triangle_ids[TRI_SHARED_MEMORY_SPACE];
__shared__ int num_tris_to_process;
__shared__ int num_rays_to_process;
__shared__ int ray_offset;
__shared__ int tri_offset;
__shared__ int tri_batches_to_process;
__shared__ int ray_batches_to_process;
// we can put the FLT_MAX and hitid variables also inside shared variables
__shared__ float fmaxts[TRI_SHARED_MEMORY_SPACE];
__shared__ float hitid[TRI_SHARED_MEMORY_SPACE];
//int tidx = threadIdx.x + blockIdx.x * blockDim.x;
// we might have an unequal number of rays/triangles for threads to process
/****************************************************************************/
// NOTE: We are launching one block of fixed size threads for each segment
// This simplifies the working model a bit, but there might be cases where the work load is not uniform. But that is in the next step.
// these data are all common to all the blocks
if(threadIdx.x == 0) {
num_tris_to_process = tri_segment_sizes[blockIdx.x];
num_rays_to_process = ray_segment_sizes[blockIdx.x];
ray_offset = ray_segment_start[blockIdx.x];
tri_offset = tri_segment_start[blockIdx.x];
tri_batches_to_process = (num_tris_to_process/blockDim.x) + (num_tris_to_process % blockDim.x != 0);
ray_batches_to_process = (num_rays_to_process/blockDim.x) + (num_rays_to_process % blockDim.x != 0);
}
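	// Work is consumed in two nested batches: the outer do-loop walks this segment's rays
	// blockDim.x at a time, and for each ray batch the inner do-loop stages the segment's
	// triangles into shared memory TRI_SHARED_MEMORY_SPACE at a time before intersecting.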
int ridx = 0;
int ray_batch = 0;
int this_time_rays = 0;
int rayid;
__syncthreads();
// I can put these variables outside also..~
// we do a batch wise loading of rays which in turn do their operation batch wise on triangles
do {
// these two variables below are per ray variables
		// They have to be refreshed every time a new ray is picked up for processing.
// Hence I've put them inside this loop and not outside.
//float fmaxts = FLT_MAX;
//int hitid = -1;
fmaxts[threadIdx.x] = FLT_MAX;
hitid[threadIdx.x] = -1;
if(num_rays_to_process - ridx > blockDim.x) {
this_time_rays = blockDim.x;
ridx += blockDim.x;
} else {
this_time_rays = num_rays_to_process - ridx;
}
// now do a batch load of triangles
int tid = 0;
int tri_batch = 0;
int this_time_tris = 0;
int temp;
do {
if(num_tris_to_process - tid > TRI_SHARED_MEMORY_SPACE) {
this_time_tris = TRI_SHARED_MEMORY_SPACE;
tid += TRI_SHARED_MEMORY_SPACE;
} else {
this_time_tris = num_tris_to_process - tid;
}
if(threadIdx.x < this_time_tris) {
int triid = buffered_tri_ids[tri_offset + threadIdx.x + tri_batch * TRI_SHARED_MEMORY_SPACE];
v0[threadIdx.x] = triangles.v0[triid];
v1[threadIdx.x] = triangles.v1[triid];
v2[threadIdx.x] = triangles.v2[triid];
triangle_ids[threadIdx.x] = triid;
}
__syncthreads();
			// Note: this 'if' sits here rather than around the whole block because the total number of rays
			// may be smaller than the number of threads while there are still many triangles to load; every
			// thread helps stage triangles, but only the threads that own a ray run the intersection tests.
if(threadIdx.x < this_time_rays) {
temp = ray_offset + threadIdx.x + ray_batch * blockDim.x; // for writing into final global array
rayid = buffered_ray_ids[temp];
				Ray ir(rays.o[rayid], rays.d[rayid]);
for(int t = 0; t < this_time_tris; t++) {
Triangle it(v0[t], v1[t], v2[t]);
double u, v, xt;
if(rayIntersect<double>(it, ir, u, v, xt)) {
if(xt > 0 && (float)xt < fmaxts[threadIdx.x]) {
fmaxts[threadIdx.x] = xt;
// calculating the id of the hit variable?
hitid[threadIdx.x] = triangle_ids[t];
}
}
}
// now update the global buffered_hit_id and buffered_maxts array
// NOTE: logic is also wrong here.!!! we have to find the correct threadid location to put our variables.
//maxts[tidx] = fmaxts;
//hitids[tidx] = hitid;
}
__syncthreads();
tri_batch++;
}while(tri_batch < tri_batches_to_process);
// update rays and their global ids
if(threadIdx.x < this_time_rays) {
maxts[temp] = fmaxts[threadIdx.x];
hitids[temp] = hitid[threadIdx.x];
}
__syncthreads();
ray_batch++;
} while(ray_batch < ray_batches_to_process);
}
// update min values in the global array
extern "C"
__global__ void updateMinKernel(int* ray_id, float* min_hits, int* minhit_ids, float* global_min, int* global_hits, int num_rays) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < num_rays) {
int rayid = ray_id[tid];
float xhit = min_hits[tid];
int hitid = minhit_ids[tid];
if(xhit < global_min[rayid]) {
global_min[rayid] = xhit;
global_hits[rayid] = hitid;
}
}
}
extern "C"
__global__ void dacrtBruteForce(TriangleArray dev_triangles, int num_triangles, RayArray dev_rays, int num_rays,
int* tri_idx_array, // this array will have the triangles
int tricnt, // number of triangles to be considered
int* ray_idx_array, // ray ids
int raycnt, // number of rays to be considered
float* maxts, // maxts value
int* hitids
) {
__shared__ float3 v0[TRI_SHARED_MEMORY_SPACE];
__shared__ float3 v1[TRI_SHARED_MEMORY_SPACE];
__shared__ float3 v2[TRI_SHARED_MEMORY_SPACE];
int tidx = threadIdx.x + blockDim.x * blockIdx.x;
float fmaxts = FLT_MAX;
int hitid = -1;
int to_load = tricnt / TRI_SHARED_MEMORY_SPACE + (tricnt % TRI_SHARED_MEMORY_SPACE != 0);
int loaded = 0;
int idx = 0;
int this_time_triangles = 0;
do {
// first load the first TRI_SHARED_MEMORY_SPACE triangles or less
if((tricnt - idx) >= TRI_SHARED_MEMORY_SPACE) { this_time_triangles = TRI_SHARED_MEMORY_SPACE; idx += TRI_SHARED_MEMORY_SPACE; }
else this_time_triangles = tricnt - idx;
if(threadIdx.x < this_time_triangles) {
v0[threadIdx.x] = dev_triangles.v0[tri_idx_array[threadIdx.x + loaded * TRI_SHARED_MEMORY_SPACE]]; // move to the next 512
v1[threadIdx.x] = dev_triangles.v1[tri_idx_array[threadIdx.x + loaded * TRI_SHARED_MEMORY_SPACE]];
v2[threadIdx.x] = dev_triangles.v2[tri_idx_array[threadIdx.x + loaded * TRI_SHARED_MEMORY_SPACE]];
}
__syncthreads();
// now perform ray intersection
if(tidx < raycnt) {
double u, v, xt;
Ray ir(dev_rays.o[ray_idx_array[tidx]], dev_rays.d[ray_idx_array[tidx]]);
for(int t = 0; t < this_time_triangles; t++) {
if(rayIntersect<double>(Triangle(v0[t], v1[t], v2[t]), ir, u, v, xt)) {
if(xt > 0 && static_cast<float>(xt) < fmaxts) {
fmaxts = static_cast<float>(xt);
hitid = tri_idx_array[t + loaded * TRI_SHARED_MEMORY_SPACE];
}
}
}
}
__syncthreads();
// go to next segment
		// NOTE: This was a bug. Keeping the increment of 'loaded' inside the if condition made all
		// other threads loop forever, because they never incremented the value.
loaded++;
} while(loaded < to_load);
// update the arrays
if(tidx < raycnt) {
//maxts[tidx] = fmaxts;
//hitids[tidx] = hitid;
if(maxts[ray_idx_array[tidx]] > fmaxts && hitid != -1) {
maxts[ray_idx_array[tidx]] = fmaxts;
hitids[ray_idx_array[tidx]] = hitid;
}
}
__syncthreads(); // expensive? check correctness and remove
}
// add a modified segmentedBruteForce kernel
extern "C"
__global__ void modifiedSegmentedBruteForce(RayArray rays, TriangleArray triangles, int* buffered_ray_ids, int ray_buffer_occupied, int* buffered_tri_ids, int tri_buffer_occupied,
int* ray_segment_sizes, int* tri_segment_sizes, int* ray_segment_start, int* tri_segment_start,
int* segment_no, int* blockStart,
float* maxts, int* hitids
) {
__shared__ float3 v0[TRI_SHARED_MEMORY_SPACE];
__shared__ float3 v1[TRI_SHARED_MEMORY_SPACE];
__shared__ float3 v2[TRI_SHARED_MEMORY_SPACE];
__shared__ int triangle_ids[TRI_SHARED_MEMORY_SPACE];
__shared__ float fmaxts[TRI_SHARED_MEMORY_SPACE];
__shared__ float hitid[TRI_SHARED_MEMORY_SPACE];
__shared__ int num_tris_to_process;
__shared__ int num_rays_to_process;
__shared__ int ray_offset;
__shared__ int tri_offset;
__shared__ int tri_batches_to_process;
__shared__ int blockNo;
__shared__ int whichBlock;
__shared__ int tid;
__shared__ int this_time_tris;
__shared__ int threadId_within_segment[TRI_SHARED_MEMORY_SPACE];
__shared__ int temp[TRI_SHARED_MEMORY_SPACE];
__shared__ int tri_batch[TRI_SHARED_MEMORY_SPACE];
if(threadIdx.x == 0) {
blockNo = segment_no[blockIdx.x]; // load which segment you are
whichBlock = blockStart[blockIdx.x];
tid = 0;
num_tris_to_process = tri_segment_sizes[blockNo];
num_rays_to_process = ray_segment_sizes[blockNo];
ray_offset = ray_segment_start[blockNo]; // this where this block's threads actually start
tri_offset = tri_segment_start[blockNo];
tri_batches_to_process = (num_tris_to_process/blockDim.x) + (num_tris_to_process % blockDim.x != 0);
//ray_batches_to_process = (num_rays_to_process/blockDim.x) + (num_rays_to_process % blockDim.x != 0);
}
__syncthreads();
//int threadId_within_segment = threadIdx.x + whichBlock * 256;
threadId_within_segment[threadIdx.x] = threadIdx.x + whichBlock * TRI_SHARED_MEMORY_SPACE; // this is supposed to be whichBlock * num_Rays_per_block
// this id should be less than each segment's total rays to be handled
//if(threadId_within_segment < num_rays_to_process) {
// here we do batch loading of triangles and process them.
fmaxts[threadIdx.x] = FLT_MAX;
hitid[threadIdx.x] = -1;
// QUESTION? can all these be shared variables?
//__shared__ int tid = 0;
//int tri_batch = 0;
tri_batch[threadIdx.x] = 0;
//int this_time_tris = 0;
//int temp;
do {
if(threadIdx.x == 0) {
if(num_tris_to_process - tid > TRI_SHARED_MEMORY_SPACE) {
this_time_tris = TRI_SHARED_MEMORY_SPACE;
tid += TRI_SHARED_MEMORY_SPACE;
} else {
this_time_tris = num_tris_to_process - tid;
}
}
__syncthreads();
// load the triangles
if(threadIdx.x < this_time_tris) {
int triid = buffered_tri_ids[tri_offset + threadIdx.x + tri_batch[threadIdx.x] * TRI_SHARED_MEMORY_SPACE];
v0[threadIdx.x] = triangles.v0[triid];
v1[threadIdx.x] = triangles.v1[triid];
v2[threadIdx.x] = triangles.v2[triid];
triangle_ids[threadIdx.x] = triid;
}
__syncthreads();
if(threadId_within_segment[threadIdx.x] < num_rays_to_process) {
temp[threadIdx.x] = ray_offset + threadId_within_segment[threadIdx.x]; // starting ray + within this segment which ray
int rid = buffered_ray_ids[temp[threadIdx.x]];
Ray ir(rays.o[rid], rays.d[rid]);
for(int t = 0; t < this_time_tris; t++) {
Triangle it(v0[t], v1[t], v2[t]);
double u, v, xt;
if(rayIntersect<double>(it, ir, u, v, xt)) {
if(xt > 0 && (float)xt < fmaxts[threadIdx.x]) {
fmaxts[threadIdx.x] = xt;
// calculating the id of the hit variable?
hitid[threadIdx.x] = triangle_ids[t];
}
}
}
}
__syncthreads();
// increment the batch
tri_batch[threadIdx.x]++;
} while(tri_batch[threadIdx.x] < tri_batches_to_process);
//}
// this condition takes care of extra rays launched as a result of bringing the count to arbitrary multiple
// num_rays_to_process is within a segment.
if(threadId_within_segment[threadIdx.x] < num_rays_to_process) {
maxts[temp[threadIdx.x]] = fmaxts[threadIdx.x];
hitids[temp[threadIdx.x]] = hitid[threadIdx.x];
}
__syncthreads();
}
// complete the incomplete segments
// This method can be called from other segment approaches. Hence external linkage
extern "C"
void dacrtCompleteRender(ParallelPack& pack, TriangleArray& dev_triangles, RayArray& dev_rays, DacrtRunTimeParameters& rtparams, Counters& ctr) {
thrust::device_vector<int> ray_segment_start(pack.num_segments);
thrust::device_vector<int> tri_segment_start(pack.num_segments);
thrust::exclusive_scan(pack.tri_segment_sizes.begin(), pack.tri_segment_sizes.begin() + pack.num_segments, tri_segment_start.begin());
thrust::exclusive_scan(pack.ray_segment_sizes.begin(), pack.ray_segment_sizes.begin() + pack.num_segments, ray_segment_start.begin());
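	// The exclusive scans turn per-segment sizes into start offsets (e.g. sizes {3,5,2}
	// become starts {0,3,8}), so each block can index its own slice of the buffers.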
int num_blocks = pack.num_segments;
int num_threads_per_block = rtparams.NUM_RAYS_PER_BLOCK;
Timer seg_brute_timer("SegmentedBruteForce Timer");
seg_brute_timer.start();
hipLaunchKernelGGL(( segmentedBruteForce), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, dev_rays, dev_triangles, thrust::raw_pointer_cast(&pack.buffered_ray_idx[0]),
pack.ray_buffer_occupied, thrust::raw_pointer_cast(&pack.buffered_tri_idx[0]), pack.tri_buffer_occupied,
thrust::raw_pointer_cast(&pack.ray_segment_sizes[0]), thrust::raw_pointer_cast(&pack.tri_segment_sizes[0]),
thrust::raw_pointer_cast(&ray_segment_start[0]), thrust::raw_pointer_cast(&tri_segment_start[0]),
pack.num_segments, thrust::raw_pointer_cast(&pack.buffered_ray_maxts[0]),
thrust::raw_pointer_cast(&pack.buffered_ray_hitids[0]), num_threads_per_block * num_blocks, num_blocks);
seg_brute_timer.stop();
ctr.brute_force_time += seg_brute_timer.get_ms();
Timer seg_sort_timer("Seg Sort Timer");
seg_sort_timer.start();
thrust::sort_by_key(pack.buffered_ray_idx.begin(), pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied,
thrust::make_zip_iterator(thrust::make_tuple(pack.buffered_ray_maxts.begin(), pack.buffered_ray_hitids.begin())));
seg_sort_timer.stop();
ctr.seg_sort_time += seg_sort_timer.get_ms();
// now we have to reduce according to the key, which is the ray id
static thrust::device_vector<int> ray_idx(rtparams.BUFFER_SIZE);
static thrust::device_vector<float> ray_maxts(rtparams.BUFFER_SIZE);
static thrust::device_vector<int> ray_hitids(rtparams.BUFFER_SIZE);
static thrust::equal_to<int> pred;
typedef thrust::device_vector<int>::iterator iter;
typedef thrust::device_vector<float>::iterator fiter;
typedef thrust::zip_iterator<thrust::tuple<fiter, iter> > zippy;
thrust::pair<iter, zippy> minend;
MinHitFunctor<thrust::tuple<float, int> > min_hit_functor;
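	// reduce_by_key collapses the now-sorted duplicate ray ids; MinHitFunctor (defined
	// elsewhere) is assumed to keep the hit pair with the smallest t, and the surviving
	// per-ray minima are then folded into the global arrays by updateMinKernel below.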
Timer reduction_timer("Reduction Timer");
reduction_timer.start();
minend = thrust::reduce_by_key(pack.buffered_ray_idx.begin(), pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied,
thrust::make_zip_iterator(thrust::make_tuple(pack.buffered_ray_maxts.begin(), pack.buffered_ray_hitids.begin())),
ray_idx.begin(), thrust::make_zip_iterator(thrust::make_tuple(ray_maxts.begin(), ray_hitids.begin())),
pred,
min_hit_functor);
reduction_timer.stop();
ctr.reduction_time += reduction_timer.get_ms();
// now we can update our global max_ts and hitid array
int num_valid_keys = minend.first - ray_idx.begin();
num_threads_per_block = 512;
num_blocks = num_valid_keys / num_threads_per_block + (num_valid_keys % num_threads_per_block != 0);
Timer update_min_timer("Update Min Timer");
update_min_timer.start();
hipLaunchKernelGGL(( updateMinKernel), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, thrust::raw_pointer_cast(&ray_idx[0]), thrust::raw_pointer_cast(&ray_maxts[0]), thrust::raw_pointer_cast(&ray_hitids[0]),
thrust::raw_pointer_cast(&pack.dev_ray_maxts[0]), thrust::raw_pointer_cast(&pack.dev_hitids[0]), num_valid_keys);
update_min_timer.stop();
ctr.update_min_time += update_min_timer.get_ms();
// reset the counters
ray_idx.clear();
ray_maxts.clear();
ray_hitids.clear();
pack.buffered_ray_idx.clear();
pack.buffered_tri_idx.clear();
pack.tri_segment_sizes.clear();
pack.ray_segment_sizes.clear();
pack.segment_ids.clear();
pack.ray_buffer_occupied = 0;
pack.tri_buffer_occupied = 0;
pack.num_segments = 0;
} | bruteforcekernel.cu | #include <dacrt/dacrt.h>
#include <util/cutimer.h>
#define TRI_SHARED_MEMORY_SPACE 256
extern "C"
__global__
void segmentedBruteForce(RayArray rays, TriangleArray triangles,
int* buffered_ray_ids, int ray_buffer_occupied,
int* buffered_tri_ids, int tri_buffer_occupied,
int* ray_segment_sizes, int* tri_segment_sizes,
int* ray_segment_start, // this would be an exclusive scan array that gives us where each segment starts
int* tri_segment_start, // the same applies here
int num_segments, // data not required.!! REMOVE
float* maxts,
int* hitids,
int num_threads_launched, // DEBUG INFO - Not required REMOVE
int num_blocks_launched // DEBUG INFO - Not required REMOVE
) {
	// we have to find a way to count the number of segments this block of threads will handle,
	// then split the working-set size so that each thread group within the block works only on
	// its own copy of shared memory, and update accordingly;
	// this ensures we don't lose parallelism at all
__shared__ float3 v0[TRI_SHARED_MEMORY_SPACE];
__shared__ float3 v1[TRI_SHARED_MEMORY_SPACE];
__shared__ float3 v2[TRI_SHARED_MEMORY_SPACE];
__shared__ int triangle_ids[TRI_SHARED_MEMORY_SPACE];
__shared__ int num_tris_to_process;
__shared__ int num_rays_to_process;
__shared__ int ray_offset;
__shared__ int tri_offset;
__shared__ int tri_batches_to_process;
__shared__ int ray_batches_to_process;
// we can put the FLT_MAX and hitid variables also inside shared variables
__shared__ float fmaxts[TRI_SHARED_MEMORY_SPACE];
__shared__ float hitid[TRI_SHARED_MEMORY_SPACE];
//int tidx = threadIdx.x + blockIdx.x * blockDim.x;
// we might have an unequal number of rays/triangles for threads to process
/****************************************************************************/
// NOTE: We are launching one block of fixed size threads for each segment
// This simplifies the working model a bit, but there might be cases where the work load is not uniform. But that is in the next step.
// these data are all common to all the blocks
if(threadIdx.x == 0) {
num_tris_to_process = tri_segment_sizes[blockIdx.x];
num_rays_to_process = ray_segment_sizes[blockIdx.x];
ray_offset = ray_segment_start[blockIdx.x];
tri_offset = tri_segment_start[blockIdx.x];
tri_batches_to_process = (num_tris_to_process/blockDim.x) + (num_tris_to_process % blockDim.x != 0);
ray_batches_to_process = (num_rays_to_process/blockDim.x) + (num_rays_to_process % blockDim.x != 0);
}
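	// Work is consumed in two nested batches: the outer do-loop walks this segment's rays
	// blockDim.x at a time, and for each ray batch the inner do-loop stages the segment's
	// triangles into shared memory TRI_SHARED_MEMORY_SPACE at a time before intersecting.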
int ridx = 0;
int ray_batch = 0;
int this_time_rays = 0;
int rayid;
__syncthreads();
// I can put these variables outside also..~
// we do a batch wise loading of rays which in turn do their operation batch wise on triangles
do {
// these two variables below are per ray variables
		// They have to be refreshed every time a new ray is picked up for processing.
// Hence I've put them inside this loop and not outside.
//float fmaxts = FLT_MAX;
//int hitid = -1;
fmaxts[threadIdx.x] = FLT_MAX;
hitid[threadIdx.x] = -1;
if(num_rays_to_process - ridx > blockDim.x) {
this_time_rays = blockDim.x;
ridx += blockDim.x;
} else {
this_time_rays = num_rays_to_process - ridx;
}
// now do a batch load of triangles
int tid = 0;
int tri_batch = 0;
int this_time_tris = 0;
int temp;
do {
if(num_tris_to_process - tid > TRI_SHARED_MEMORY_SPACE) {
this_time_tris = TRI_SHARED_MEMORY_SPACE;
tid += TRI_SHARED_MEMORY_SPACE;
} else {
this_time_tris = num_tris_to_process - tid;
}
if(threadIdx.x < this_time_tris) {
int triid = buffered_tri_ids[tri_offset + threadIdx.x + tri_batch * TRI_SHARED_MEMORY_SPACE];
v0[threadIdx.x] = triangles.v0[triid];
v1[threadIdx.x] = triangles.v1[triid];
v2[threadIdx.x] = triangles.v2[triid];
triangle_ids[threadIdx.x] = triid;
}
__syncthreads();
			// Note: this 'if' sits here rather than around the whole block because the total number of rays
			// may be smaller than the number of threads while there are still many triangles to load; every
			// thread helps stage triangles, but only the threads that own a ray run the intersection tests.
if(threadIdx.x < this_time_rays) {
temp = ray_offset + threadIdx.x + ray_batch * blockDim.x; // for writing into final global array
rayid = buffered_ray_ids[temp];
				Ray ir(rays.o[rayid], rays.d[rayid]);
for(int t = 0; t < this_time_tris; t++) {
Triangle it(v0[t], v1[t], v2[t]);
double u, v, xt;
if(rayIntersect<double>(it, ir, u, v, xt)) {
if(xt > 0 && (float)xt < fmaxts[threadIdx.x]) {
fmaxts[threadIdx.x] = xt;
// calculating the id of the hit variable?
hitid[threadIdx.x] = triangle_ids[t];
}
}
}
// now update the global buffered_hit_id and buffered_maxts array
// NOTE: logic is also wrong here.!!! we have to find the correct threadid location to put our variables.
//maxts[tidx] = fmaxts;
//hitids[tidx] = hitid;
}
__syncthreads();
tri_batch++;
}while(tri_batch < tri_batches_to_process);
// update rays and their global ids
if(threadIdx.x < this_time_rays) {
maxts[temp] = fmaxts[threadIdx.x];
hitids[temp] = hitid[threadIdx.x];
}
__syncthreads();
ray_batch++;
} while(ray_batch < ray_batches_to_process);
}
// update min values in the global array
extern "C"
__global__ void updateMinKernel(int* ray_id, float* min_hits, int* minhit_ids, float* global_min, int* global_hits, int num_rays) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < num_rays) {
int rayid = ray_id[tid];
float xhit = min_hits[tid];
int hitid = minhit_ids[tid];
if(xhit < global_min[rayid]) {
global_min[rayid] = xhit;
global_hits[rayid] = hitid;
}
}
}
extern "C"
__global__ void dacrtBruteForce(TriangleArray dev_triangles, int num_triangles, RayArray dev_rays, int num_rays,
int* tri_idx_array, // this array will have the triangles
int tricnt, // number of triangles to be considered
int* ray_idx_array, // ray ids
int raycnt, // number of rays to be considered
float* maxts, // maxts value
int* hitids
) {
__shared__ float3 v0[TRI_SHARED_MEMORY_SPACE];
__shared__ float3 v1[TRI_SHARED_MEMORY_SPACE];
__shared__ float3 v2[TRI_SHARED_MEMORY_SPACE];
int tidx = threadIdx.x + blockDim.x * blockIdx.x;
float fmaxts = FLT_MAX;
int hitid = -1;
int to_load = tricnt / TRI_SHARED_MEMORY_SPACE + (tricnt % TRI_SHARED_MEMORY_SPACE != 0);
int loaded = 0;
int idx = 0;
int this_time_triangles = 0;
do {
// first load the first TRI_SHARED_MEMORY_SPACE triangles or less
if((tricnt - idx) >= TRI_SHARED_MEMORY_SPACE) { this_time_triangles = TRI_SHARED_MEMORY_SPACE; idx += TRI_SHARED_MEMORY_SPACE; }
else this_time_triangles = tricnt - idx;
if(threadIdx.x < this_time_triangles) {
v0[threadIdx.x] = dev_triangles.v0[tri_idx_array[threadIdx.x + loaded * TRI_SHARED_MEMORY_SPACE]]; // move to the next 512
v1[threadIdx.x] = dev_triangles.v1[tri_idx_array[threadIdx.x + loaded * TRI_SHARED_MEMORY_SPACE]];
v2[threadIdx.x] = dev_triangles.v2[tri_idx_array[threadIdx.x + loaded * TRI_SHARED_MEMORY_SPACE]];
}
__syncthreads();
// now perform ray intersection
if(tidx < raycnt) {
double u, v, xt;
Ray ir(dev_rays.o[ray_idx_array[tidx]], dev_rays.d[ray_idx_array[tidx]]);
for(int t = 0; t < this_time_triangles; t++) {
if(rayIntersect<double>(Triangle(v0[t], v1[t], v2[t]), ir, u, v, xt)) {
if(xt > 0 && static_cast<float>(xt) < fmaxts) {
fmaxts = static_cast<float>(xt);
hitid = tri_idx_array[t + loaded * TRI_SHARED_MEMORY_SPACE];
}
}
}
}
__syncthreads();
// go to next segment
		// NOTE: This was a bug. Keeping the increment of 'loaded' inside the if condition made all
		// other threads loop forever, because they never incremented the value.
loaded++;
} while(loaded < to_load);
// update the arrays
if(tidx < raycnt) {
//maxts[tidx] = fmaxts;
//hitids[tidx] = hitid;
if(maxts[ray_idx_array[tidx]] > fmaxts && hitid != -1) {
maxts[ray_idx_array[tidx]] = fmaxts;
hitids[ray_idx_array[tidx]] = hitid;
}
}
__syncthreads(); // expensive? check correctness and remove
}
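// Editorial sketch (not part of the original source): a minimal host-side launch for
// dacrtBruteForce. It assumes the block size equals TRI_SHARED_MEMORY_SPACE, which is how
// threadIdx.x fills the shared v0/v1/v2 arrays above, and launches one thread per buffered ray.
// The helper name and parameter names are illustrative only.
void launchDacrtBruteForce(TriangleArray dev_triangles, int num_triangles,
                           RayArray dev_rays, int num_rays,
                           int* d_tri_idx, int tricnt,
                           int* d_ray_idx, int raycnt,
                           float* d_maxts, int* d_hitids)
{
    int threads = TRI_SHARED_MEMORY_SPACE;                    // one loader thread per shared-memory slot
    int blocks = raycnt / threads + (raycnt % threads != 0);  // one thread per ray, rounded up
    dacrtBruteForce<<<blocks, threads>>>(dev_triangles, num_triangles, dev_rays, num_rays,
                                         d_tri_idx, tricnt, d_ray_idx, raycnt,
                                         d_maxts, d_hitids);
}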
// add a modified segmentedBruteForce kernel
extern "C"
__global__ void modifiedSegmentedBruteForce(RayArray rays, TriangleArray triangles, int* buffered_ray_ids, int ray_buffer_occupied, int* buffered_tri_ids, int tri_buffer_occupied,
int* ray_segment_sizes, int* tri_segment_sizes, int* ray_segment_start, int* tri_segment_start,
int* segment_no, int* blockStart,
float* maxts, int* hitids
) {
__shared__ float3 v0[TRI_SHARED_MEMORY_SPACE];
__shared__ float3 v1[TRI_SHARED_MEMORY_SPACE];
__shared__ float3 v2[TRI_SHARED_MEMORY_SPACE];
__shared__ int triangle_ids[TRI_SHARED_MEMORY_SPACE];
__shared__ float fmaxts[TRI_SHARED_MEMORY_SPACE];
__shared__ float hitid[TRI_SHARED_MEMORY_SPACE];
__shared__ int num_tris_to_process;
__shared__ int num_rays_to_process;
__shared__ int ray_offset;
__shared__ int tri_offset;
__shared__ int tri_batches_to_process;
__shared__ int blockNo;
__shared__ int whichBlock;
__shared__ int tid;
__shared__ int this_time_tris;
__shared__ int threadId_within_segment[TRI_SHARED_MEMORY_SPACE];
__shared__ int temp[TRI_SHARED_MEMORY_SPACE];
__shared__ int tri_batch[TRI_SHARED_MEMORY_SPACE];
if(threadIdx.x == 0) {
blockNo = segment_no[blockIdx.x]; // load which segment you are
whichBlock = blockStart[blockIdx.x];
tid = 0;
num_tris_to_process = tri_segment_sizes[blockNo];
num_rays_to_process = ray_segment_sizes[blockNo];
ray_offset = ray_segment_start[blockNo]; // this where this block's threads actually start
tri_offset = tri_segment_start[blockNo];
tri_batches_to_process = (num_tris_to_process/blockDim.x) + (num_tris_to_process % blockDim.x != 0);
//ray_batches_to_process = (num_rays_to_process/blockDim.x) + (num_rays_to_process % blockDim.x != 0);
}
__syncthreads();
//int threadId_within_segment = threadIdx.x + whichBlock * 256;
threadId_within_segment[threadIdx.x] = threadIdx.x + whichBlock * TRI_SHARED_MEMORY_SPACE; // this is supposed to be whichBlock * num_Rays_per_block
// this id should be less than each segment's total rays to be handled
//if(threadId_within_segment < num_rays_to_process) {
// here we do batch loading of triangles and process them.
fmaxts[threadIdx.x] = FLT_MAX;
hitid[threadIdx.x] = -1;
// QUESTION? can all these be shared variables?
//__shared__ int tid = 0;
//int tri_batch = 0;
tri_batch[threadIdx.x] = 0;
//int this_time_tris = 0;
//int temp;
do {
if(threadIdx.x == 0) {
if(num_tris_to_process - tid > TRI_SHARED_MEMORY_SPACE) {
this_time_tris = TRI_SHARED_MEMORY_SPACE;
tid += TRI_SHARED_MEMORY_SPACE;
} else {
this_time_tris = num_tris_to_process - tid;
}
}
__syncthreads();
// load the triangles
if(threadIdx.x < this_time_tris) {
int triid = buffered_tri_ids[tri_offset + threadIdx.x + tri_batch[threadIdx.x] * TRI_SHARED_MEMORY_SPACE];
v0[threadIdx.x] = triangles.v0[triid];
v1[threadIdx.x] = triangles.v1[triid];
v2[threadIdx.x] = triangles.v2[triid];
triangle_ids[threadIdx.x] = triid;
}
__syncthreads();
if(threadId_within_segment[threadIdx.x] < num_rays_to_process) {
temp[threadIdx.x] = ray_offset + threadId_within_segment[threadIdx.x]; // starting ray + within this segment which ray
int rid = buffered_ray_ids[temp[threadIdx.x]];
Ray ir(rays.o[rid], rays.d[rid]);
for(int t = 0; t < this_time_tris; t++) {
Triangle it(v0[t], v1[t], v2[t]);
double u, v, xt;
if(rayIntersect<double>(it, ir, u, v, xt)) {
if(xt > 0 && (float)xt < fmaxts[threadIdx.x]) {
fmaxts[threadIdx.x] = xt;
// record the global id of the triangle that was hit
hitid[threadIdx.x] = triangle_ids[t];
}
}
}
}
__syncthreads();
// increment the batch
tri_batch[threadIdx.x]++;
} while(tri_batch[threadIdx.x] < tri_batches_to_process);
//}
// this condition takes care of the extra rays launched as a result of rounding the count up to a block multiple
// num_rays_to_process is counted per segment.
if(threadId_within_segment[threadIdx.x] < num_rays_to_process) {
maxts[temp[threadIdx.x]] = fmaxts[threadIdx.x];
hitids[temp[threadIdx.x]] = hitid[threadIdx.x];
}
__syncthreads();
}
// complete the incomplete segments
// This method can be called from other segment approaches. Hence external linkage
extern "C"
void dacrtCompleteRender(ParallelPack& pack, TriangleArray& dev_triangles, RayArray& dev_rays, DacrtRunTimeParameters& rtparams, Counters& ctr) {
thrust::device_vector<int> ray_segment_start(pack.num_segments);
thrust::device_vector<int> tri_segment_start(pack.num_segments);
thrust::exclusive_scan(pack.tri_segment_sizes.begin(), pack.tri_segment_sizes.begin() + pack.num_segments, tri_segment_start.begin());
thrust::exclusive_scan(pack.ray_segment_sizes.begin(), pack.ray_segment_sizes.begin() + pack.num_segments, ray_segment_start.begin());
int num_blocks = pack.num_segments;
int num_threads_per_block = rtparams.NUM_RAYS_PER_BLOCK;
Timer seg_brute_timer("SegmentedBruteForce Timer");
seg_brute_timer.start();
segmentedBruteForce<<<num_blocks, num_threads_per_block>>>(dev_rays, dev_triangles, thrust::raw_pointer_cast(&pack.buffered_ray_idx[0]),
pack.ray_buffer_occupied, thrust::raw_pointer_cast(&pack.buffered_tri_idx[0]), pack.tri_buffer_occupied,
thrust::raw_pointer_cast(&pack.ray_segment_sizes[0]), thrust::raw_pointer_cast(&pack.tri_segment_sizes[0]),
thrust::raw_pointer_cast(&ray_segment_start[0]), thrust::raw_pointer_cast(&tri_segment_start[0]),
pack.num_segments, thrust::raw_pointer_cast(&pack.buffered_ray_maxts[0]),
thrust::raw_pointer_cast(&pack.buffered_ray_hitids[0]), num_threads_per_block * num_blocks, num_blocks);
seg_brute_timer.stop();
ctr.brute_force_time += seg_brute_timer.get_ms();
Timer seg_sort_timer("Seg Sort Timer");
seg_sort_timer.start();
thrust::sort_by_key(pack.buffered_ray_idx.begin(), pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied,
thrust::make_zip_iterator(thrust::make_tuple(pack.buffered_ray_maxts.begin(), pack.buffered_ray_hitids.begin())));
seg_sort_timer.stop();
ctr.seg_sort_time += seg_sort_timer.get_ms();
// now we have to reduce according to the key, which is the ray id
static thrust::device_vector<int> ray_idx(rtparams.BUFFER_SIZE);
static thrust::device_vector<float> ray_maxts(rtparams.BUFFER_SIZE);
static thrust::device_vector<int> ray_hitids(rtparams.BUFFER_SIZE);
static thrust::equal_to<int> pred;
typedef thrust::device_vector<int>::iterator iter;
typedef thrust::device_vector<float>::iterator fiter;
typedef thrust::zip_iterator<thrust::tuple<fiter, iter> > zippy;
thrust::pair<iter, zippy> minend;
MinHitFunctor<thrust::tuple<float, int> > min_hit_functor;
Timer reduction_timer("Reduction Timer");
reduction_timer.start();
minend = thrust::reduce_by_key(pack.buffered_ray_idx.begin(), pack.buffered_ray_idx.begin() + pack.ray_buffer_occupied,
thrust::make_zip_iterator(thrust::make_tuple(pack.buffered_ray_maxts.begin(), pack.buffered_ray_hitids.begin())),
ray_idx.begin(), thrust::make_zip_iterator(thrust::make_tuple(ray_maxts.begin(), ray_hitids.begin())),
pred,
min_hit_functor);
reduction_timer.stop();
ctr.reduction_time += reduction_timer.get_ms();
// now we can update our global max_ts and hitid array
int num_valid_keys = minend.first - ray_idx.begin();
num_threads_per_block = 512;
num_blocks = num_valid_keys / num_threads_per_block + (num_valid_keys % num_threads_per_block != 0);
Timer update_min_timer("Update Min Timer");
update_min_timer.start();
updateMinKernel<<<num_blocks, num_threads_per_block>>>(thrust::raw_pointer_cast(&ray_idx[0]), thrust::raw_pointer_cast(&ray_maxts[0]), thrust::raw_pointer_cast(&ray_hitids[0]),
thrust::raw_pointer_cast(&pack.dev_ray_maxts[0]), thrust::raw_pointer_cast(&pack.dev_hitids[0]), num_valid_keys);
update_min_timer.stop();
ctr.update_min_time += update_min_timer.get_ms();
// reset the counters
ray_idx.clear();
ray_maxts.clear();
ray_hitids.clear();
pack.buffered_ray_idx.clear();
pack.buffered_tri_idx.clear();
pack.tri_segment_sizes.clear();
pack.ray_segment_sizes.clear();
pack.segment_ids.clear();
pack.ray_buffer_occupied = 0;
pack.tri_buffer_occupied = 0;
pack.num_segments = 0;
} |
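The reduce_by_key call in dacrtCompleteRender reduces the (maxts, hitid) tuples of equal ray ids with a MinHitFunctor whose definition is not part of this excerpt. Below is a minimal sketch of what such a binary functor presumably looks like: it keeps whichever tuple carries the smaller hit distance. The name suffix and exact form are assumptions, not the original definition.
template <typename Tuple>
struct MinHitFunctorSketch
{
    __host__ __device__
    Tuple operator()(const Tuple& a, const Tuple& b) const
    {
        // tuple layout follows the zip_iterator above: get<0> = maxts, get<1> = hitid
        return (thrust::get<0>(b) < thrust::get<0>(a)) ? b : a;
    }
};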
cb950bb47de9b20916732312bff27425db017957.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "common.h"
#include "pcr_kernel.cu"
void pcr(T *a, T *b, T *c, T *d, T *x, int systemSize, int numSystems)
{
const unsigned int num_threads_block = systemSize;
const unsigned int memSize = sizeof(T)*numSystems*systemSize;
// allocate device memory input and output arrays
T* d_a;
T* d_b;
T* d_c;
T* d_d;
T* d_x;
CUDA_SAFE_CALL( hipMalloc( (void**) &d_a,memSize));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_b,memSize));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_c,memSize));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_d,memSize));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_x,memSize));
// copy host memory to device input array
CUDA_SAFE_CALL( hipMemcpy( d_a, a,memSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL( hipMemcpy( d_b, b,memSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL( hipMemcpy( d_c, c,memSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL( hipMemcpy( d_d, d,memSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL( hipMemcpy( d_x, x,memSize, hipMemcpyHostToDevice));
// setup execution parameters
dim3 grid(numSystems, 1, 1);
dim3 threads(num_threads_block, 1, 1);
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
cutResetTimer(timer);
cutStartTimer(timer);
for(int i = 0; i < numIterations;i ++)
hipLaunchKernelGGL(( pcrKernel), dim3(grid), dim3(threads),(systemSize+1)*5*sizeof(T), 0, d_a, d_b, d_c, d_d, d_x);
hipDeviceSynchronize();
cutStopTimer(timer);
printf("pcr: numSystems: %d, systemSize: %d, GPU kernel time: %f ms\n", numSystems, systemSize, cutGetTimerValue(timer)/numIterations);
CUDA_SAFE_CALL( hipMemcpy( d_a, a,memSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL( hipMemcpy( d_b, b,memSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL( hipMemcpy( d_c, c,memSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL( hipMemcpy( d_d, d,memSize, hipMemcpyHostToDevice));
CUDA_SAFE_CALL( hipMemcpy( d_x, x,memSize, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( pcrKernel), dim3(grid), dim3(threads),(systemSize+1)*5*sizeof(T), 0, d_a, d_b, d_c, d_d, d_x);
// copy result from device to host
CUDA_SAFE_CALL( hipMemcpy(x, d_x,memSize, hipMemcpyDeviceToHost));
// cleanup memory
CUDA_SAFE_CALL(hipFree(d_a));
CUDA_SAFE_CALL(hipFree(d_b));
CUDA_SAFE_CALL(hipFree(d_c));
CUDA_SAFE_CALL(hipFree(d_d));
CUDA_SAFE_CALL(hipFree(d_x));
}
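// Editorial sketch (pcr_kernel.cu is not shown in this excerpt): one parallel cyclic reduction
// step for unknown i at stride delta, which is what pcrKernel presumably applies log2(systemSize)
// times per system. Neighbours that fall outside the system act like identity rows
// (a = c = 0, b = 1, d = 0). The helper name and exact formulation are assumptions.
__device__ inline void pcrStepSketch(const T* a, const T* b, const T* c, const T* d,
                                     T* aNew, T* bNew, T* cNew, T* dNew,
                                     int i, int delta, int systemSize)
{
    T aL = 0, bL = 1, cL = 0, dL = 0;   // left neighbour (i - delta)
    T aR = 0, bR = 1, cR = 0, dR = 0;   // right neighbour (i + delta)
    if (i - delta >= 0)         { aL = a[i - delta]; bL = b[i - delta]; cL = c[i - delta]; dL = d[i - delta]; }
    if (i + delta < systemSize) { aR = a[i + delta]; bR = b[i + delta]; cR = c[i + delta]; dR = d[i + delta]; }
    T k1 = a[i] / bL;                   // eliminates the coupling to row i - delta
    T k2 = c[i] / bR;                   // eliminates the coupling to row i + delta
    aNew[i] = -aL * k1;
    cNew[i] = -cR * k2;
    bNew[i] = b[i] - cL * k1 - aR * k2;
    dNew[i] = d[i] - dL * k1 - dR * k2;
}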
| cb950bb47de9b20916732312bff27425db017957.cu | #include <stdio.h>
#include "common.h"
#include "pcr_kernel.cu"
void pcr(T *a, T *b, T *c, T *d, T *x, int systemSize, int numSystems)
{
const unsigned int num_threads_block = systemSize;
const unsigned int memSize = sizeof(T)*numSystems*systemSize;
// allocate device memory input and output arrays
T* d_a;
T* d_b;
T* d_c;
T* d_d;
T* d_x;
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_a,memSize));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_b,memSize));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_c,memSize));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_d,memSize));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_x,memSize));
// copy host memory to device input array
CUDA_SAFE_CALL( cudaMemcpy( d_a, a,memSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL( cudaMemcpy( d_b, b,memSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL( cudaMemcpy( d_c, c,memSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL( cudaMemcpy( d_d, d,memSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL( cudaMemcpy( d_x, x,memSize, cudaMemcpyHostToDevice));
// setup execution parameters
dim3 grid(numSystems, 1, 1);
dim3 threads(num_threads_block, 1, 1);
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
cutResetTimer(timer);
cutStartTimer(timer);
for(int i = 0; i < numIterations;i ++)
pcrKernel<<< grid, threads,(systemSize+1)*5*sizeof(T)>>>(d_a, d_b, d_c, d_d, d_x);
cudaThreadSynchronize();
cutStopTimer(timer);
printf("pcr: numSystems: %d, systemSize: %d, GPU kernel time: %f ms\n", numSystems, systemSize, cutGetTimerValue(timer)/numIterations);
CUDA_SAFE_CALL( cudaMemcpy( d_a, a,memSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL( cudaMemcpy( d_b, b,memSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL( cudaMemcpy( d_c, c,memSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL( cudaMemcpy( d_d, d,memSize, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL( cudaMemcpy( d_x, x,memSize, cudaMemcpyHostToDevice));
pcrKernel<<< grid, threads,(systemSize+1)*5*sizeof(T)>>>(d_a, d_b, d_c, d_d, d_x);
// copy result from device to host
CUDA_SAFE_CALL( cudaMemcpy(x, d_x,memSize, cudaMemcpyDeviceToHost));
// cleanup memory
CUDA_SAFE_CALL(cudaFree(d_a));
CUDA_SAFE_CALL(cudaFree(d_b));
CUDA_SAFE_CALL(cudaFree(d_c));
CUDA_SAFE_CALL(cudaFree(d_d));
CUDA_SAFE_CALL(cudaFree(d_x));
}
|
0b1a5db7a707bc213c5b1f34a5db006a014e3acb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void transpose_relu(float *odata, float *idata, int width, int height)
{
__shared__ float block[BLOCK_DIM][BLOCK_DIM+1];
// read the matrix tile into shared memory
// load one element per thread from device memory (idata) and store it
// in transposed order in block[][]
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
// synchronise to ensure all writes to block[][] have completed
__syncthreads();
// write the transposed matrix tile to global memory (odata) in linear order
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
} | 0b1a5db7a707bc213c5b1f34a5db006a014e3acb.cu | #include "includes.h"
__global__ void transpose_relu(float *odata, float *idata, int width, int height)
{
__shared__ float block[BLOCK_DIM][BLOCK_DIM+1];
// read the matrix tile into shared memory
// load one element per thread from device memory (idata) and store it
// in transposed order in block[][]
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
// synchronise to ensure all writes to block[][] have completed
__syncthreads();
// write the transposed matrix tile to global memory (odata) in linear order
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
} |
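// Editorial notes (not part of the original source): the +1 in block[BLOCK_DIM][BLOCK_DIM+1]
// pads each row of the shared-memory tile so that the transposed read
// block[threadIdx.x][threadIdx.y] walks distinct banks instead of a single one. Despite its
// name, the kernel body only transposes; no ReLU is applied. A minimal launch sketch follows,
// with the helper name and device pointers as illustrative assumptions:
void launchTransposeRelu(float* d_odata, float* d_idata, int width, int height)
{
    dim3 threads(BLOCK_DIM, BLOCK_DIM);
    dim3 grid((width + BLOCK_DIM - 1) / BLOCK_DIM, (height + BLOCK_DIM - 1) / BLOCK_DIM);
    transpose_relu<<<grid, threads>>>(d_odata, d_idata, width, height);
}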
3a6713898e99564220b74ed4c4a5a907166789e4.hip | // !!! This is a file automatically generated by hipify!!!
// #include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* This example demonstrates a simple matrix sum on the GPU and on the host.
* sumMatrixOnGPUMix splits the work of the matrix sum across CUDA threads on the
* GPU. A 1D thread block and 2D grid are used. sumMatrixOnHost sequentially
* iterates through matrix elements on the host.
*/
#define CHECK
#include<sys/time.h>
double seconds(){
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6);
}
void initialData(float *ip, const int size)
{
int i;
for(i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx,
const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// grid 2D block 1D
__global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx,
int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = blockIdx.y;
unsigned int idx = iy * nx + ix;
if (ix < nx && iy < ny)
MatC[idx] = MatA[idx] + MatB[idx];
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up data size of matrix
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
double iStart = seconds();
initialData(h_A, nxy);
initialData(h_B, nxy);
double iElaps = seconds() - iStart;
printf("Matrix initialization elapsed %f sec\n", iElaps);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add matrix at host side for result checks
iStart = seconds();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps = seconds() - iStart;
printf("sumMatrixOnHost elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
CHECK(hipMalloc((void **)&d_MatA, nBytes));
CHECK(hipMalloc((void **)&d_MatB, nBytes));
CHECK(hipMalloc((void **)&d_MatC, nBytes));
// transfer data from host to device
CHECK(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
int dimx = 32;
dim3 block(dimx, 1);
dim3 grid((nx + block.x - 1) / block.x, ny);
iStart = seconds();
hipLaunchKernelGGL(( sumMatrixOnGPUMix), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx, ny);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("sumMatrixOnGPU2D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x,
grid.y,
block.x, block.y, iElaps);
// check kernel error
CHECK(hipGetLastError());
// copy kernel result back to host side
CHECK(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
CHECK(hipFree(d_MatA));
CHECK(hipFree(d_MatB));
CHECK(hipFree(d_MatC));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
CHECK(hipDeviceReset());
return (0);
}
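// Worked example of the launch configuration above (editorial note, not part of the original
// source): with nx = ny = 1 << 14 = 16384 and dimx = 32, block = (32, 1) and
// grid = ((16384 + 31) / 32, 16384) = (512, 16384), i.e. 512 * 16384 = 8,388,608 blocks of
// 32 threads - exactly one thread per matrix element, with blockIdx.y selecting the row
// directly because the block is one-dimensional in y.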
| 3a6713898e99564220b74ed4c4a5a907166789e4.cu | // #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* This example demonstrates a simple vector sum on the GPU and on the host.
* sumArraysOnGPU splits the work of the vector sum across CUDA threads on the
* GPU. A 1D thread block and 2D grid are used. sumArraysOnHost sequentially
* iterates through vector elements on the host.
*/
#define CHECK
#include<sys/time.h>
double seconds(){
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6);
}
void initialData(float *ip, const int size)
{
int i;
for(i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx,
const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// grid 2D block 1D
__global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx,
int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = blockIdx.y;
unsigned int idx = iy * nx + ix;
if (ix < nx && iy < ny)
MatC[idx] = MatA[idx] + MatB[idx];
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up data size of matrix
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
double iStart = seconds();
initialData(h_A, nxy);
initialData(h_B, nxy);
double iElaps = seconds() - iStart;
printf("Matrix initialization elapsed %f sec\n", iElaps);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add matrix at host side for result checks
iStart = seconds();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps = seconds() - iStart;
printf("sumMatrixOnHost elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
CHECK(cudaMalloc((void **)&d_MatA, nBytes));
CHECK(cudaMalloc((void **)&d_MatB, nBytes));
CHECK(cudaMalloc((void **)&d_MatC, nBytes));
// transfer data from host to device
CHECK(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
int dimx = 32;
dim3 block(dimx, 1);
dim3 grid((nx + block.x - 1) / block.x, ny);
iStart = seconds();
sumMatrixOnGPUMix<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("sumMatrixOnGPU2D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x,
grid.y,
block.x, block.y, iElaps);
// check kernel error
CHECK(cudaGetLastError());
// copy kernel result back to host side
CHECK(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
CHECK(cudaFree(d_MatA));
CHECK(cudaFree(d_MatB));
CHECK(cudaFree(d_MatC));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
CHECK(cudaDeviceReset());
return (0);
}
|
eb0596e221dcc9095d391d828cd954f567e6ae1a.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 2
#define TW 2
#define TC 16
#define C 96
#define N 64
#define H 28
#define W 28
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
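// Derived values for the tile parameters above (editorial note, not part of the original
// source): with C = 96 and TC = 16, TCS = (96 - 1) / 16 + 1 = 6; with H = W = 28 and
// TH = TW = 2, THS = TWS = (28 - 1) / 2 + 1 = 14; and WPAD = TWS * TW + 2 = 30, i.e. one
// 28-wide input row plus a one-pixel halo on each side for the 3x3 (R x S) filter.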
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[16];
__shared__ float pad_temp_shared[3072];
__shared__ float kernel_shared[2304];
float pad_temp_shared_local[2];
float kernel_shared_local[8];
compute_local[(0)] = 0.000000e+00f;
compute_local[(8)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(9)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(10)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
compute_local[(11)] = 0.000000e+00f;
compute_local[(4)] = 0.000000e+00f;
compute_local[(12)] = 0.000000e+00f;
compute_local[(5)] = 0.000000e+00f;
compute_local[(13)] = 0.000000e+00f;
compute_local[(6)] = 0.000000e+00f;
compute_local[(14)] = 0.000000e+00f;
compute_local[(7)] = 0.000000e+00f;
compute_local[(15)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 2; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
pad_temp_shared[((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)))] = (((((1 <= (((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 55) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 55) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)))) && (((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + (((((int)threadIdx.x) * 55) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + ((((((int)threadIdx.x) * 55) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + ((((int)threadIdx.x) * 55) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 1))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 1) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 1) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 1) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 1) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 1) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 2))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 2) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 2) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 2) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 2) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 2) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 3))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 3) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 3) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 3) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 3) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 3) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 4))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 4) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 4) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 4) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 4) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 4) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 5))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 5) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 5) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 5) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 5) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 5) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 6))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 6) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 6) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 6) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 6) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 6) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 7))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 7) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 7) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 7) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 7) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 7) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 7) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 7) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 8))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 8) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 8) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 8) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 8) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 8) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 8) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 8) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 9))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 9) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 9) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 9) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 9) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 9) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 9) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 9) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 10))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 10) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 10) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 10) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 10) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 10) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 10) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 10) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 11))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 11) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 11) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 11) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 11) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 11) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 11) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 11) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 12))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 12) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 12) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 12) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 12) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 12) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 12) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 12) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 13))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 13) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 13) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 13) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 13) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 13) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 13) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 13) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 14))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 14) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 14) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 14) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 14) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 14) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 14) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 14) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 15))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 15) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 15) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 15) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 15) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 15) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 15) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 15) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 16))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 16) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 16) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)))) && (((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 16) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 16) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + ((((int)threadIdx.x) * 55) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 17))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 17) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 17) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 17) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 17) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 1) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 18))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 18) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 18) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 18) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 18) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 2) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 19))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 19) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 19) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 19) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 19) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 3) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 20))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 20) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 20) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 20) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 20) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 4) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 21))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 21) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 21) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 21) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 21) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 5) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 22))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 22) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 22) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 22) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 22) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 6) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 23))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 23) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 23) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 7) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 7) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 23) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 23) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 7) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 24))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 24) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 24) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 8) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 8) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 24) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 24) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 8) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 25))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 25) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 25) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 9) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 9) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 25) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 25) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 9) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 26))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 26) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 26) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 10) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 10) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 26) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 26) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 10) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 27))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 27) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 27) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 11) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 11) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 27) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 27) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 11) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 28))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 28) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 28) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 12) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 12) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 28) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 28) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 12) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 29))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 29) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 29) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 13) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 13) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 29) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 29) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 13) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 30))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 30) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 30) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 14) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 14) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 30) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 30) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 14) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 31))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 31) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 31) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 15) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 15) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 31) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 31) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 15) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 32))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 32) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 32) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)))) && (((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 32) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 32) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + ((((int)threadIdx.x) * 55) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 33))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 33) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 33) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 33) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 33) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 1) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 34))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 34) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 34) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 34) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 34) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 2) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 35))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 35) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 35) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 35) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 35) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 3) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 36))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 36) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 36) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 36) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 36) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 4) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 37))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 37) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 37) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 37) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 37) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 5) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 38))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 38) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 38) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 38) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 38) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 6) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 39))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 39) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 39) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 7) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 7) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 39) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 39) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 7) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 40))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 40) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 40) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 8) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 8) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 40) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 40) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 8) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 41))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 41) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 41) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 9) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 9) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 41) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 41) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 9) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 42))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 42) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 42) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 10) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 10) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 42) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 42) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 10) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 43))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 43) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 43) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 11) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 11) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 43) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 43) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 11) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 44))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 44) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 44) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 12) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 12) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 44) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 44) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 12) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 45))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 45) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 45) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 13) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 13) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 45) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 45) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 13) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 46))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 46) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 46) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 14) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 14) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 46) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 46) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 14) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 47))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 47) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 47) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 15) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 15) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 47) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 47) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 15) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 48))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 48) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 48) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)))) && (((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 48) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 48) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + ((((int)threadIdx.x) * 55) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 49))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 49) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 49) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 49) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 49) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 1) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 50))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 50) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 50) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 50) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 50) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 2) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 51))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 51) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 51) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 51) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 51) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 3) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 52))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 52) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 52) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 52) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 52) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 4) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 53))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 53) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 53) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 53) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 53) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 5) & 15)) - 29))] : 0.000000e+00f);
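// Tail of the input staging: the final element per thread is guarded so the store stays inside pad_temp_shared.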
if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.y) * 6)) + (((((int)threadIdx.x) * 55) + 54) >> 6)) < 48) {
if ((((((int)threadIdx.z) * 96) + (((int)threadIdx.y) * 24)) + (((((int)threadIdx.x) * 55) + 54) >> 4)) < 192) {
if ((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) < 3018) {
if (((((int)threadIdx.y) * 384) + (((int)threadIdx.x) * 55)) < 1482) {
if (((int)threadIdx.x) < 6) {
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 54))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 54) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 54) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 54) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 54) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 6) & 15)) - 29))] : 0.000000e+00f);
}
}
}
}
}
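// Stage the filter weights for this (rc_outer, ry_outer) slice into kernel_shared.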
kernel_shared[((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + (((((int)threadIdx.x) * 14) / 48) * 864)) + (rc_outer * 432)) + (((((int)threadIdx.x) * 14) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 1))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + (((((int)threadIdx.x) * 14) / 48) * 864)) + (rc_outer * 432)) + (((((int)threadIdx.x) * 14) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 2))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + (((((int)threadIdx.x) * 14) / 48) * 864)) + (rc_outer * 432)) + (((((int)threadIdx.x) * 14) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 3))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 1) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 1) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 4))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 1) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 1) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 5))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 1) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 1) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 6))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 2) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 2) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 7))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 2) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 2) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 8))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 2) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 2) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 9))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 3) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 3) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 10))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 3) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 3) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 11))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 3) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 3) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 12))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 4) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 4) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 13))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 4) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 4) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 14))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 4) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 4) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 15))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 5) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 5) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 16))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 5) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 5) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 17))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 5) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 5) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 18))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 6) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 6) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 19))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 6) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 6) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 20))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 6) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 6) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 21))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 7) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 7) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 22))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 7) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 7) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 23))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 7) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 7) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 24))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 8) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 8) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 25))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 8) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 8) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 26))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 8) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 8) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 27))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 9) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 9) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 28))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 9) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 9) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 29))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 9) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 9) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 30))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 10) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 10) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 31))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 10) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 10) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 32))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 10) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 10) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 33))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 11) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 11) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 34))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 11) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 11) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 35))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 11) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 11) % 48) * 9)) + (ry_outer * 3)) + 2))];
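// Guarded tail stores for the final kernel_shared entries of each thread.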
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 14) + 12) / 48)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 14)) < 756) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) < 2268) {
if (((((int)threadIdx.y) * 288) + (((int)threadIdx.x) * 42)) < 1116) {
if (((int)threadIdx.x) < 6) {
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 36))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 12) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 12) % 48) * 9)) + (ry_outer * 3)))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 14) + 12) / 48)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 14)) < 756) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) < 2267) {
if (((((int)threadIdx.y) * 288) + (((int)threadIdx.x) * 42)) < 1115) {
if (((int)threadIdx.x) < 6) {
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 37))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 12) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 12) % 48) * 9)) + (ry_outer * 3)) + 1))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 14) + 12) / 48)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 14)) < 756) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) < 2266) {
if (((((int)threadIdx.y) * 288) + (((int)threadIdx.x) * 42)) < 1114) {
if (((int)threadIdx.x) < 6) {
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 38))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 12) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 12) % 48) * 9)) + (ry_outer * 3)) + 2))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 14) + 13) / 48)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 14)) < 755) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) < 2265) {
if (((((int)threadIdx.y) * 288) + (((int)threadIdx.x) * 42)) < 1113) {
if (((int)threadIdx.x) < 6) {
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 39))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 13) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 13) % 48) * 9)) + (ry_outer * 3)))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 14) + 13) / 48)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 14)) < 755) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) < 2264) {
if (((((int)threadIdx.y) * 288) + (((int)threadIdx.x) * 42)) < 1112) {
if (((int)threadIdx.x) < 6) {
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 40))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 13) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 13) % 48) * 9)) + (ry_outer * 3)) + 1))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 14) + 13) / 48)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 14)) < 755) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) < 2263) {
if (((((int)threadIdx.y) * 288) + (((int)threadIdx.x) * 42)) < 1111) {
if (((int)threadIdx.x) < 6) {
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 41))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 13) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 13) % 48) * 9)) + (ry_outer * 3)) + 2))];
}
}
}
}
}
__syncthreads();
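// Reduction over the 48 channel slices staged in shared memory: each iteration applies the three
// filter taps of the current row and accumulates into the 16 per-thread partial sums.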
for (int rc_inner_outer = 0; rc_inner_outer < 48; ++rc_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 2)))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 2)) + 1))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1152))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 144))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1296))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 288))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1440))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 432))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1584))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(4)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(1)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(5)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(3)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(7)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(3)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(((((rc_inner_outer * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 2)) + 1))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 2)) + 2))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1153))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 145))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1297))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 289))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1441))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 433))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1585))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(4)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(1)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(5)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(3)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(7)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(3)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(((((rc_inner_outer * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 2)) + 2))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 2)) + 3))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 2))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1154))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 146))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1298))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 290))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1442))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 434))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1586))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(4)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(1)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(5)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(3)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(7)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(3)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
}
}
}
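// Write the 16 accumulated partial sums back to the output tensor.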
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)))] = compute_local[(0)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 6272))] = compute_local[(8)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 1))] = compute_local[(1)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 6273))] = compute_local[(9)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 784))] = compute_local[(2)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 7056))] = compute_local[(10)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 785))] = compute_local[(3)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 7057))] = compute_local[(11)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 1568))] = compute_local[(4)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 7840))] = compute_local[(12)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 1569))] = compute_local[(5)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 7841))] = compute_local[(13)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 2352))] = compute_local[(6)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 8624))] = compute_local[(14)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 2353))] = compute_local[(7)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 8625))] = compute_local[(15)];
}
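// cuDNN reference paths: each wrapper below sets up its descriptors once in initialize() and runs a
// single forward algorithm (implicit GEMM, non-fused Winograd, FFT) so the custom kernels can be
// timed against it.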
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights (R x S taps, C input channels, N output channels)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights (R x S taps, C input channels, N output channels)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights (R x S taps, C input channels, N output channels)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
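// Cooperative loader for the tiled kernel: each warp copies rows of one channel of the input tile
// into shared memory with a one-column left pad; h_offset distinguishes tiles at the top image
// border (which keep the zero-padded first row) from interior tiles.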
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
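// Write-back helper: clips the thread's TH x TW result patch to the actual tile extent at the
// right/bottom borders and accumulates it into global memory with atomicAdd, since several
// channel-tile blocks contribute to the same output element.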
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 2:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
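// Tiled direct 3x3 convolution: blockIdx.x encodes a (channel tile, row tile) pair, threadIdx.x
// encodes (output-column tile, output channel); each thread accumulates a TH x TW output patch
// for one output channel over the TC channels staged in shared memory.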
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
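// Sum of absolute element-wise differences, used to validate the tiled kernel against the cuDNN output.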
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += fabsf(x[i] - y[i]); // fabsf avoids accidentally binding the integer abs() overload to float arguments
}
return diff;
}
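// Benchmark driver: builds a random C x H x W input and an all-ones weight set,
// runs the ConvGemm/ConvWinogradeNon/ConvFFT library baselines (presumably
// cuDNN-backed), the generated default_function_kernel0, and the tiled conv2d
// kernel defined above, times each with events, and appends one CSV row (sizes,
// times, and each baseline's time divided by conv2d's) to the evaluation file,
// echoing the same numbers to stdout.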
int main(void){
float *input = new float[C*H*W];
time_t t;
    // Scratch buffer: allocated and zeroed here but never read or passed to a
    // kernel later in this function.
    float *matrix;
    hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
    hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
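    // Reference and warm-up: the GEMM output is copied to the host as the
    // correctness reference; the FFT and Winograd results are not kept, so these
    // first calls effectively just warm up each path before the timed runs.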
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
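    // Timed runs: each implementation below is measured over a single launch
    // between two event records. A sketch of how several launches could be
    // averaged for steadier numbers (not used by this harness; `iters` is a
    // hypothetical parameter):
    //   const int iters = 10;
    //   float ms = 0.0f;
    //   hipEventRecord(event_start);
    //   for (int it = 0; it < iters; ++it) convGemm.forward(device_input);
    //   hipEventRecord(event_stop);
    //   hipEventSynchronize(event_stop);
    //   hipEventElapsedTime(&ms, event_start, event_stop);
    //   ms /= iters;  // average time per launch in milliseconds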
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
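    // Generated-kernel run: grid(2,7,4) / block(7,4,2) is the launch shape
    // hard-coded for default_function_kernel0, presumably fixed by the schedule
    // that produced the kernel.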
dim3 grid(2,7,4);
dim3 block(7,4,2);
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tvm;
hipEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
hipMemset(device_out, 0, sizeof(float)*N*H*W);
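    // Tiled direct convolution: allow the kernel TC*(TH+2)*WPAD*4 bytes of
    // dynamic shared memory per block, then launch TCS*THS blocks of N*TWS
    // threads with that same amount of shared memory.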
chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
hipEventRecord(event_start);
hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
    sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
            cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
            cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
    float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
    cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
        time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
        cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
    // Report the validation result so a mismatch against the cuDNN reference is visible.
    cout<<"L1 difference vs cuDNN reference: "<<difference<<endl;
return 0;
}
| eb0596e221dcc9095d391d828cd954f567e6ae1a.cu |
#include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
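// Tiling parameters (interpretation inferred from how the macros are used below):
// each thread accumulates a TH x TW output tile, TC input channels are staged in
// shared memory at a time, THS/TWS/TCS count the tiles along H/W/C, WPAD is the
// halo-padded row width of the staged input, and R x S is the 3x3 filter size.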
#define TH 2
#define TW 2
#define TC 16
#define C 96
#define N 64
#define H 28
#define W 28
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
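// The kernel below appears to be machine-generated (TVM-style code generation):
// fully unrolled, bounds-checked staging of input and weight tiles into shared
// memory, with strides hard-coded for the N/C/H/W sizes defined above.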
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[16];
__shared__ float pad_temp_shared[3072];
__shared__ float kernel_shared[2304];
float pad_temp_shared_local[2];
float kernel_shared_local[8];
compute_local[(0)] = 0.000000e+00f;
compute_local[(8)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(9)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(10)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
compute_local[(11)] = 0.000000e+00f;
compute_local[(4)] = 0.000000e+00f;
compute_local[(12)] = 0.000000e+00f;
compute_local[(5)] = 0.000000e+00f;
compute_local[(13)] = 0.000000e+00f;
compute_local[(6)] = 0.000000e+00f;
compute_local[(14)] = 0.000000e+00f;
compute_local[(7)] = 0.000000e+00f;
compute_local[(15)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 2; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
pad_temp_shared[((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)))] = (((((1 <= (((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 55) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 55) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)))) && (((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + (((((int)threadIdx.x) * 55) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + ((((((int)threadIdx.x) * 55) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + ((((int)threadIdx.x) * 55) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 1))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 1) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 1) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 1) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 1) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 1) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 2))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 2) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 2) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 2) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 2) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 2) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 3))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 3) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 3) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 3) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 3) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 3) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 4))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 4) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 4) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 4) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 4) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 4) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 5))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 5) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 5) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 5) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 5) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 5) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 6))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 6) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 6) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 6) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 6) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 6) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 7))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 7) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 7) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 7) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 7) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 7) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 7) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 7) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 8))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 8) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 8) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 8) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 8) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 8) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 8) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 8) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 9))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 9) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 9) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 9) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 9) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 9) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 9) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 9) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 10))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 10) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 10) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 10) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 10) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 10) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 10) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 10) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 11))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 11) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 11) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 11) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 11) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 11) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 11) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 11) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 12))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 12) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 12) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 12) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 12) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 12) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 12) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 12) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 13))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 13) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 13) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 13) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 13) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 13) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 13) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 13) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 14))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 14) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 14) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 14) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 14) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 14) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 14) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 14) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 15))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 15) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 15) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 15) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 15) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 15) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 15) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 15) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 16))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 16) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 16) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)))) && (((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 16) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 16) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + ((((int)threadIdx.x) * 55) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 17))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 17) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 17) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 17) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 17) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 1) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 18))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 18) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 18) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 18) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 18) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 2) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 19))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 19) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 19) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 19) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 19) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 3) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 20))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 20) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 20) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 20) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 20) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 4) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 21))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 21) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 21) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 21) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 21) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 5) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 22))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 22) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 22) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 22) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 22) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 6) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 23))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 23) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 23) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 7) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 7) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 23) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 23) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 7) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 24))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 24) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 24) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 8) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 8) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 24) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 24) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 8) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 25))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 25) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 25) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 9) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 9) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 25) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 25) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 9) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 26))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 26) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 26) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 10) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 10) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 26) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 26) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 10) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 27))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 27) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 27) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 11) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 11) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 27) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 27) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 11) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 28))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 28) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 28) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 12) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 12) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 28) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 28) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 12) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 29))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 29) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 29) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 13) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 13) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 29) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 29) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 13) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 30))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 30) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 30) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 14) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 14) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 30) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 30) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 14) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 31))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 31) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 31) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 15) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 15) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 31) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 31) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 15) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 32))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 32) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 32) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)))) && (((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 32) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 32) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + ((((int)threadIdx.x) * 55) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 33))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 33) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 33) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 33) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 33) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 1) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 34))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 34) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 34) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 34) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 34) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 2) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 35))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 35) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 35) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 35) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 35) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 3) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 36))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 36) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 36) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 36) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 36) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 4) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 37))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 37) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 37) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 37) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 37) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 5) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 38))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 38) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 38) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 38) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 38) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 6) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 39))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 39) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 39) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 7) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 7) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 39) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 39) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 7) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 40))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 40) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 40) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 8) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 8) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 40) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 40) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 8) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 41))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 41) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 41) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 9) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 9) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 41) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 41) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 9) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 42))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 42) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 42) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 10) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 10) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 42) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 42) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 10) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 43))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 43) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 43) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 11) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 11) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 43) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 43) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 11) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 44))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 44) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 44) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 12) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 12) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 44) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 44) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 12) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 45))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 45) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 45) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 13) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 13) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 45) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 45) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 13) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 46))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 46) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 46) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 14) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 14) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 46) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 46) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 14) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 47))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 47) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 47) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 15) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 15) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 47) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 47) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 15) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 48))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 48) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 48) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)))) && (((((int)blockIdx.x) * 14) + ((((int)threadIdx.x) * 55) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 48) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 48) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + ((((int)threadIdx.x) * 55) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 49))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 49) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 49) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 1) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 49) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 49) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 1) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 50))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 50) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 50) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 2) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 50) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 50) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 2) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 51))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 51) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 51) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 3) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 51) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 51) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 3) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 52))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 52) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 52) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 4) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 52) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 52) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 4) & 15)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 53))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 53) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 53) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 5) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 53) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 53) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 5) & 15)) - 29))] : 0.000000e+00f);
if ((((((int)threadIdx.z) * 24) + (((int)threadIdx.y) * 6)) + (((((int)threadIdx.x) * 55) + 54) >> 6)) < 48) {
if ((((((int)threadIdx.z) * 96) + (((int)threadIdx.y) * 24)) + (((((int)threadIdx.x) * 55) + 54) >> 4)) < 192) {
if ((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) < 3018) {
if (((((int)threadIdx.y) * 384) + (((int)threadIdx.x) * 55)) < 1482) {
if (((int)threadIdx.x) < 6) {
pad_temp_shared[(((((((int)threadIdx.z) * 1536) + (((int)threadIdx.y) * 384)) + (((int)threadIdx.x) * 55)) + 54))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 54) & 63) >> 4)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 55) + 54) & 63) >> 4)) + ry_outer) < 29)) && (1 <= ((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)))) && (((((int)blockIdx.x) * 14) + (((((int)threadIdx.x) * 55) + 6) & 15)) < 29)) ? data[(((((((((((rc_outer * 37632) + (((int)threadIdx.z) * 18816)) + (((int)threadIdx.y) * 4704)) + ((((((int)threadIdx.x) * 55) + 54) >> 6) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 55) + 54) & 63) >> 4) * 28)) + (ry_outer * 28)) + (((int)blockIdx.x) * 14)) + (((((int)threadIdx.x) * 55) + 6) & 15)) - 29))] : 0.000000e+00f);
}
}
}
}
}
kernel_shared[((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + (((((int)threadIdx.x) * 14) / 48) * 864)) + (rc_outer * 432)) + (((((int)threadIdx.x) * 14) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 1))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + (((((int)threadIdx.x) * 14) / 48) * 864)) + (rc_outer * 432)) + (((((int)threadIdx.x) * 14) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 2))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + (((((int)threadIdx.x) * 14) / 48) * 864)) + (rc_outer * 432)) + (((((int)threadIdx.x) * 14) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 3))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 1) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 1) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 4))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 1) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 1) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 5))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 1) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 1) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 6))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 2) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 2) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 7))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 2) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 2) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 8))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 2) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 2) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 9))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 3) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 3) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 10))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 3) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 3) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 11))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 3) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 3) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 12))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 4) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 4) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 13))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 4) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 4) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 14))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 4) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 4) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 15))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 5) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 5) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 16))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 5) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 5) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 17))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 5) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 5) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 18))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 6) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 6) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 19))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 6) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 6) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 20))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 6) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 6) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 21))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 7) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 7) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 22))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 7) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 7) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 23))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 7) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 7) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 24))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 8) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 8) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 25))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 8) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 8) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 26))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 8) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 8) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 27))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 9) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 9) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 28))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 9) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 9) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 29))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 9) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 9) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 30))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 10) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 10) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 31))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 10) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 10) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 32))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 10) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 10) % 48) * 9)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 33))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 11) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 11) % 48) * 9)) + (ry_outer * 3)))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 34))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 11) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 11) % 48) * 9)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 35))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 11) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 11) % 48) * 9)) + (ry_outer * 3)) + 2))];
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 14) + 12) / 48)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 14)) < 756) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) < 2268) {
if (((((int)threadIdx.y) * 288) + (((int)threadIdx.x) * 42)) < 1116) {
if (((int)threadIdx.x) < 6) {
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 36))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 12) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 12) % 48) * 9)) + (ry_outer * 3)))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 14) + 12) / 48)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 14)) < 756) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) < 2267) {
if (((((int)threadIdx.y) * 288) + (((int)threadIdx.x) * 42)) < 1115) {
if (((int)threadIdx.x) < 6) {
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 37))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 12) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 12) % 48) * 9)) + (ry_outer * 3)) + 1))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 14) + 12) / 48)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 14)) < 756) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) < 2266) {
if (((((int)threadIdx.y) * 288) + (((int)threadIdx.x) * 42)) < 1114) {
if (((int)threadIdx.x) < 6) {
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 38))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 12) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 12) % 48) * 9)) + (ry_outer * 3)) + 2))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 14) + 13) / 48)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 14)) < 755) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) < 2265) {
if (((((int)threadIdx.y) * 288) + (((int)threadIdx.x) * 42)) < 1113) {
if (((int)threadIdx.x) < 6) {
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 39))] = kernel[((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 13) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 13) % 48) * 9)) + (ry_outer * 3)))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 14) + 13) / 48)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 14)) < 755) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) < 2264) {
if (((((int)threadIdx.y) * 288) + (((int)threadIdx.x) * 42)) < 1112) {
if (((int)threadIdx.x) < 6) {
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 40))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 13) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 13) % 48) * 9)) + (ry_outer * 3)) + 1))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 14) + 13) / 48)) < 16) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 14)) < 755) {
if ((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) < 2263) {
if (((((int)threadIdx.y) * 288) + (((int)threadIdx.x) * 42)) < 1111) {
if (((int)threadIdx.x) < 6) {
kernel_shared[(((((((int)threadIdx.z) * 1152) + (((int)threadIdx.y) * 288)) + (((int)threadIdx.x) * 42)) + 41))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 14) + 13) / 48) * 864)) + (rc_outer * 432)) + ((((((int)threadIdx.x) * 14) + 13) % 48) * 9)) + (ry_outer * 3)) + 2))];
}
}
}
}
}
__syncthreads();
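// Reduction over the 48 input-channel slices currently staged in shared memory.
// For each slice the three filter columns are processed in turn: two input
// pixels and eight filter weights are loaded into registers, then accumulated
// into the 16 per-thread partial sums held in compute_local.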
for (int rc_inner_outer = 0; rc_inner_outer < 48; ++rc_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 2)))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 2)) + 1))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1152))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 144))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1296))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 288))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1440))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 432))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1584))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(4)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(1)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(5)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(3)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(7)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(3)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(((((rc_inner_outer * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 2)) + 1))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 2)) + 2))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1153))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 145))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1297))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 289))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1441))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 433))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1585))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(4)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(1)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(5)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(3)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(7)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(3)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(((((rc_inner_outer * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 2)) + 2))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 64) + (((int)threadIdx.y) * 16)) + (((int)threadIdx.x) * 2)) + 3))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 2))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1154))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 146))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1298))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 290))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1442))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 434))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 576) + (rc_inner_outer * 3)) + 1586))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(4)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(1)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(5)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(3)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(7)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(3)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
}
}
}
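// Write the 16 per-thread accumulators back to the output tensor. Offsets of
// 784 step to the next output channel and 6272 skips eight channels ahead,
// which implies the 28x28 spatial shape baked into this generated schedule.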
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)))] = compute_local[(0)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 6272))] = compute_local[(8)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 1))] = compute_local[(1)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 6273))] = compute_local[(9)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 784))] = compute_local[(2)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 7056))] = compute_local[(10)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 785))] = compute_local[(3)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 7057))] = compute_local[(11)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 1568))] = compute_local[(4)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 7840))] = compute_local[(12)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 1569))] = compute_local[(5)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 7841))] = compute_local[(13)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 2352))] = compute_local[(6)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 8624))] = compute_local[(14)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 2353))] = compute_local[(7)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 14)) + (((int)threadIdx.x) * 2)) + 8625))] = compute_local[(15)];
}
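// --- cuDNN reference implementations ---------------------------------------
// ConvGemm, ConvWinogradeNon and ConvFFT wrap the same 3x3 convolution with
// cuDNN's IMPLICIT_GEMM, WINOGRAD_NONFUSED and FFT forward algorithms; they
// are used below only as timing/correctness baselines. N, C, H, W, R and S
// are compile-time macros defined earlier in this file (not shown here).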
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
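// Identical wrapper to ConvGemm, but requesting cuDNN's non-fused Winograd
// forward algorithm.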
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
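// Identical wrapper again, this time requesting cuDNN's FFT-based forward
// algorithm.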
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
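// Stage one padded input tile into shared memory. The c loop strides over the
// tile's channels by thread group (warp_id), the inner loop strides over the
// tile's pixels by lane; the +1 column offset keeps the left padding column at
// zero, and h_offset == 1 shifts the copy down one row so the top padding row
// also stays zero (used for tiles that start at image row 0).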
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
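// Flush a thread's register tile into global memory. The nested switches
// specialize the loop bounds for partial tiles at the bottom/right borders
// (write_h and write_w are 1 or 2 here); atomicAdd is required because blocks
// working on different channel slices accumulate into the same output pixels.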
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 2:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
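// Tile-based direct 3x3 convolution kernel. Each block handles one
// (channel-slice, row-tile) pair: the padded input slice is staged in dynamic
// shared memory, each thread accumulates a small TH x TW output tile in
// registers, and the unrolled body below applies the nine filter taps to what
// is effectively a 2x2 micro-tile (the indexing assumes TH = TW = 2). TH, TW,
// TC, THS, TWS, WPAD, N, R and S are macros defined earlier in this file.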
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
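// Sum of absolute element-wise differences between two host buffers; used as
// a cheap correctness check against the cuDNN reference output.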
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
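// Benchmark driver: build a random input and all-ones filters, run the three
// cuDNN baselines, the TVM-generated kernel and the tile-based conv2d kernel,
// time each with CUDA events, then append the timings to a CSV file and echo
// them to stdout.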
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
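// Launch the TVM-generated kernel with the fixed grid/block shape baked into
// its schedule, timing it with the same CUDA events.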
dim3 grid(2,7,4);
dim3 block(7,4,2);
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tvm;
cudaEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemset(device_out, 0, sizeof(float)*N*H*W);
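// Raise the kernel's dynamic shared-memory limit, then launch it: one block
// per (channel-slice, row-tile) pair, N * TWS threads per block, and
// TC*(TH+2)*WPAD floats (4 bytes each) of dynamic shared memory.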
chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
cudaEventRecord(event_start);
conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
bb16965cb991163fe8dd9621caf9727767c6ab18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This example demonstrates how to use the Cuda OpenGL bindings with the
* runtime API.
* Device code.
*/
#ifndef _SIMPLEGL_KERNEL_H_
#define _SIMPLEGL_KERNEL_H_
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel to modify vertex positions in sine wave pattern
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void kernel(float4* pos, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
// write output vertex
pos[y*width+x] = make_float4(u, w, v, 1.0f);
}
// Wrapper for the __global__ call that sets up the kernel call
extern "C" void launch_kernel(float4* pos, unsigned int mesh_width, unsigned int mesh_height, float time)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, pos, mesh_width, mesh_height, time);
}
#endif // #ifndef _SIMPLEGL_KERNEL_H_
| bb16965cb991163fe8dd9621caf9727767c6ab18.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This example demonstrates how to use the Cuda OpenGL bindings with the
* runtime API.
* Device code.
*/
#ifndef _SIMPLEGL_KERNEL_H_
#define _SIMPLEGL_KERNEL_H_
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel to modify vertex positions in sine wave pattern
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void kernel(float4* pos, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
// write output vertex
pos[y*width+x] = make_float4(u, w, v, 1.0f);
}
// Wrapper for the __global__ call that sets up the kernel call
extern "C" void launch_kernel(float4* pos, unsigned int mesh_width, unsigned int mesh_height, float time)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
kernel<<< grid, block>>>(pos, mesh_width, mesh_height, time);
}
#endif // #ifndef _SIMPLEGL_KERNEL_H_
|
3fc05ec74d95882d2594816dcf1e2d60d3c833f6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 128
__global__ void av3(int n, float *in1, float *in2, float *in3, float *out)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
// Guarantees that index does not go beyond vector size and applies average
if (index<n)
{
out[index] = (in1[index] + in2[index] + in3[index])/3;
}
}
float *average3(int num, float *in1, float *in2, float *in3, float *out)
{
// Device copies of three inputs and output, size of allocated memory, num of threads and blocks
float *d_in1, *d_in2, *d_in3, *d_out;
int size = num * sizeof(float);
int thr, blk;
// Alloc memory for device copies of inputs and outputs
hipMalloc((void **)&d_in1, size);
hipMalloc((void **)&d_in2, size);
hipMalloc((void **)&d_in3, size);
hipMalloc((void **)&d_out, size);
// Copy inputs to device
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice);
hipMemcpy(d_in3, in3, size, hipMemcpyHostToDevice);
// Calculates blocks and threads and launch average3 kernel on GPU
blk=floor(num/THREADS_PER_BLOCK)+1;
thr=THREADS_PER_BLOCK;
hipLaunchKernelGGL(( av3), dim3(blk),dim3(thr), 0, 0, num, d_in1, d_in2, d_in3, d_out);
// Wait for the GPU to finish
hipDeviceSynchronize();
// Copy result back to host and cleanup
hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost);
hipFree(d_in1); hipFree(d_in2); hipFree(d_in3); hipFree(d_out);
return out;
}
| 3fc05ec74d95882d2594816dcf1e2d60d3c833f6.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 128
__global__ void av3(int n, float *in1, float *in2, float *in3, float *out)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
// Guarantees that index does not go beyond vector size and applies average
if (index<n)
{
out[index] = (in1[index] + in2[index] + in3[index])/3;
}
}
float *average3(int num, float *in1, float *in2, float *in3, float *out)
{
// Device copies of three inputs and output, size of allocated memory, num of threads and blocks
float *d_in1, *d_in2, *d_in3, *d_out;
int size = num * sizeof(float);
int thr, blk;
// Alloc memory for device copies of inputs and outputs
cudaMalloc((void **)&d_in1, size);
cudaMalloc((void **)&d_in2, size);
cudaMalloc((void **)&d_in3, size);
cudaMalloc((void **)&d_out, size);
// Copy inputs to device
cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_in3, in3, size, cudaMemcpyHostToDevice);
// Calculates blocks and threads and launch average3 kernel on GPU
blk=floor(num/THREADS_PER_BLOCK)+1;
thr=THREADS_PER_BLOCK;
av3<<<blk,thr>>>(num, d_in1, d_in2, d_in3, d_out);
// Wait for the GPU to finish
cudaDeviceSynchronize();
// Copy result back to host and cleanup
cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
cudaFree(d_in1); cudaFree(d_in2); cudaFree(d_in3); cudaFree(d_out);
return out;
}
|
947b80a9948c3ac3e77df8de291421ea7f47bb0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/BucketizationUtils.h>
#include <THH/THH.h>
#include <ATen/native/Resize.h>
namespace at {
namespace native {
// Implement a TF like searchsorted and a bucketize function running on cuda
// See details in ATen/native/Bucketization.cpp
namespace {
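// Plain binary searches over one sorted segment [start, end) of the flattened
// boundaries tensor: lower_bound returns the first position whose value is
// >= val, upper_bound the first position whose value is > val.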
template<typename input_t>
__device__ int64_t lower_bound(const input_t *data_ss, int64_t start, int64_t end, input_t val) {
while (start < end) {
int64_t mid = start + ((end - start) >> 1);
if (!(data_ss[mid] >= val)) {
start = mid + 1;
}
else {
end = mid;
}
}
return start;
}
template<typename input_t>
__device__ int64_t upper_bound(const input_t *data_ss, int64_t start, int64_t end, input_t val) {
while (start < end) {
int64_t mid = start + ((end - start) >> 1);
if (!(data_ss[mid] > val)) {
start = mid + 1;
}
else {
end = mid;
}
}
return start;
}
template<typename input_t, typename output_t>
__global__ void searchsorted_cuda_kernel(
output_t *data_out,
const input_t *data_in,
const input_t *data_bd,
int64_t idim_in,
int64_t idim_bd,
int64_t numel_in,
bool right,
bool is_1d_boundaries) {
for (int64_t tid = blockIdx.x * blockDim.x + threadIdx.x; tid < numel_in; tid += blockDim.x * gridDim.x) {
// If boundaries tensor is 1d, we always search the entire boundary tensor
int64_t start_bd = is_1d_boundaries ? 0 : tid / idim_in * idim_bd;
int64_t end_bd = start_bd + idim_bd;
int64_t pos = !right ?
lower_bound<input_t>(data_bd, start_bd, end_bd, data_in[tid]) - start_bd :
upper_bound<input_t>(data_bd, start_bd, end_bd, data_in[tid]) - start_bd;
// type conversion might happen here
data_out[tid] = pos;
}
}
template<typename input_t, typename output_t>
void searchsorted_cuda_contiguous(Tensor& result, const Tensor& input, const Tensor& boundaries, const bool& right) {
int64_t numel_in = input.numel();
bool is_scalar_input = input.dim() == 0 && numel_in == 1;
// inner most dim size of input and boundaries
int64_t idim_in = is_scalar_input ? 1 : input.sizes().back();
int64_t idim_bd = boundaries.sizes().back();
const input_t *data_in = input.data_ptr<input_t>();
const input_t *data_bd = boundaries.data_ptr<input_t>();
output_t *data_out = result.data_ptr<output_t>();
int64_t maxThread = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
int64_t maxGrid = 1024;
dim3 block = dim3(::min(maxThread, numel_in));
dim3 grid = dim3(::min(maxGrid, ceil_div<int64_t>(numel_in, block.x)));
at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( searchsorted_cuda_kernel), dim3(grid), dim3(block), 0, stream,
data_out, data_in, data_bd, idim_in, idim_bd, numel_in, right, boundaries.dim() == 1);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
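// Choose the output index type (int32 vs int64) requested by the caller and
// dispatch on the input scalar type, then run the contiguous search kernel.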
void dispatch(Tensor& result, const Tensor& input, const Tensor& boundaries, bool out_int32, bool right) {
if (!out_int32) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "searchsorted_out_cuda", [&] {
searchsorted_cuda_contiguous<scalar_t, int64_t>(result, input, boundaries, right);
});
}
else {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "searchsorted_out_cuda", [&] {
searchsorted_cuda_contiguous<scalar_t, int>(result, input, boundaries, right);
});
}
}
}
Tensor& searchsorted_out_cuda(const Tensor& sorted_sequence, const Tensor& self, bool out_int32, bool right, Tensor& result) {
searchsorted_pre_check(sorted_sequence, self, result, out_int32);
at::native::resize_output(result, self.sizes());
if (self.numel() == 0) {
return result;
}
if (sorted_sequence.is_contiguous() && self.is_contiguous() && sorted_sequence.dtype() == self.dtype()) {
dispatch(result, self, sorted_sequence, out_int32, right);
return result;
}
Tensor trimmed_input;
Tensor trimmed_boundaries;
searchsorted_maybe_trim_input_tensors(trimmed_input, trimmed_boundaries, self, sorted_sequence);
const Tensor& final_input = trimmed_input.defined() ? trimmed_input : self;
const Tensor& final_boundaries = trimmed_boundaries.defined() ? trimmed_boundaries : sorted_sequence;
dispatch(result, final_input, final_boundaries, out_int32, right);
return result;
}
Tensor searchsorted_cuda(const Tensor& sorted_sequence, const Tensor& self, bool out_int32, bool right) {
ScalarType scalar_type = out_int32 ? ScalarType::Int : ScalarType::Long;
c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type);
Tensor result = at::empty({0}, options, MemoryFormat::Contiguous);
at::native::searchsorted_out_cuda(sorted_sequence, self, out_int32, right, result);
return result;
}
// See [Note about _torch_cuda_cu_linker_symbol_op and torch_cuda_cu] in native_functions.yaml
Tensor _torch_cuda_cu_linker_symbol_op_cuda(const Tensor& self) {
return self;
}
Tensor searchsorted_cuda(const Tensor& sorted_sequence, const Scalar& self, bool out_int32, bool right) {
return searchsorted_cuda(sorted_sequence, searchsorted_scalar_tensor(self, sorted_sequence.device()), out_int32, right);
}
Tensor& bucketize_out_cuda(const Tensor& self, const Tensor& boundaries, bool out_int32, bool right, Tensor& result) {
TORCH_CHECK(boundaries.dim() == 1, "boundaries tensor must be 1 dimension, but got dim(", boundaries.dim(), ")");
at::native::searchsorted_out_cuda(boundaries, self, out_int32, right, result);
return result;
}
Tensor bucketize_cuda(const Tensor& self, const Tensor& boundaries, bool out_int32, bool right) {
ScalarType scalar_type = out_int32 ? ScalarType::Int : ScalarType::Long;
c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type);
Tensor result = at::empty({0}, options, MemoryFormat::Contiguous);
at::native::bucketize_out_cuda(self, boundaries, out_int32, right, result);
return result;
}
Tensor bucketize_cuda(const Scalar& self, const Tensor& boundaries, bool out_int32, bool right) {
return bucketize_cuda(searchsorted_scalar_tensor(self, boundaries.device()), boundaries, out_int32, right);
}
}} // namespace at::native
| 947b80a9948c3ac3e77df8de291421ea7f47bb0b.cu | #include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/BucketizationUtils.h>
#include <THC/THC.h>
#include <ATen/native/Resize.h>
namespace at {
namespace native {
// Implement a TF like searchsorted and a bucketize function running on cuda
// See details in ATen/native/Bucketization.cpp
namespace {
template<typename input_t>
__device__ int64_t lower_bound(const input_t *data_ss, int64_t start, int64_t end, input_t val) {
while (start < end) {
int64_t mid = start + ((end - start) >> 1);
if (!(data_ss[mid] >= val)) {
start = mid + 1;
}
else {
end = mid;
}
}
return start;
}
template<typename input_t>
__device__ int64_t upper_bound(const input_t *data_ss, int64_t start, int64_t end, input_t val) {
while (start < end) {
int64_t mid = start + ((end - start) >> 1);
if (!(data_ss[mid] > val)) {
start = mid + 1;
}
else {
end = mid;
}
}
return start;
}
template<typename input_t, typename output_t>
__global__ void searchsorted_cuda_kernel(
output_t *data_out,
const input_t *data_in,
const input_t *data_bd,
int64_t idim_in,
int64_t idim_bd,
int64_t numel_in,
bool right,
bool is_1d_boundaries) {
for (int64_t tid = blockIdx.x * blockDim.x + threadIdx.x; tid < numel_in; tid += blockDim.x * gridDim.x) {
// If boundaries tensor is 1d, we always search the entire boundary tensor
int64_t start_bd = is_1d_boundaries ? 0 : tid / idim_in * idim_bd;
int64_t end_bd = start_bd + idim_bd;
int64_t pos = !right ?
lower_bound<input_t>(data_bd, start_bd, end_bd, data_in[tid]) - start_bd :
upper_bound<input_t>(data_bd, start_bd, end_bd, data_in[tid]) - start_bd;
// type conversion might happen here
data_out[tid] = pos;
}
}
template<typename input_t, typename output_t>
void searchsorted_cuda_contiguous(Tensor& result, const Tensor& input, const Tensor& boundaries, const bool& right) {
int64_t numel_in = input.numel();
bool is_scalar_input = input.dim() == 0 && numel_in == 1;
// inner most dim size of input and boundaries
int64_t idim_in = is_scalar_input ? 1 : input.sizes().back();
int64_t idim_bd = boundaries.sizes().back();
const input_t *data_in = input.data_ptr<input_t>();
const input_t *data_bd = boundaries.data_ptr<input_t>();
output_t *data_out = result.data_ptr<output_t>();
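  // Launch configuration: the block size is capped at the device's maxThreadsPerBlock and the
  // grid at 1024 blocks; the grid-stride loop inside the kernel covers any remaining elements.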
int64_t maxThread = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
int64_t maxGrid = 1024;
dim3 block = dim3(std::min(maxThread, numel_in));
dim3 grid = dim3(std::min(maxGrid, ceil_div<int64_t>(numel_in, block.x)));
at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream();
searchsorted_cuda_kernel<<<grid, block, 0, stream>>>(
data_out, data_in, data_bd, idim_in, idim_bd, numel_in, right, boundaries.dim() == 1);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
void dispatch(Tensor& result, const Tensor& input, const Tensor& boundaries, bool out_int32, bool right) {
if (!out_int32) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "searchsorted_out_cuda", [&] {
searchsorted_cuda_contiguous<scalar_t, int64_t>(result, input, boundaries, right);
});
}
else {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "searchsorted_out_cuda", [&] {
searchsorted_cuda_contiguous<scalar_t, int>(result, input, boundaries, right);
});
}
}
}
Tensor& searchsorted_out_cuda(const Tensor& sorted_sequence, const Tensor& self, bool out_int32, bool right, Tensor& result) {
searchsorted_pre_check(sorted_sequence, self, result, out_int32);
at::native::resize_output(result, self.sizes());
if (self.numel() == 0) {
return result;
}
if (sorted_sequence.is_contiguous() && self.is_contiguous() && sorted_sequence.dtype() == self.dtype()) {
dispatch(result, self, sorted_sequence, out_int32, right);
return result;
}
Tensor trimmed_input;
Tensor trimmed_boundaries;
searchsorted_maybe_trim_input_tensors(trimmed_input, trimmed_boundaries, self, sorted_sequence);
const Tensor& final_input = trimmed_input.defined() ? trimmed_input : self;
const Tensor& final_boundaries = trimmed_boundaries.defined() ? trimmed_boundaries : sorted_sequence;
dispatch(result, final_input, final_boundaries, out_int32, right);
return result;
}
Tensor searchsorted_cuda(const Tensor& sorted_sequence, const Tensor& self, bool out_int32, bool right) {
ScalarType scalar_type = out_int32 ? ScalarType::Int : ScalarType::Long;
c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type);
Tensor result = at::empty({0}, options, MemoryFormat::Contiguous);
at::native::searchsorted_out_cuda(sorted_sequence, self, out_int32, right, result);
return result;
}
// See [Note about _torch_cuda_cu_linker_symbol_op and torch_cuda_cu] in native_functions.yaml
Tensor _torch_cuda_cu_linker_symbol_op_cuda(const Tensor& self) {
return self;
}
Tensor searchsorted_cuda(const Tensor& sorted_sequence, const Scalar& self, bool out_int32, bool right) {
return searchsorted_cuda(sorted_sequence, searchsorted_scalar_tensor(self, sorted_sequence.device()), out_int32, right);
}
Tensor& bucketize_out_cuda(const Tensor& self, const Tensor& boundaries, bool out_int32, bool right, Tensor& result) {
TORCH_CHECK(boundaries.dim() == 1, "boundaries tensor must be 1 dimension, but got dim(", boundaries.dim(), ")");
at::native::searchsorted_out_cuda(boundaries, self, out_int32, right, result);
return result;
}
Tensor bucketize_cuda(const Tensor& self, const Tensor& boundaries, bool out_int32, bool right) {
ScalarType scalar_type = out_int32 ? ScalarType::Int : ScalarType::Long;
c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type);
Tensor result = at::empty({0}, options, MemoryFormat::Contiguous);
at::native::bucketize_out_cuda(self, boundaries, out_int32, right, result);
return result;
}
Tensor bucketize_cuda(const Scalar& self, const Tensor& boundaries, bool out_int32, bool right) {
return bucketize_cuda(searchsorted_scalar_tensor(self, boundaries.device()), boundaries, out_int32, right);
}
}} // namespace at::native
|
c415397af60a003bae3f2f2f261e31dba87a20a8.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018-2020, Michael P. Howard
// Copyright (c) 2021, Auburn University
// This file is part of the azplugins project, released under the Modified BSD License.
#include "DPDPotentials.cuh"
namespace azplugins
{
namespace gpu
{
//! Kernel driver for modified DPD potential
template hipError_t compute_dpd_potential<azplugins::detail::DPDEvaluatorGeneralWeight>
(const dpd_pair_args_t& dpd_args,
const typename azplugins::detail::DPDEvaluatorGeneralWeight::param_type *d_params);
} // end namespace gpu
} // end namespace azplugins
| c415397af60a003bae3f2f2f261e31dba87a20a8.cu | // Copyright (c) 2018-2020, Michael P. Howard
// Copyright (c) 2021, Auburn University
// This file is part of the azplugins project, released under the Modified BSD License.
#include "DPDPotentials.cuh"
namespace azplugins
{
namespace gpu
{
//! Kernel driver for modified DPD potential
template cudaError_t compute_dpd_potential<azplugins::detail::DPDEvaluatorGeneralWeight>
(const dpd_pair_args_t& dpd_args,
const typename azplugins::detail::DPDEvaluatorGeneralWeight::param_type *d_params);
} // end namespace gpu
} // end namespace azplugins
|
39c21d4643a9b0fe659afb9b5c5cca2de38d9dbd.hip | // !!! This is a file automatically generated by hipify!!!
//
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include <oz/blit.h>
#include <oz/gpu_plm2.h>
#include <oz/foreach.h>
#include <oz/shuffle.h>
#include <oz/color.h>
namespace oz {
template<typename T> struct Blit {
gpu_plm2<T> dst_;
uint2 od_;
const gpu_plm2<T> src_;
uint2 os_;
Blit( gpu_image& dst, uint2 od, const gpu_image& src, uint2 os ) : dst_(dst), od_(od), src_(src), os_(os) {}
inline __device__ void operator()( int ix, int iy ) {
dst_.write(od_.x + ix, od_.y + iy, src_(os_.x + ix, os_.y + iy));
}
};
void blit( gpu_image& dst, unsigned x, unsigned y, const gpu_image& src, unsigned sx, unsigned sy, unsigned sw, unsigned sh ) {
if (dst.format() != src.format()) OZ_INVALID_FORMAT();
if (sw > src.w()) sw = src.w();
if (sh > src.h()) sh = src.h();
if (sx + sw > dst.w()) sw = dst.w() - sx;
if (sy + sh > dst.h()) sh = dst.h() - sy;
switch (src.format()) {
case FMT_FLOAT: {
Blit<float> op(dst, make_uint2(x, y), src, make_uint2(sx, sy));
foreach(sw, sh, op);
break;
}
case FMT_FLOAT2: {
Blit<float2> op(dst, make_uint2(x, y), src, make_uint2(sx, sy));
foreach(sw, sh, op);
break;
}
case FMT_FLOAT3: {
Blit<float3> op(dst, make_uint2(x, y), src, make_uint2(sx, sy));
foreach(sw, sh, op);
break;
}
case FMT_FLOAT4: {
Blit<float4> op(dst, make_uint2(x, y), src, make_uint2(sx, sy));
foreach(sw, sh, op);
break;
}
default:
OZ_INVALID_FORMAT();
}
}
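// Stack two images of the same format vertically (a above b), horizontally centered,
// separated by `spacing` rows of white background.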
gpu_image vstack( const gpu_image& a, const gpu_image& b, int spacing) {
if (a.format() != b.format()) OZ_INVALID_FORMAT();
int w = ::max(a.w(), b.w());
int h = a.h() + b.h() + spacing;
gpu_image dst(w, h, a.format());
dst.clear_white();
blit(dst, (w - a.w()) / 2, 0, a, 0, 0, a.w(), a.h());
blit(dst, (w - b.w()) / 2, a.h() + spacing, b, 0, 0, b.w(), b.h());
return dst;
}
gpu_image vstack( const gpu_image& a, const gpu_image& b, const gpu_image& c, int spacing) {
if ((a.format() != b.format()) || (a.format() != c.format())) OZ_INVALID_FORMAT();
int w = ::max(::max(a.w(), b.w()), c.w());
int h = a.h() + b.h() + c.h() + 2 * spacing;
gpu_image dst(w, h, a.format());
dst.clear_white();
blit(dst, (w - a.w()) / 2, 0, a, 0, 0, a.w(), a.h());
blit(dst, (w - b.w()) / 2, a.h() + spacing, b, 0, 0, b.w(), b.h());
blit(dst, (w - c.w()) / 2, a.h() + b.h() + 2 * spacing, c, 0, 0, c.w(), c.h());
return dst;
}
gpu_image vstack( const gpu_image& a, const gpu_image& b,
const gpu_image& c, const gpu_image& d, int spacing)
{
if ((a.format() != b.format()) || (a.format() != c.format())
|| (a.format() != d.format())) OZ_INVALID_FORMAT();
int w = ::max(::max(::max(a.w(), b.w()), c.w()), d.w());
int h = a.h() + b.h() + c.h() + d.h() + 3 * spacing;
gpu_image dst(w, h, a.format());
dst.clear_white();
int y = 0;
blit(dst, (w - a.w()) / 2, y, a, 0, 0, a.w(), a.h());
y += a.h() + spacing;
blit(dst, (w - b.w()) / 2, y, b, 0, 0, b.w(), b.h());
y += b.h() + spacing;
blit(dst, (w - c.w()) / 2, y, c, 0, 0, c.w(), c.h());
y += c.h() + spacing;
blit(dst, (w - c.w()) / 2, y, d, 0, 0, d.w(), d.h());
return dst;
}
gpu_image vstack_alpha( const gpu_image& src, int spacing) {
gpu_image a, b;
switch (src.format()) {
case FMT_FLOAT2:
a = shuffle(src, 0);
b = shuffle(src, 1);
break;
case FMT_FLOAT4:
a = src.convert(FMT_FLOAT3);
b = gray2rgb(shuffle(src, 3));
break;
default:
OZ_INVALID_FORMAT();
}
return vstack(a, b, spacing);
}
gpu_image vstack_channel( const gpu_image& src, int spacing) {
gpu_image a, b, c, d;
switch (src.format()) {
case FMT_FLOAT:
return src;
case FMT_FLOAT2:
a = shuffle(src, 0);
b = shuffle(src, 1);
return vstack(a, b, spacing);
case FMT_FLOAT3:
a = shuffle(src, 0);
b = shuffle(src, 1);
c = shuffle(src, 2);
return vstack(a, b, c, spacing);
case FMT_FLOAT4:
a = shuffle(src, 0);
b = shuffle(src, 1);
c = shuffle(src, 2);
d = shuffle(src, 3);
return vstack(a, b, c, d, spacing);
default:
OZ_INVALID_FORMAT();
}
}
}
| 39c21d4643a9b0fe659afb9b5c5cca2de38d9dbd.cu | //
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include <oz/blit.h>
#include <oz/gpu_plm2.h>
#include <oz/foreach.h>
#include <oz/shuffle.h>
#include <oz/color.h>
namespace oz {
template<typename T> struct Blit {
gpu_plm2<T> dst_;
uint2 od_;
const gpu_plm2<T> src_;
uint2 os_;
Blit( gpu_image& dst, uint2 od, const gpu_image& src, uint2 os ) : dst_(dst), od_(od), src_(src), os_(os) {}
inline __device__ void operator()( int ix, int iy ) {
dst_.write(od_.x + ix, od_.y + iy, src_(os_.x + ix, os_.y + iy));
}
};
void blit( gpu_image& dst, unsigned x, unsigned y, const gpu_image& src, unsigned sx, unsigned sy, unsigned sw, unsigned sh ) {
if (dst.format() != src.format()) OZ_INVALID_FORMAT();
if (sw > src.w()) sw = src.w();
if (sh > src.h()) sh = src.h();
if (sx + sw > dst.w()) sw = dst.w() - sx;
if (sy + sh > dst.h()) sh = dst.h() - sy;
switch (src.format()) {
case FMT_FLOAT: {
Blit<float> op(dst, make_uint2(x, y), src, make_uint2(sx, sy));
foreach(sw, sh, op);
break;
}
case FMT_FLOAT2: {
Blit<float2> op(dst, make_uint2(x, y), src, make_uint2(sx, sy));
foreach(sw, sh, op);
break;
}
case FMT_FLOAT3: {
Blit<float3> op(dst, make_uint2(x, y), src, make_uint2(sx, sy));
foreach(sw, sh, op);
break;
}
case FMT_FLOAT4: {
Blit<float4> op(dst, make_uint2(x, y), src, make_uint2(sx, sy));
foreach(sw, sh, op);
break;
}
default:
OZ_INVALID_FORMAT();
}
}
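// Stack two images of the same format vertically (a above b), horizontally centered,
// separated by `spacing` rows of white background.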
gpu_image vstack( const gpu_image& a, const gpu_image& b, int spacing) {
if (a.format() != b.format()) OZ_INVALID_FORMAT();
int w = std::max(a.w(), b.w());
int h = a.h() + b.h() + spacing;
gpu_image dst(w, h, a.format());
dst.clear_white();
blit(dst, (w - a.w()) / 2, 0, a, 0, 0, a.w(), a.h());
blit(dst, (w - b.w()) / 2, a.h() + spacing, b, 0, 0, b.w(), b.h());
return dst;
}
gpu_image vstack( const gpu_image& a, const gpu_image& b, const gpu_image& c, int spacing) {
if ((a.format() != b.format()) || (a.format() != c.format())) OZ_INVALID_FORMAT();
int w = std::max(std::max(a.w(), b.w()), c.w());
int h = a.h() + b.h() + c.h() + 2 * spacing;
gpu_image dst(w, h, a.format());
dst.clear_white();
blit(dst, (w - a.w()) / 2, 0, a, 0, 0, a.w(), a.h());
blit(dst, (w - b.w()) / 2, a.h() + spacing, b, 0, 0, b.w(), b.h());
blit(dst, (w - c.w()) / 2, a.h() + b.h() + 2 * spacing, c, 0, 0, c.w(), c.h());
return dst;
}
gpu_image vstack( const gpu_image& a, const gpu_image& b,
const gpu_image& c, const gpu_image& d, int spacing)
{
if ((a.format() != b.format()) || (a.format() != c.format())
|| (a.format() != d.format())) OZ_INVALID_FORMAT();
int w = std::max(std::max(std::max(a.w(), b.w()), c.w()), d.w());
int h = a.h() + b.h() + c.h() + d.h() + 3 * spacing;
gpu_image dst(w, h, a.format());
dst.clear_white();
int y = 0;
blit(dst, (w - a.w()) / 2, y, a, 0, 0, a.w(), a.h());
y += a.h() + spacing;
blit(dst, (w - b.w()) / 2, y, b, 0, 0, b.w(), b.h());
y += b.h() + spacing;
blit(dst, (w - c.w()) / 2, y, c, 0, 0, c.w(), c.h());
y += c.h() + spacing;
blit(dst, (w - c.w()) / 2, y, d, 0, 0, d.w(), d.h());
return dst;
}
gpu_image vstack_alpha( const gpu_image& src, int spacing) {
gpu_image a, b;
switch (src.format()) {
case FMT_FLOAT2:
a = shuffle(src, 0);
b = shuffle(src, 1);
break;
case FMT_FLOAT4:
a = src.convert(FMT_FLOAT3);
b = gray2rgb(shuffle(src, 3));
break;
default:
OZ_INVALID_FORMAT();
}
return vstack(a, b, spacing);
}
gpu_image vstack_channel( const gpu_image& src, int spacing) {
gpu_image a, b, c, d;
switch (src.format()) {
case FMT_FLOAT:
return src;
case FMT_FLOAT2:
a = shuffle(src, 0);
b = shuffle(src, 1);
return vstack(a, b, spacing);
case FMT_FLOAT3:
a = shuffle(src, 0);
b = shuffle(src, 1);
c = shuffle(src, 2);
return vstack(a, b, c, spacing);
case FMT_FLOAT4:
a = shuffle(src, 0);
b = shuffle(src, 1);
c = shuffle(src, 2);
d = shuffle(src, 3);
return vstack(a, b, c, d, spacing);
default:
OZ_INVALID_FORMAT();
}
}
}
|
fe88077f760eff6f2970872f5c091ca8e3c8292e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "misc.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
/**************************************************************************************************
**************************************************************************************************/
int getRandom( int low, int hi)
{
assert( hi >= low);
return( rand() % (hi-low+1) + low);
}
/**************************************************************************************************
**************************************************************************************************/
void finish( const char* msg, int res)
{
if( msg) printf("%s\n", msg);
exit( res);
}
/**************************************************************************************************
**************************************************************************************************/
dev_ptr s_cudaMalloc( size_t size)
{
void* ptr = 0;
gpuErrchk( "hipMalloc()", hipMalloc( &ptr, size));
return ptr;
}
/**************************************************************************************************
**************************************************************************************************/
dev_ptr s_cudaMalloc( dev_ptr* ptr, size_t size)
{
gpuErrchk( "hipMalloc()", hipMalloc( ptr, size));
return *ptr;
}
/**************************************************************************************************
**************************************************************************************************/
int s_cudaFree( dev_ptr devPtr)
{
gpuErrchk( "hipFree()", hipFree( devPtr));
return 0;
}
/**************************************************************************************************
**************************************************************************************************/
void gpuAssert(const char* operation, hipError_t err, const char *file, int line)
{
if( err != hipSuccess)
{
printf("%s %d %s failed: [%d] %s\n", file, line, operation, err, hipGetErrorString( err));
exit( -1);
}
}
/**************************************************************************************************
* safely copies <count> bytes from <src> onto <dst>
* returns <dst>
**************************************************************************************************/
void* s_cudaMemcpy( void* dst, const void* src, size_t count, enum hipMemcpyKind kind)
{
gpuErrchk( "hipMemcpy()", hipMemcpy( dst, src, count, kind));
return dst;
}
/**************************************************************************************************
* copies <size> bytes from <hostPtr> onto <devPtr>
* returns <devPtr>
**************************************************************************************************/
dev_ptr s_hostToDevice( dev_ptr devPtr, const host_ptr hostPtr, size_t size)
{
s_cudaMemcpy( devPtr, hostPtr, size, hipMemcpyHostToDevice);
return devPtr;
}
/**************************************************************************************************
* copies <size> bytes from <devPtr> onto <hostPtr>
* returns <hostPtr>
**************************************************************************************************/
host_ptr s_deviceToHost( host_ptr hostPtr, const dev_ptr devPtr, size_t size)
{
s_cudaMemcpy( hostPtr, devPtr, size, hipMemcpyDeviceToHost);
return hostPtr;
}
/**************************************************************************************************
* allocates <size> bytes in the device and copies the content in <hostPtr> onto it
* returned pointer must be deallocated using hipFree or s_cudaFree
**************************************************************************************************/
dev_ptr s_allocToDevice( const host_ptr hostPtr, size_t size)
{
dev_ptr ret = 0;
if( size > 0)
{
ret = s_cudaMalloc( size);
s_hostToDevice( ret, hostPtr, size);
}
return ret;
}
/**************************************************************************************************
* allocates <size> bytes in the host and copies the content in <devPtr> onto it
* returned pointer must be deallocated using free
**************************************************************************************************/
host_ptr s_allocToHost( const dev_ptr devPtr, size_t size)
{
host_ptr ret = 0;
if( size > 0)
{
ret = malloc( size);
s_deviceToHost( ret, devPtr, size);
}
return ret;
}
/**************************************************************************************************
**************************************************************************************************/
__device__ void reduce_max(int* data, int count, int* res)
{
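// Tree reduction of the maximum over data[0..count): the active range is halved each step,
// and when the current range length is odd the last active thread also folds in the trailing
// element. Thread 0 writes the block's result to res[blockIdx.x].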
int tid = threadIdx.x;
bool odd = count % 2;
for( int s=count/2; s>0; s>>=1)
{
if( tid < s)
{
data[tid] = _max(data[tid],data[tid+s]);
if( tid == s-1 && odd)
data[tid] = _max(data[tid],data[tid+s+1]);
}
odd = s % 2;
__syncthreads();
}
if( !tid) res[blockIdx.x] = data[0];
}
| fe88077f760eff6f2970872f5c091ca8e3c8292e.cu | #include "misc.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
/**************************************************************************************************
**************************************************************************************************/
int getRandom( int low, int hi)
{
assert( hi >= low);
return( rand() % (hi-low+1) + low);
}
/**************************************************************************************************
**************************************************************************************************/
void finish( const char* msg, int res)
{
if( msg) printf("%s\n", msg);
exit( res);
}
/**************************************************************************************************
**************************************************************************************************/
dev_ptr s_cudaMalloc( size_t size)
{
void* ptr = 0;
gpuErrchk( "cudaMalloc()", cudaMalloc( &ptr, size));
return ptr;
}
/**************************************************************************************************
**************************************************************************************************/
dev_ptr s_cudaMalloc( dev_ptr* ptr, size_t size)
{
gpuErrchk( "cudaMalloc()", cudaMalloc( ptr, size));
return *ptr;
}
/**************************************************************************************************
**************************************************************************************************/
int s_cudaFree( dev_ptr devPtr)
{
gpuErrchk( "cudaFree()", cudaFree( devPtr));
return 0;
}
/**************************************************************************************************
**************************************************************************************************/
void gpuAssert(const char* operation, cudaError_t err, const char *file, int line)
{
if( err != cudaSuccess)
{
printf("%s %d %s failed: [%d] %s\n", file, line, operation, err, cudaGetErrorString( err));
exit( -1);
}
}
/**************************************************************************************************
* safely copies <count> bytes from <src> onto <dst>
* returns <dst>
**************************************************************************************************/
void* s_cudaMemcpy( void* dst, const void* src, size_t count, enum cudaMemcpyKind kind)
{
gpuErrchk( "cudaMemcpy()", cudaMemcpy( dst, src, count, kind));
return dst;
}
/**************************************************************************************************
* copies <size> bytes from <hostPtr> onto <devPtr>
* returns <devPtr>
**************************************************************************************************/
dev_ptr s_hostToDevice( dev_ptr devPtr, const host_ptr hostPtr, size_t size)
{
s_cudaMemcpy( devPtr, hostPtr, size, cudaMemcpyHostToDevice);
return devPtr;
}
/**************************************************************************************************
* copies <size> bytes from <devPtr> onto <hostPtr>
* returns <hostPtr>
**************************************************************************************************/
host_ptr s_deviceToHost( host_ptr hostPtr, const dev_ptr devPtr, size_t size)
{
s_cudaMemcpy( hostPtr, devPtr, size, cudaMemcpyDeviceToHost);
return hostPtr;
}
/**************************************************************************************************
* allocates <size> bytes in the device and copies the content in <hostPtr> onto it
* returned pointer must be deallocated using cudaFree or s_cudaFree
**************************************************************************************************/
dev_ptr s_allocToDevice( const host_ptr hostPtr, size_t size)
{
dev_ptr ret = 0;
if( size > 0)
{
ret = s_cudaMalloc( size);
s_hostToDevice( ret, hostPtr, size);
}
return ret;
}
/**************************************************************************************************
* allocates <size> bytes in the host and copies the content in <devPtr> onto it
* returned pointer must be deallocated using free
**************************************************************************************************/
host_ptr s_allocToHost( const dev_ptr devPtr, size_t size)
{
host_ptr ret = 0;
if( size > 0)
{
ret = malloc( size);
s_deviceToHost( ret, devPtr, size);
}
return ret;
}
/**************************************************************************************************
**************************************************************************************************/
__device__ void reduce_max(int* data, int count, int* res)
{
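// Tree reduction of the maximum over data[0..count): the active range is halved each step,
// and when the current range length is odd the last active thread also folds in the trailing
// element. Thread 0 writes the block's result to res[blockIdx.x].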
int tid = threadIdx.x;
bool odd = count % 2;
for( int s=count/2; s>0; s>>=1)
{
if( tid < s)
{
data[tid] = _max(data[tid],data[tid+s]);
if( tid == s-1 && odd)
data[tid] = _max(data[tid],data[tid+s+1]);
}
odd = s % 2;
__syncthreads();
}
if( !tid) res[blockIdx.x] = data[0];
}
|
3ebc2629744671e416ecbf86378a660a1422bace.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "subgradinput.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gradInput = NULL;
hipMalloc(&gradInput, XSIZE*YSIZE);
float *gradOutput = NULL;
hipMalloc(&gradOutput, XSIZE*YSIZE);
int input_n = 1;
int input_h = 1;
int input_w = 1;
int kH = 1;
int kW = 1;
int dH = 1;
int dW = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(subgradinput, dim3(gridBlock), dim3(threadBlock), 0, 0, gradInput,gradOutput,input_n,input_h,input_w,kH,kW,dH,dW);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(subgradinput, dim3(gridBlock), dim3(threadBlock), 0, 0, gradInput,gradOutput,input_n,input_h,input_w,kH,kW,dH,dW);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(subgradinput, dim3(gridBlock), dim3(threadBlock), 0, 0, gradInput,gradOutput,input_n,input_h,input_w,kH,kW,dH,dW);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3ebc2629744671e416ecbf86378a660a1422bace.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "subgradinput.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gradInput = NULL;
cudaMalloc(&gradInput, XSIZE*YSIZE);
float *gradOutput = NULL;
cudaMalloc(&gradOutput, XSIZE*YSIZE);
int input_n = 1;
int input_h = 1;
int input_w = 1;
int kH = 1;
int kW = 1;
int dH = 1;
int dW = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
subgradinput<<<gridBlock,threadBlock>>>(gradInput,gradOutput,input_n,input_h,input_w,kH,kW,dH,dW);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
subgradinput<<<gridBlock,threadBlock>>>(gradInput,gradOutput,input_n,input_h,input_w,kH,kW,dH,dW);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
subgradinput<<<gridBlock,threadBlock>>>(gradInput,gradOutput,input_n,input_h,input_w,kH,kW,dH,dW);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
64d5b489a6eeac717ed9af4d75ef71b32930c306.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void convertKinectFloatToRGBA_kernel(uchar4 *out_image, const float *in_image, int width, int height, int pitch, float lowerLim, float upperLim) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
float val = *((float *)((char *)in_image + y * pitch) + x);
val = (val == 0.0f) ? nanf("") : val;
// first draw unmatched pixels in white
if (!isfinite(val)) {
temp.x = 255;
temp.y = 255;
temp.z = 255;
temp.w = 255;
} else {
// rescale value from [lowerLim,upperLim] to [0,1]
val -= lowerLim;
val /= (upperLim - lowerLim);
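// map the normalized value through a jet-like colormap: blue -> cyan -> green -> yellow -> red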
float r = 1.0f;
float g = 1.0f;
float b = 1.0f;
if (val < 0.25f) {
r = 0;
g = 4.0f * val;
} else if (val < 0.5f) {
r = 0;
b = 1.0 + 4.0f * (0.25f - val);
} else if (val < 0.75f) {
r = 4.0f * (val - 0.5f);
b = 0;
} else {
g = 1.0f + 4.0f * (0.75f - val);
b = 0;
}
temp.x = 255.0 * r;
temp.y = 255.0 * g;
temp.z = 255.0 * b;
temp.w = 255;
}
out_image[__mul24(y, width) + x] = temp;
}
} | 64d5b489a6eeac717ed9af4d75ef71b32930c306.cu | #include "includes.h"
__global__ void convertKinectFloatToRGBA_kernel(uchar4 *out_image, const float *in_image, int width, int height, int pitch, float lowerLim, float upperLim) {
const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
uchar4 temp;
if (x < width && y < height) {
float val = *((float *)((char *)in_image + y * pitch) + x);
val = (val == 0.0f) ? nanf("") : val;
// first draw unmatched pixels in white
if (!isfinite(val)) {
temp.x = 255;
temp.y = 255;
temp.z = 255;
temp.w = 255;
} else {
// rescale value from [lowerLim,upperLim] to [0,1]
val -= lowerLim;
val /= (upperLim - lowerLim);
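// map the normalized value through a jet-like colormap: blue -> cyan -> green -> yellow -> red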
float r = 1.0f;
float g = 1.0f;
float b = 1.0f;
if (val < 0.25f) {
r = 0;
g = 4.0f * val;
} else if (val < 0.5f) {
r = 0;
b = 1.0 + 4.0f * (0.25f - val);
} else if (val < 0.75f) {
r = 4.0f * (val - 0.5f);
b = 0;
} else {
g = 1.0f + 4.0f * (0.75f - val);
b = 0;
}
temp.x = 255.0 * r;
temp.y = 255.0 * g;
temp.z = 255.0 * b;
temp.w = 255;
}
out_image[__mul24(y, width) + x] = temp;
}
} |
0e575cc0576bd6a3a673da4b659292a10bf348ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include "naive_sm.h"
namespace StreamCompaction {
namespace NaiveSM {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// naive scan implemented with shared memory
__global__ void kernNaiveScan(int N, int *odata, int *idata){
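// Exclusive scan: each thread loads the element to its left (0 for thread 0), then the two
// halves of tmp are used as ping-pong buffers (pout/pin) for the naive Hillis-Steele-style
// passes. Note that index is threadIdx.x only, so a single block's worth of data is scanned.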
extern __shared__ int tmp[];
int pout = 0;
int pin = 1;
int index = threadIdx.x;
if (index >= N) return;
tmp[index] = index > 0 ? idata[index - 1]: 0;
__syncthreads();
for (int offset = 1; offset < N; offset *= 2){
pout = 1 - pout;
pin = 1 - pin;
// the pseudocode in GPU Gems 3 contains an error
if (index >= offset) tmp[pout * N + index] = tmp[pin * N + index - offset] + tmp[pin * N + index];
else tmp[pout * N + index ] = tmp[pin * N + index];
__syncthreads();
}
odata[index] = tmp[pout * N + index];
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
dim3 fullBlockPerGrid((n + blockSize - 1) / blockSize);
int* dev_in, *dev_out;
hipMalloc((void**) &dev_in, n * sizeof(int));
checkCUDAError("hipMalloc dev_in failed");
hipMalloc((void**) &dev_out, n * sizeof(int));
checkCUDAError("hipMalloc dev_out failed");
hipMemcpy(dev_in, idata, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy HostToDevice failed");
timer().startGpuTimer();
hipLaunchKernelGGL(kernNaiveScan, dim3(fullBlockPerGrid), dim3(blockSize), 2 * n * sizeof(int), 0, n, dev_out, dev_in);
checkCUDAError("kernNaiveScan dev_in failed");
timer().endGpuTimer();
hipMemcpy(odata, dev_out, n * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy DeviceToHost failed");
hipFree(dev_in);
hipFree(dev_out);
}
}
}
| 0e575cc0576bd6a3a673da4b659292a10bf348ce.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive_sm.h"
namespace StreamCompaction {
namespace NaiveSM {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// naive scan implemented with shared memory
__global__ void kernNaiveScan(int N, int *odata, int *idata){
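// Exclusive scan: each thread loads the element to its left (0 for thread 0), then the two
// halves of tmp are used as ping-pong buffers (pout/pin) for the naive Hillis-Steele-style
// passes. Note that index is threadIdx.x only, so a single block's worth of data is scanned.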
extern __shared__ int tmp[];
int pout = 0;
int pin = 1;
int index = threadIdx.x;
if (index >= N) return;
tmp[index] = index > 0 ? idata[index - 1]: 0;
__syncthreads();
for (int offset = 1; offset < N; offset *= 2){
pout = 1 - pout;
pin = 1 - pin;
// the pseudocode in GPU Gems 3 contains an error
if (index >= offset) tmp[pout * N + index] = tmp[pin * N + index - offset] + tmp[pin * N + index];
else tmp[pout * N + index ] = tmp[pin * N + index];
__syncthreads();
}
odata[index] = tmp[pout * N + index];
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
dim3 fullBlockPerGrid((n + blockSize - 1) / blockSize);
int* dev_in, *dev_out;
cudaMalloc((void**) &dev_in, n * sizeof(int));
checkCUDAError("cudaMalloc dev_in failed");
cudaMalloc((void**) &dev_out, n * sizeof(int));
checkCUDAError("cudaMalloc dev_out failed");
cudaMemcpy(dev_in, idata, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy HostToDevice failed");
timer().startGpuTimer();
kernNaiveScan <<< fullBlockPerGrid, blockSize, 2 * n * sizeof(int) >>> (n, dev_out, dev_in);
checkCUDAError("kernNaiveScan dev_in failed");
timer().endGpuTimer();
cudaMemcpy(odata, dev_out, n * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy DeviceToHost failed");
cudaFree(dev_in);
cudaFree(dev_out);
}
}
}
|
fe49a0d611aff13a4d08877e463460064a2a3d28.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <dmlc/filesystem.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../helpers.h"
#include "../histogram_helpers.h"
#include "xgboost/json.h"
#include "../../../src/data/sparse_page_source.h"
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../../../src/tree/updater_gpu_common.cuh"
#include "../../../src/common/common.h"
#include "../../../src/tree/constraints.cuh"
namespace xgboost {
namespace tree {
TEST(GpuHist, DeviceHistogram) {
// Ensures that node allocates correctly after reaching `kStopGrowingSize`.
dh::safe_cuda(hipSetDevice(0));
constexpr size_t kNBins = 128;
constexpr size_t kNNodes = 4;
constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
DeviceHistogram<GradientPairPrecise, kStopGrowing> histogram;
histogram.Init(0, kNBins);
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
histogram.Reset();
ASSERT_EQ(histogram.Data().size(), kStopGrowing);
// Use allocated memory but do not erase nidx_map.
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_TRUE(histogram.HistogramExists(i));
}
// Erase existing nidx_map.
for (size_t i = kNNodes; i < kNNodes * 2; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_FALSE(histogram.HistogramExists(i));
}
}
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
template <typename GradientSumT>
void TestBuildHist(bool use_shared_memory_histograms) {
int const kNRows = 16, kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "6"},
{"max_leaves", "0"},
};
param.Init(args);
auto page = BuildEllpackPage(kNRows, kNCols);
BatchParam batch_param{};
GPUHistMakerDevice<GradientSumT> maker(0, page.get(), {}, kNRows, param, kNCols, kNCols,
true, batch_param);
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
HostDeviceVector<GradientPair> gpair(kNRows);
for (auto &gp : gpair.HostVector()) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gp = GradientPair(grad, hess);
}
gpair.SetDevice(0);
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (page->gidx_buffer.HostVector());
maker.row_partitioner.reset(new RowPartitioner(0, kNRows));
maker.hist.AllocateHistogram(0);
maker.gpair = gpair.DeviceSpan();
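  // Build the histogram for the root node (nidx 0); the result is copied back to the host
  // below and compared against the reference values from GetHostHistGpair().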
maker.BuildHist(0);
DeviceHistogram<GradientSumT> d_hist = maker.hist;
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientSumT> h_result (d_hist.Data().size() / 2);
size_t data_size =
sizeof(GradientSumT) /
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT));
data_size *= d_hist.Data().size();
dh::safe_cuda(hipMemcpy(h_result.data(), node_histogram.data(), data_size,
hipMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
std::cout << std::fixed;
for (size_t i = 0; i < h_result.size(); ++i) {
EXPECT_NEAR(h_result[i].GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(h_result[i].GetHess(), solution[i].GetHess(), 0.01f);
}
}
TEST(GpuHist, BuildHistGlobalMem) {
TestBuildHist<GradientPairPrecise>(false);
TestBuildHist<GradientPair>(false);
}
TEST(GpuHist, BuildHistSharedMem) {
TestBuildHist<GradientPairPrecise>(true);
TestBuildHist<GradientPair>(true);
}
TEST(GpuHist, ApplySplit) {
RegTree tree;
ExpandEntry candidate;
candidate.nid = 0;
candidate.left_weight = 1.0f;
candidate.right_weight = 2.0f;
candidate.base_weight = 3.0f;
candidate.split.is_cat = true;
candidate.split.fvalue = 1.0f; // at cat 1
size_t n_rows = 10;
size_t n_cols = 10;
auto m = RandomDataGenerator{n_rows, n_cols, 0}.GenerateDMatrix(true);
GenericParameter p;
p.InitAllowUnknown(Args{});
TrainParam tparam;
tparam.InitAllowUnknown(Args{});
BatchParam bparam;
bparam.gpu_id = 0;
bparam.max_bin = 3;
bparam.gpu_page_size = 0;
for (auto& ellpack : m->GetBatches<EllpackPage>(bparam)){
auto impl = ellpack.Impl();
HostDeviceVector<FeatureType> feature_types(10, FeatureType::kCategorical);
feature_types.SetDevice(bparam.gpu_id);
tree::GPUHistMakerDevice<GradientPairPrecise> updater(
0, impl, feature_types.ConstDeviceSpan(), n_rows, tparam, 0, n_cols, true, bparam);
updater.ApplySplit(candidate, &tree);
ASSERT_EQ(tree.GetSplitTypes().size(), 3);
ASSERT_EQ(tree.GetSplitTypes()[0], FeatureType::kCategorical);
ASSERT_EQ(tree.GetSplitCategories().size(), 1);
uint32_t bits = 1u << 30; // bits: 0, 1, 0, 0, 0, ..., 0
ASSERT_EQ(tree.GetSplitCategories().back(), bits);
ASSERT_EQ(updater.node_categories.size(), 1);
}
}
HistogramCutsWrapper GetHostCutMatrix () {
HistogramCutsWrapper cmat;
cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24});
cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f});
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.SetValues({0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f});
return cmat;
}
// TODO(trivialfis): This test is oversimplified.
TEST(GpuHist, EvaluateRootSplit) {
constexpr int kNRows = 16;
constexpr int kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args{
{"max_depth", "1"},
{"max_leaves", "0"},
// Disable all other parameters.
{"colsample_bynode", "1"},
{"colsample_bylevel", "1"},
{"colsample_bytree", "1"},
{"min_child_weight", "0.01"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"max_delta_step", "0"}};
param.Init(args);
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize GPUHistMakerDevice
auto page = BuildEllpackPage(kNRows, kNCols);
BatchParam batch_param{};
GPUHistMakerDevice<GradientPairPrecise>
maker(0, page.get(), {}, kNRows, param, kNCols, kNCols, true, batch_param);
// Initialize GPUHistMakerDevice::node_sum_gradients
maker.node_sum_gradients = {};
// Initialize GPUHistMakerDevice::cut
auto cmat = GetHostCutMatrix();
// Copy cut matrix to device.
page->Cuts() = cmat;
maker.monotone_constraints = param.monotone_constraints;
// Initialize GPUHistMakerDevice::hist
maker.hist.Init(0, (max_bins - 1) * kNCols);
maker.hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
std::vector<GradientPairPrecise> hist_gpair = GetHostHistGpair();
std::vector<bst_float> hist;
for (auto pair : hist_gpair) {
hist.push_back(pair.GetGrad());
hist.push_back(pair.GetHess());
}
ASSERT_EQ(maker.hist.Data().size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
maker.hist.Data().begin());
std::vector<float> feature_weights;
maker.column_sampler.Init(kNCols, feature_weights, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree,
false);
RegTree tree;
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
DeviceSplitCandidate res = maker.EvaluateRootSplit({6.4f, 12.8f});
ASSERT_EQ(res.findex, 7);
ASSERT_NEAR(res.fvalue, 0.26, xgboost::kRtEps);
}
void TestHistogramIndexImpl() {
// Test if the compressed histogram index matches when using a sparse
// dmatrix with and without using external memory
int constexpr kNRows = 1000, kNCols = 10;
// Build 2 matrices and build a histogram maker with that
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker, hist_maker_ext;
std::unique_ptr<DMatrix> hist_maker_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true));
dmlc::TemporaryDirectory tempdir;
std::unique_ptr<DMatrix> hist_maker_ext_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true, tempdir));
std::vector<std::pair<std::string, std::string>> training_params = {
{"max_depth", "10"},
{"max_leaves", "0"}
};
GenericParameter generic_param(CreateEmptyGenericParam(0));
hist_maker.Configure(training_params, &generic_param);
hist_maker.InitDataOnce(hist_maker_dmat.get());
hist_maker_ext.Configure(training_params, &generic_param);
hist_maker_ext.InitDataOnce(hist_maker_ext_dmat.get());
// Extract the device maker from the histogram makers and from that its compressed
// histogram index
const auto &maker = hist_maker.maker;
std::vector<common::CompressedByteT> h_gidx_buffer(maker->page->gidx_buffer.HostVector());
const auto &maker_ext = hist_maker_ext.maker;
std::vector<common::CompressedByteT> h_gidx_buffer_ext(maker_ext->page->gidx_buffer.HostVector());
ASSERT_EQ(maker->page->Cuts().TotalBins(), maker_ext->page->Cuts().TotalBins());
ASSERT_EQ(maker->page->gidx_buffer.Size(), maker_ext->page->gidx_buffer.Size());
}
TEST(GpuHist, TestHistogramIndex) {
TestHistogramIndexImpl();
}
// gamma is an alias of min_split_loss
int32_t TestMinSplitLoss(DMatrix* dmat, float gamma, HostDeviceVector<GradientPair>* gpair) {
Args args {
{"max_depth", "1"},
{"max_leaves", "0"},
// Disable all other parameters.
{"colsample_bynode", "1"},
{"colsample_bylevel", "1"},
{"colsample_bytree", "1"},
{"min_child_weight", "0.01"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"max_delta_step", "0"},
// test gamma
{"gamma", std::to_string(gamma)}
};
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker;
GenericParameter generic_param(CreateEmptyGenericParam(0));
hist_maker.Configure(args, &generic_param);
RegTree tree;
hist_maker.Update(gpair, dmat, {&tree});
auto n_nodes = tree.NumExtraNodes();
return n_nodes;
}
TEST(GpuHist, MinSplitLoss) {
constexpr size_t kRows = 32;
constexpr size_t kCols = 16;
constexpr float kSparsity = 0.6;
auto dmat = RandomDataGenerator(kRows, kCols, kSparsity).Seed(3).GenerateDMatrix();
auto gpair = GenerateRandomGradients(kRows);
{
int32_t n_nodes = TestMinSplitLoss(dmat.get(), 0.01, &gpair);
    // This is not strictly verified, meaning the number `2` is whatever GPU_Hist returned
    // when writing this test; it is only used to check that a larger gamma (below) does
    // prevent building the tree.
ASSERT_EQ(n_nodes, 2);
}
{
int32_t n_nodes = TestMinSplitLoss(dmat.get(), 100.0, &gpair);
// No new nodes with gamma == 100.
ASSERT_EQ(n_nodes, static_cast<decltype(n_nodes)>(0));
}
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
size_t gpu_page_size, RegTree* tree,
HostDeviceVector<bst_float>* preds, float subsample = 1.0f,
const std::string& sampling_method = "uniform",
int max_bin = 2) {
if (gpu_page_size > 0) {
// Loop over the batches and count the records
int64_t batch_count = 0;
int64_t row_count = 0;
for (const auto& batch : dmat->GetBatches<EllpackPage>({0, max_bin, gpu_page_size})) {
EXPECT_LT(batch.Size(), dmat->Info().num_row_);
batch_count++;
row_count += batch.Size();
}
EXPECT_GE(batch_count, 2);
EXPECT_EQ(row_count, dmat->Info().num_row_);
}
Args args{
{"max_depth", "2"},
{"max_bin", std::to_string(max_bin)},
{"min_child_weight", "0.0"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"subsample", std::to_string(subsample)},
{"sampling_method", sampling_method},
};
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker;
GenericParameter generic_param(CreateEmptyGenericParam(0));
generic_param.gpu_page_size = gpu_page_size;
hist_maker.Configure(args, &generic_param);
hist_maker.Update(gpair, dmat, {tree});
hist_maker.UpdatePredictionCache(dmat, preds);
}
TEST(GpuHist, UniformSampling) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr float kSubsample = 0.9999;
common::GlobalRandom().seed(1994);
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
auto gpair = GenerateRandomGradients(kRows);
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using sampling.
RegTree tree_sampling;
HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample,
"uniform", kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_sampling_h = preds_sampling.ConstHostVector();
for (int i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-8);
}
}
TEST(GpuHist, GradientBasedSampling) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr float kSubsample = 0.9999;
common::GlobalRandom().seed(1994);
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
auto gpair = GenerateRandomGradients(kRows);
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using sampling.
RegTree tree_sampling;
HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample,
"gradient_based", kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_sampling_h = preds_sampling.ConstHostVector();
for (int i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-3);
}
}
TEST(GpuHist, ExternalMemory) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1024;
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat_ext(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
auto gpair = GenerateRandomGradients(kRows);
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using multiple ELLPACK pages.
RegTree tree_ext;
HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
UpdateTree(&gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, 1.0, "uniform", kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_ext_h = preds_ext.ConstHostVector();
for (int i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_ext_h[i], 1e-6);
}
}
TEST(GpuHist, ExternalMemoryWithSampling) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1024;
constexpr float kSubsample = 0.5;
const std::string kSamplingMethod = "gradient_based";
common::GlobalRandom().seed(0);
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat_ext(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
auto gpair = GenerateRandomGradients(kRows);
// Build a tree using the in-memory DMatrix.
auto rng = common::GlobalRandom();
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, kSubsample, kSamplingMethod,
kRows);
// Build another tree using multiple ELLPACK pages.
common::GlobalRandom() = rng;
RegTree tree_ext;
HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
UpdateTree(&gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext,
kSubsample, kSamplingMethod, kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_ext_h = preds_ext.ConstHostVector();
for (int i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_ext_h[i], 1e-3);
}
}
TEST(GpuHist, ConfigIO) {
GenericParameter generic_param(CreateEmptyGenericParam(0));
std::unique_ptr<TreeUpdater> updater {TreeUpdater::Create("grow_gpu_hist", &generic_param) };
updater->Configure(Args{});
Json j_updater { Object() };
updater->SaveConfig(&j_updater);
ASSERT_TRUE(IsA<Object>(j_updater["gpu_hist_train_param"]));
ASSERT_TRUE(IsA<Object>(j_updater["train_param"]));
updater->LoadConfig(j_updater);
Json j_updater_roundtrip { Object() };
updater->SaveConfig(&j_updater_roundtrip);
ASSERT_TRUE(IsA<Object>(j_updater_roundtrip["gpu_hist_train_param"]));
ASSERT_TRUE(IsA<Object>(j_updater_roundtrip["train_param"]));
ASSERT_EQ(j_updater, j_updater_roundtrip);
}
TEST(GpuHist, MaxDepth) {
GenericParameter generic_param(CreateEmptyGenericParam(0));
size_t constexpr kRows = 16;
size_t constexpr kCols = 4;
auto p_mat = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
auto learner = std::unique_ptr<Learner>(Learner::Create({p_mat}));
learner->SetParam("max_depth", "32");
learner->Configure();
ASSERT_THROW({learner->UpdateOneIter(0, p_mat);}, dmlc::Error);
}
} // namespace tree
} // namespace xgboost
| fe49a0d611aff13a4d08877e463460064a2a3d28.cu | /*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <dmlc/filesystem.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../helpers.h"
#include "../histogram_helpers.h"
#include "xgboost/json.h"
#include "../../../src/data/sparse_page_source.h"
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../../../src/tree/updater_gpu_common.cuh"
#include "../../../src/common/common.h"
#include "../../../src/tree/constraints.cuh"
namespace xgboost {
namespace tree {
TEST(GpuHist, DeviceHistogram) {
// Ensures that node allocates correctly after reaching `kStopGrowingSize`.
dh::safe_cuda(cudaSetDevice(0));
constexpr size_t kNBins = 128;
constexpr size_t kNNodes = 4;
constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
DeviceHistogram<GradientPairPrecise, kStopGrowing> histogram;
histogram.Init(0, kNBins);
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
histogram.Reset();
ASSERT_EQ(histogram.Data().size(), kStopGrowing);
// Use allocated memory but do not erase nidx_map.
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_TRUE(histogram.HistogramExists(i));
}
// Erase existing nidx_map.
for (size_t i = kNNodes; i < kNNodes * 2; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_FALSE(histogram.HistogramExists(i));
}
}
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
template <typename GradientSumT>
void TestBuildHist(bool use_shared_memory_histograms) {
int const kNRows = 16, kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "6"},
{"max_leaves", "0"},
};
param.Init(args);
auto page = BuildEllpackPage(kNRows, kNCols);
BatchParam batch_param{};
GPUHistMakerDevice<GradientSumT> maker(0, page.get(), {}, kNRows, param, kNCols, kNCols,
true, batch_param);
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
HostDeviceVector<GradientPair> gpair(kNRows);
for (auto &gp : gpair.HostVector()) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gp = GradientPair(grad, hess);
}
gpair.SetDevice(0);
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (page->gidx_buffer.HostVector());
maker.row_partitioner.reset(new RowPartitioner(0, kNRows));
maker.hist.AllocateHistogram(0);
maker.gpair = gpair.DeviceSpan();
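  // Build the histogram for the root node (nidx 0); the result is copied back to the host
  // below and compared against the reference values from GetHostHistGpair().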
maker.BuildHist(0);
DeviceHistogram<GradientSumT> d_hist = maker.hist;
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientSumT> h_result (d_hist.Data().size() / 2);
size_t data_size =
sizeof(GradientSumT) /
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT));
data_size *= d_hist.Data().size();
dh::safe_cuda(cudaMemcpy(h_result.data(), node_histogram.data(), data_size,
cudaMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
std::cout << std::fixed;
for (size_t i = 0; i < h_result.size(); ++i) {
EXPECT_NEAR(h_result[i].GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(h_result[i].GetHess(), solution[i].GetHess(), 0.01f);
}
}
TEST(GpuHist, BuildHistGlobalMem) {
TestBuildHist<GradientPairPrecise>(false);
TestBuildHist<GradientPair>(false);
}
TEST(GpuHist, BuildHistSharedMem) {
TestBuildHist<GradientPairPrecise>(true);
TestBuildHist<GradientPair>(true);
}
TEST(GpuHist, ApplySplit) {
RegTree tree;
ExpandEntry candidate;
candidate.nid = 0;
candidate.left_weight = 1.0f;
candidate.right_weight = 2.0f;
candidate.base_weight = 3.0f;
candidate.split.is_cat = true;
candidate.split.fvalue = 1.0f; // at cat 1
size_t n_rows = 10;
size_t n_cols = 10;
auto m = RandomDataGenerator{n_rows, n_cols, 0}.GenerateDMatrix(true);
GenericParameter p;
p.InitAllowUnknown(Args{});
TrainParam tparam;
tparam.InitAllowUnknown(Args{});
BatchParam bparam;
bparam.gpu_id = 0;
bparam.max_bin = 3;
bparam.gpu_page_size = 0;
for (auto& ellpack : m->GetBatches<EllpackPage>(bparam)){
auto impl = ellpack.Impl();
HostDeviceVector<FeatureType> feature_types(10, FeatureType::kCategorical);
feature_types.SetDevice(bparam.gpu_id);
tree::GPUHistMakerDevice<GradientPairPrecise> updater(
0, impl, feature_types.ConstDeviceSpan(), n_rows, tparam, 0, n_cols, true, bparam);
updater.ApplySplit(candidate, &tree);
ASSERT_EQ(tree.GetSplitTypes().size(), 3);
ASSERT_EQ(tree.GetSplitTypes()[0], FeatureType::kCategorical);
ASSERT_EQ(tree.GetSplitCategories().size(), 1);
uint32_t bits = 1u << 30; // bits: 0, 1, 0, 0, 0, ..., 0
ASSERT_EQ(tree.GetSplitCategories().back(), bits);
ASSERT_EQ(updater.node_categories.size(), 1);
}
}
HistogramCutsWrapper GetHostCutMatrix () {
HistogramCutsWrapper cmat;
cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24});
cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f});
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.SetValues({0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f});
return cmat;
}
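// Illustrative helper (hypothetical, not part of the original test): a sketch of how a
// raw feature value maps to a global bin index under the SetPtrs()/SetValues() layout
// built above -- take the first cut greater than the value within that feature's slice
// and clamp to the feature's last bin. Assumes <algorithm>, <vector> and <cstdint> are
// available through the test headers.
inline size_t SketchBinIndex(const std::vector<uint32_t>& ptrs,
                             const std::vector<float>& values,
                             size_t feature, float value) {
  auto begin = values.cbegin() + ptrs[feature];
  auto end = values.cbegin() + ptrs[feature + 1];
  auto it = std::upper_bound(begin, end, value);
  if (it == end) { --it; }  // values beyond the last cut stay in the last bin
  return static_cast<size_t>(it - values.cbegin());  // global index; 3 bins per feature here
}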
// TODO(trivialfis): This test is over simplified.
TEST(GpuHist, EvaluateRootSplit) {
constexpr int kNRows = 16;
constexpr int kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args{
{"max_depth", "1"},
{"max_leaves", "0"},
// Disable all other parameters.
{"colsample_bynode", "1"},
{"colsample_bylevel", "1"},
{"colsample_bytree", "1"},
{"min_child_weight", "0.01"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"max_delta_step", "0"}};
param.Init(args);
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize GPUHistMakerDevice
auto page = BuildEllpackPage(kNRows, kNCols);
BatchParam batch_param{};
GPUHistMakerDevice<GradientPairPrecise>
maker(0, page.get(), {}, kNRows, param, kNCols, kNCols, true, batch_param);
// Initialize GPUHistMakerDevice::node_sum_gradients
maker.node_sum_gradients = {};
// Initialize GPUHistMakerDevice::cut
auto cmat = GetHostCutMatrix();
// Copy cut matrix to device.
page->Cuts() = cmat;
maker.monotone_constraints = param.monotone_constraints;
// Initialize GPUHistMakerDevice::hist
maker.hist.Init(0, (max_bins - 1) * kNCols);
maker.hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
std::vector<GradientPairPrecise> hist_gpair = GetHostHistGpair();
std::vector<bst_float> hist;
for (auto pair : hist_gpair) {
hist.push_back(pair.GetGrad());
hist.push_back(pair.GetHess());
}
ASSERT_EQ(maker.hist.Data().size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
maker.hist.Data().begin());
std::vector<float> feature_weights;
maker.column_sampler.Init(kNCols, feature_weights, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree,
false);
RegTree tree;
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
DeviceSplitCandidate res = maker.EvaluateRootSplit({6.4f, 12.8f});
ASSERT_EQ(res.findex, 7);
ASSERT_NEAR(res.fvalue, 0.26, xgboost::kRtEps);
}
void TestHistogramIndexImpl() {
// Test if the compressed histogram index matches when using a sparse
// dmatrix with and without using external memory
int constexpr kNRows = 1000, kNCols = 10;
// Build 2 matrices and build a histogram maker with that
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker, hist_maker_ext;
std::unique_ptr<DMatrix> hist_maker_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true));
dmlc::TemporaryDirectory tempdir;
std::unique_ptr<DMatrix> hist_maker_ext_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true, tempdir));
std::vector<std::pair<std::string, std::string>> training_params = {
{"max_depth", "10"},
{"max_leaves", "0"}
};
GenericParameter generic_param(CreateEmptyGenericParam(0));
hist_maker.Configure(training_params, &generic_param);
hist_maker.InitDataOnce(hist_maker_dmat.get());
hist_maker_ext.Configure(training_params, &generic_param);
hist_maker_ext.InitDataOnce(hist_maker_ext_dmat.get());
// Extract the device maker from the histogram makers and from that its compressed
// histogram index
const auto &maker = hist_maker.maker;
std::vector<common::CompressedByteT> h_gidx_buffer(maker->page->gidx_buffer.HostVector());
const auto &maker_ext = hist_maker_ext.maker;
std::vector<common::CompressedByteT> h_gidx_buffer_ext(maker_ext->page->gidx_buffer.HostVector());
ASSERT_EQ(maker->page->Cuts().TotalBins(), maker_ext->page->Cuts().TotalBins());
ASSERT_EQ(maker->page->gidx_buffer.Size(), maker_ext->page->gidx_buffer.Size());
}
TEST(GpuHist, TestHistogramIndex) {
TestHistogramIndexImpl();
}
// gamma is an alias of min_split_loss
int32_t TestMinSplitLoss(DMatrix* dmat, float gamma, HostDeviceVector<GradientPair>* gpair) {
Args args {
{"max_depth", "1"},
{"max_leaves", "0"},
// Disable all other parameters.
{"colsample_bynode", "1"},
{"colsample_bylevel", "1"},
{"colsample_bytree", "1"},
{"min_child_weight", "0.01"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"max_delta_step", "0"},
// test gamma
{"gamma", std::to_string(gamma)}
};
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker;
GenericParameter generic_param(CreateEmptyGenericParam(0));
hist_maker.Configure(args, &generic_param);
RegTree tree;
hist_maker.Update(gpair, dmat, {&tree});
auto n_nodes = tree.NumExtraNodes();
return n_nodes;
}
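// For reference: a node is split only when the best candidate's loss reduction exceeds
// gamma (min_split_loss), i.e. roughly
//   1/2 * [ GL^2/(HL+lambda) + GR^2/(HR+lambda) - (GL+GR)^2/(HL+HR+lambda) ] - gamma > 0,
// which is why the test below expects a couple of new nodes at gamma = 0.01 and none at
// gamma = 100.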
TEST(GpuHist, MinSplitLoss) {
constexpr size_t kRows = 32;
constexpr size_t kCols = 16;
constexpr float kSparsity = 0.6;
auto dmat = RandomDataGenerator(kRows, kCols, kSparsity).Seed(3).GenerateDMatrix();
auto gpair = GenerateRandomGradients(kRows);
{
int32_t n_nodes = TestMinSplitLoss(dmat.get(), 0.01, &gpair);
    // This is not strictly verified, meaning the number `2` is whatever GPU_Hist returned
    // when this test was written; it is only used to check that the larger gamma (below)
    // does prevent the tree from growing.
ASSERT_EQ(n_nodes, 2);
}
{
int32_t n_nodes = TestMinSplitLoss(dmat.get(), 100.0, &gpair);
// No new nodes with gamma == 100.
ASSERT_EQ(n_nodes, static_cast<decltype(n_nodes)>(0));
}
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
size_t gpu_page_size, RegTree* tree,
HostDeviceVector<bst_float>* preds, float subsample = 1.0f,
const std::string& sampling_method = "uniform",
int max_bin = 2) {
if (gpu_page_size > 0) {
// Loop over the batches and count the records
int64_t batch_count = 0;
int64_t row_count = 0;
for (const auto& batch : dmat->GetBatches<EllpackPage>({0, max_bin, gpu_page_size})) {
EXPECT_LT(batch.Size(), dmat->Info().num_row_);
batch_count++;
row_count += batch.Size();
}
EXPECT_GE(batch_count, 2);
EXPECT_EQ(row_count, dmat->Info().num_row_);
}
Args args{
{"max_depth", "2"},
{"max_bin", std::to_string(max_bin)},
{"min_child_weight", "0.0"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"subsample", std::to_string(subsample)},
{"sampling_method", sampling_method},
};
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker;
GenericParameter generic_param(CreateEmptyGenericParam(0));
generic_param.gpu_page_size = gpu_page_size;
hist_maker.Configure(args, &generic_param);
hist_maker.Update(gpair, dmat, {tree});
hist_maker.UpdatePredictionCache(dmat, preds);
}
TEST(GpuHist, UniformSampling) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr float kSubsample = 0.9999;
common::GlobalRandom().seed(1994);
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
auto gpair = GenerateRandomGradients(kRows);
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using sampling.
RegTree tree_sampling;
HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample,
"uniform", kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_sampling_h = preds_sampling.ConstHostVector();
for (int i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-8);
}
}
TEST(GpuHist, GradientBasedSampling) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr float kSubsample = 0.9999;
common::GlobalRandom().seed(1994);
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
auto gpair = GenerateRandomGradients(kRows);
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using sampling.
RegTree tree_sampling;
HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample,
"gradient_based", kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_sampling_h = preds_sampling.ConstHostVector();
for (int i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-3);
}
}
TEST(GpuHist, ExternalMemory) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1024;
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat_ext(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
auto gpair = GenerateRandomGradients(kRows);
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using multiple ELLPACK pages.
RegTree tree_ext;
HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
UpdateTree(&gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, 1.0, "uniform", kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_ext_h = preds_ext.ConstHostVector();
for (int i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_ext_h[i], 1e-6);
}
}
TEST(GpuHist, ExternalMemoryWithSampling) {
constexpr size_t kRows = 4096;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1024;
constexpr float kSubsample = 0.5;
const std::string kSamplingMethod = "gradient_based";
common::GlobalRandom().seed(0);
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat_ext(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
auto gpair = GenerateRandomGradients(kRows);
// Build a tree using the in-memory DMatrix.
auto rng = common::GlobalRandom();
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, kSubsample, kSamplingMethod,
kRows);
// Build another tree using multiple ELLPACK pages.
common::GlobalRandom() = rng;
RegTree tree_ext;
HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
UpdateTree(&gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext,
kSubsample, kSamplingMethod, kRows);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_ext_h = preds_ext.ConstHostVector();
for (int i = 0; i < kRows; i++) {
EXPECT_NEAR(preds_h[i], preds_ext_h[i], 1e-3);
}
}
TEST(GpuHist, ConfigIO) {
GenericParameter generic_param(CreateEmptyGenericParam(0));
std::unique_ptr<TreeUpdater> updater {TreeUpdater::Create("grow_gpu_hist", &generic_param) };
updater->Configure(Args{});
Json j_updater { Object() };
updater->SaveConfig(&j_updater);
ASSERT_TRUE(IsA<Object>(j_updater["gpu_hist_train_param"]));
ASSERT_TRUE(IsA<Object>(j_updater["train_param"]));
updater->LoadConfig(j_updater);
Json j_updater_roundtrip { Object() };
updater->SaveConfig(&j_updater_roundtrip);
ASSERT_TRUE(IsA<Object>(j_updater_roundtrip["gpu_hist_train_param"]));
ASSERT_TRUE(IsA<Object>(j_updater_roundtrip["train_param"]));
ASSERT_EQ(j_updater, j_updater_roundtrip);
}
TEST(GpuHist, MaxDepth) {
GenericParameter generic_param(CreateEmptyGenericParam(0));
size_t constexpr kRows = 16;
size_t constexpr kCols = 4;
auto p_mat = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
auto learner = std::unique_ptr<Learner>(Learner::Create({p_mat}));
learner->SetParam("max_depth", "32");
learner->Configure();
ASSERT_THROW({learner->UpdateOneIter(0, p_mat);}, dmlc::Error);
}
} // namespace tree
} // namespace xgboost
|
d778f71d76598e2120333f0b87a7886990a3e7b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
// Thread block size
#define BLOCK_SIZE 4
#define MSIZE 4
// Matrices are stored in row-major order:
// M(row, col) = M[row * MSIZE + col]
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(float *, float *, float *);
int checkProduct(float * A, float * B, float * C)
//Check matrix product C = AB
{
int i,j,k; //loop variables
int fail = 0;
float tol = 1e-2;
float ABelement;
//loop over rows
for (i = 0; i < MSIZE; i++)
{
//loop over columns
for (j = 0; j < MSIZE; j++)
{
ABelement = 0.0f;
//loop to compute matrix element
for (k = 0; k < MSIZE; k++)
{
ABelement += A[i*MSIZE + k] * B[k*MSIZE + j];
}
      //if matrix element differs from the reference by more than the tolerance
if (fabsf(C[i*MSIZE + j] - ABelement) > tol)
{
printf("Matrix product problem: C != AB\n");
printf("row %d col %d diff=%f\n", i,j,abs(C[i*MSIZE + j] - ABelement));
fail = 1;
}
if (fail == 1) break;
}
if (fail == 1) break;
}
if (fail == 0) printf("Matrix product confirmed!\n");
return fail;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(float* A, float* B, float* C)
{
float *d_A;
size_t size = MSIZE * MSIZE * sizeof(float);
//allocate space for matrix A on device
hipMalloc(&d_A, size);
//copy matrix A to device
hipMemcpy(d_A, A, size,
hipMemcpyHostToDevice);
float *d_B;
//allocate space for matrix B on device
hipMalloc(&d_B, size);
//copy matrix B to device
hipMemcpy(d_B, B, size,
hipMemcpyHostToDevice);
// Allocate C in device memory
float *d_C;
hipMalloc(&d_C, size);
// Invoke kernel
hipLaunchKernelGGL(( MatMulKernel), dim3(MSIZE * MSIZE / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, d_A, d_B, d_C);
// Read C from device memory
hipMemcpy(C, d_C, size,
hipMemcpyDeviceToHost);
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(float* A, float* B, float* C)
{
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
//compute the thread index
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
//compute the row and column
int row = idx / MSIZE;
int col = idx - row * MSIZE;
for (int i = 0; i < MSIZE; ++i)
Cvalue += A[row * MSIZE + i]
* B[i * MSIZE + col];
C[idx] = Cvalue;
}
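// Worked example of the indexing in MatMulKernel above (for reference): MatMul() launches
// MSIZE*MSIZE/BLOCK_SIZE = 4 blocks of BLOCK_SIZE = 4 threads, i.e. one thread per element
// of C. Thread idx = 6 gives row = 6/4 = 1 and col = 6 - 1*4 = 2, so that thread
// accumulates C[1*MSIZE + 2] = dot(row 1 of A, column 2 of B).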
int main(int argc, char** argv)
{
float *matA, *matB, *matC;
int i, j; //row and column indices
uint size = MSIZE * MSIZE * sizeof(float);
// Allocate space for the matrices
matA = (float *) malloc(size);
matB = (float *) malloc(size);
matC = (float *) malloc(size);
// Seed the random number generator
srand( time(NULL) );
// Generate a random value for each element of A and B
for( i = 0; i < MSIZE; i++)
{
for( j = 0; j < MSIZE; j++)
{
matA[i * MSIZE + j] = rand() / (float) RAND_MAX;
matB[i * MSIZE + j] = rand() / (float) RAND_MAX;
}
}
//Multiply the matrices
MatMul(matA, matB, matC);
//Check our work on the host
if (checkProduct(matA, matB, matC) != 0)
printf("Your program may have errors\n");
return 0;
}
| d778f71d76598e2120333f0b87a7886990a3e7b8.cu | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
// Thread block size
#define BLOCK_SIZE 4
#define MSIZE 4
// Matrices are stored in row-major order:
// M(row, col) = M[row * MSIZE + col]
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(float *, float *, float *);
int checkProduct(float * A, float * B, float * C)
//Check matrix product C = AB
{
int i,j,k; //loop variables
int fail = 0;
float tol = 1e-2;
float ABelement;
//loop over rows
for (i = 0; i < MSIZE; i++)
{
//loop over columns
for (j = 0; j < MSIZE; j++)
{
ABelement = 0.0f;
//loop to compute matrix element
for (k = 0; k < MSIZE; k++)
{
ABelement += A[i*MSIZE + k] * B[k*MSIZE + j];
}
      //if matrix element differs from the reference by more than the tolerance
if (fabsf(C[i*MSIZE + j] - ABelement) > tol)
{
printf("Matrix product problem: C != AB\n");
printf("row %d col %d diff=%f\n", i,j,abs(C[i*MSIZE + j] - ABelement));
fail = 1;
}
if (fail == 1) break;
}
if (fail == 1) break;
}
if (fail == 0) printf("Matrix product confirmed!\n");
return fail;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(float* A, float* B, float* C)
{
float *d_A;
size_t size = MSIZE * MSIZE * sizeof(float);
//allocate space for matrix A on device
cudaMalloc(&d_A, size);
//copy matrix A to device
cudaMemcpy(d_A, A, size,
cudaMemcpyHostToDevice);
float *d_B;
//allocate space for matrix B on device
cudaMalloc(&d_B, size);
//copy matrix B to device
cudaMemcpy(d_B, B, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
float *d_C;
cudaMalloc(&d_C, size);
// Invoke kernel
MatMulKernel<<<MSIZE * MSIZE / BLOCK_SIZE, BLOCK_SIZE>>>(d_A, d_B, d_C);
// Read C from device memory
cudaMemcpy(C, d_C, size,
cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(float* A, float* B, float* C)
{
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
//compute the thread index
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
//compute the row and column
int row = idx / MSIZE;
int col = idx - row * MSIZE;
for (int i = 0; i < MSIZE; ++i)
Cvalue += A[row * MSIZE + i]
* B[i * MSIZE + col];
C[idx] = Cvalue;
}
int main(int argc, char** argv)
{
float *matA, *matB, *matC;
int i, j; //row and column indices
uint size = MSIZE * MSIZE * sizeof(float);
// Allocate space for the matrices
matA = (float *) malloc(size);
matB = (float *) malloc(size);
matC = (float *) malloc(size);
// Seed the random number generator
srand( time(NULL) );
// Generate a random value for each element of A and B
for( i = 0; i < MSIZE; i++)
{
for( j = 0; j < MSIZE; j++)
{
matA[i * MSIZE + j] = rand() / (float) RAND_MAX;
matB[i * MSIZE + j] = rand() / (float) RAND_MAX;
}
}
//Multiply the matrices
MatMul(matA, matB, matC);
//Check our work on the host
if (checkProduct(matA, matB, matC) != 0)
printf("Your program may have errors\n");
return 0;
}
|
1adccb650079f1c646617d02c1b627d6e6a78222.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
using namespace std;
#define GAUSS_WIDTH 5
#define SOBEL_WIDTH 3
typedef struct images {
char *pType;
int width;
int height;
int maxValColor;
unsigned char *data;
} image;
/**
Reads the input file formatted as pnm. The actual implementation
supports only P5 type pnm images (grayscale).
*/
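// For reference (not part of the original source): the kernel below applies the
// standard 3x3 Sobel operators
//   Gx = [ 1 0 -1 ; 2 0 -2 ; 1 0 -1 ]   Gy = [ 1 2 1 ; 0 0 0 ; -1 -2 -1 ]
// and stores the gradient magnitude sqrt(gx^2 + gy^2) and the gradient direction
// atan2(gy, gx) for every interior pixel.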
__global__ void applySobelFilter(unsigned char *in, unsigned char *intensity, float *direction, int ih, int iw) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int gx, gy;
if (x > 0 && x + 1 < iw && y > 0 && y + 1 < ih) {
gx =
1 * in[(y - 1) * iw + (x - 1)] + (-1) * in[(y - 1) * iw + (x + 1)] +
2 * in[y * iw + (x - 1)] + (-2) * in[y * iw + (x + 1)] +
1 * in[(y + 1) * iw + (x - 1)] + (-1) * in[(y + 1) * iw + (x + 1)];
gy =
1 * in[(y - 1) * iw + (x - 1)] + 2 * in[(y - 1) * iw + x] + 1 * in[(y - 1) * iw + (x + 1)] +
(-1) * in[(y + 1) * iw + (x - 1)] + (-2) * in[(y + 1) * iw + x] + (-1) * in[(y + 1) * iw + (x + 1)];
intensity[y * iw + x] = (unsigned char)sqrt((float)(gx) * (float)(gx) + (float)(gy) * (float)(gy));
direction[y * iw + x] = atan2((float)gy, (float)gx);
}
} | 1adccb650079f1c646617d02c1b627d6e6a78222.cu | #include "includes.h"
using namespace std;
#define GAUSS_WIDTH 5
#define SOBEL_WIDTH 3
typedef struct images {
char *pType;
int width;
int height;
int maxValColor;
unsigned char *data;
} image;
/**
Reads the input file formatted as pnm. The actual implementation
supports only P5 type pnm images (grayscale).
*/
__global__ void applySobelFilter(unsigned char *in, unsigned char *intensity, float *direction, int ih, int iw) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int gx, gy;
if (x > 0 && x + 1 < iw && y > 0 && y + 1 < ih) {
gx =
1 * in[(y - 1) * iw + (x - 1)] + (-1) * in[(y - 1) * iw + (x + 1)] +
2 * in[y * iw + (x - 1)] + (-2) * in[y * iw + (x + 1)] +
1 * in[(y + 1) * iw + (x - 1)] + (-1) * in[(y + 1) * iw + (x + 1)];
gy =
1 * in[(y - 1) * iw + (x - 1)] + 2 * in[(y - 1) * iw + x] + 1 * in[(y - 1) * iw + (x + 1)] +
(-1) * in[(y + 1) * iw + (x - 1)] + (-2) * in[(y + 1) * iw + x] + (-1) * in[(y + 1) * iw + (x + 1)];
intensity[y * iw + x] = (unsigned char)sqrt((float)(gx) * (float)(gx) + (float)(gy) * (float)(gy));
direction[y * iw + x] = atan2((float)gy, (float)gx);
}
} |
c83f0e0f642bb86e53ff13b1c483670a90838818.hip | // !!! This is a file automatically generated by hipify!!!
/*
CUDA BarnesHut v3.1: Simulation of the gravitational forces
in a galactic cluster using the Barnes-Hut n-body algorithm
Copyright (c) 2013, Texas State University-San Marcos. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted for academic, research, experimental, or personal use provided that
the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Texas State University-San Marcos nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
For all other uses, please contact the Office for Commercialization and Industry
Relations at Texas State University-San Marcos <http://www.txstate.edu/ocir/>.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher <[email protected]>
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#ifdef __KEPLER__
// thread count
#define THREADS1 1024 /* must be a power of 2 */
#define THREADS2 1024
#define THREADS3 768
#define THREADS4 128
#define THREADS5 1024
#define THREADS6 1024
// block count = factor * #SMs
#define FACTOR1 2
#define FACTOR2 2
#define FACTOR3 1 /* must all be resident at the same time */
#define FACTOR4 4 /* must all be resident at the same time */
#define FACTOR5 2
#define FACTOR6 2
#else
// thread count
#define THREADS1 512 /* must be a power of 2 */
#define THREADS2 512
#define THREADS3 128
#define THREADS4 64
#define THREADS5 256
#define THREADS6 1024
// block count = factor * #SMs
#define FACTOR1 3
#define FACTOR2 3
#define FACTOR3 6 /* must all be resident at the same time */
#define FACTOR4 6 /* must all be resident at the same time */
#define FACTOR5 5
#define FACTOR6 1
#endif
#define WARPSIZE 32
#define MAXDEPTH 32
__device__ volatile int stepd, bottomd, maxdepthd;
__device__ unsigned int blkcntd;
__device__ volatile float radiusd;
/******************************************************************************/
/*** initialize memory ********************************************************/
/******************************************************************************/
__global__ void InitializationKernel(int * __restrict errd)
{
*errd = 0;
stepd = -1;
maxdepthd = 1;
blkcntd = 0;
}
/******************************************************************************/
/*** compute center and radius ************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS1, FACTOR1)
void BoundingBoxKernel(int nnodesd, int nbodiesd, volatile int * __restrict startd, volatile int * __restrict childd, volatile float * __restrict massd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd, volatile float * __restrict maxxd, volatile float * __restrict maxyd, volatile float * __restrict maxzd, volatile float * __restrict minxd, volatile float * __restrict minyd, volatile float * __restrict minzd)
{
register int i, j, k, inc;
register float val, minx, maxx, miny, maxy, minz, maxz;
__shared__ volatile float sminx[THREADS1], smaxx[THREADS1], sminy[THREADS1], smaxy[THREADS1], sminz[THREADS1], smaxz[THREADS1];
// initialize with valid data (in case #bodies < #threads)
minx = maxx = posxd[0];
miny = maxy = posyd[0];
minz = maxz = poszd[0];
// scan all bodies
i = threadIdx.x;
inc = THREADS1 * gridDim.x;
for (j = i + blockIdx.x * THREADS1; j < nbodiesd; j += inc) {
val = posxd[j];
minx = fminf(minx, val);
maxx = fmaxf(maxx, val);
val = posyd[j];
miny = fminf(miny, val);
maxy = fmaxf(maxy, val);
val = poszd[j];
minz = fminf(minz, val);
maxz = fmaxf(maxz, val);
}
// reduction in shared memory
sminx[i] = minx;
smaxx[i] = maxx;
sminy[i] = miny;
smaxy[i] = maxy;
sminz[i] = minz;
smaxz[i] = maxz;
for (j = THREADS1 / 2; j > 0; j /= 2) {
__syncthreads();
if (i < j) {
k = i + j;
sminx[i] = minx = fminf(minx, sminx[k]);
smaxx[i] = maxx = fmaxf(maxx, smaxx[k]);
sminy[i] = miny = fminf(miny, sminy[k]);
smaxy[i] = maxy = fmaxf(maxy, smaxy[k]);
sminz[i] = minz = fminf(minz, sminz[k]);
smaxz[i] = maxz = fmaxf(maxz, smaxz[k]);
}
}
// write block result to global memory
if (i == 0) {
k = blockIdx.x;
minxd[k] = minx;
maxxd[k] = maxx;
minyd[k] = miny;
maxyd[k] = maxy;
minzd[k] = minz;
maxzd[k] = maxz;
__threadfence();
inc = gridDim.x - 1;
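    // atomicInc wraps blkcntd back to 0 once it reaches gridDim.x - 1, so exactly one
    // block -- the last one to get here -- sees the old value equal to inc and performs
    // the final reduction; the counter is then already reset for the next time step.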
if (inc == atomicInc(&blkcntd, inc)) {
// I'm the last block, so combine all block results
for (j = 0; j <= inc; j++) {
minx = fminf(minx, minxd[j]);
maxx = fmaxf(maxx, maxxd[j]);
miny = fminf(miny, minyd[j]);
maxy = fmaxf(maxy, maxyd[j]);
minz = fminf(minz, minzd[j]);
maxz = fmaxf(maxz, maxzd[j]);
}
// compute 'radius'
val = fmaxf(maxx - minx, maxy - miny);
radiusd = fmaxf(val, maxz - minz) * 0.5f;
// create root node
k = nnodesd;
bottomd = k;
massd[k] = -1.0f;
startd[k] = 0;
posxd[k] = (minx + maxx) * 0.5f;
posyd[k] = (miny + maxy) * 0.5f;
poszd[k] = (minz + maxz) * 0.5f;
k *= 8;
for (i = 0; i < 8; i++) childd[k + i] = -1;
stepd++;
}
}
}
/******************************************************************************/
/*** build tree ***************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(1024, 1)
void ClearKernel1(int nnodesd, int nbodiesd, volatile int * __restrict childd)
{
register int k, inc, top, bottom;
top = 8 * nnodesd;
bottom = 8 * nbodiesd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
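  // bottom & (-WARPSIZE) rounds bottom down to a multiple of the warp size so every
  // warp starts on a warp-aligned cell index; threads that land below bottom skip
  // ahead by one full grid stride (the check above) before clearing cells.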
// iterate over all cells assigned to thread
while (k < top) {
childd[k] = -1;
k += inc;
}
}
__global__
__launch_bounds__(THREADS2, FACTOR2)
void TreeBuildingKernel(int nnodesd, int nbodiesd, volatile int * __restrict errd, volatile int * __restrict childd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd)
{
register int i, j, depth, localmaxdepth, skip, inc;
register float x, y, z, r;
register float px, py, pz;
register float dx, dy, dz;
register int ch, n, cell, locked, patch;
register float radius, rootx, rooty, rootz;
// cache root data
radius = radiusd;
rootx = posxd[nnodesd];
rooty = posyd[nnodesd];
rootz = poszd[nnodesd];
localmaxdepth = 1;
skip = 1;
inc = blockDim.x * gridDim.x;
i = threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all bodies assigned to thread
while (i < nbodiesd) {
if (skip != 0) {
// new body, so start traversing at root
skip = 0;
px = posxd[i];
py = posyd[i];
pz = poszd[i];
n = nnodesd;
depth = 1;
r = radius * 0.5f;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
if (rootx < px) {j = 1; dx = r;}
if (rooty < py) {j |= 2; dy = r;}
if (rootz < pz) {j |= 4; dz = r;}
x = rootx + dx;
y = rooty + dy;
z = rootz + dz;
}
// follow path to leaf cell
ch = childd[n*8+j];
while (ch >= nbodiesd) {
n = ch;
depth++;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
if (x < px) {j = 1; dx = r;}
if (y < py) {j |= 2; dy = r;}
if (z < pz) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
}
if (ch != -2) { // skip if child pointer is locked and try again later
locked = n*8+j;
if (ch == -1) {
if (-1 == atomicCAS((int *)&childd[locked], -1, i)) { // if null, just insert the new body
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 1;
}
} else { // there already is a body in this position
if (ch == atomicCAS((int *)&childd[locked], ch, -2)) { // try to lock
patch = -1;
// create new cell(s) and insert the old and new body
do {
depth++;
cell = atomicSub((int *)&bottomd, 1) - 1;
if (cell <= nbodiesd) {
*errd = 1;
bottomd = nnodesd;
}
if (patch != -1) {
childd[n*8+j] = cell;
}
patch = max(patch, cell);
j = 0;
if (x < posxd[ch]) j = 1;
if (y < posyd[ch]) j |= 2;
if (z < poszd[ch]) j |= 4;
childd[cell*8+j] = ch;
n = cell;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
if (x < px) {j = 1; dx = r;}
if (y < py) {j |= 2; dy = r;}
if (z < pz) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
// repeat until the two bodies are different children
} while (ch >= 0);
childd[n*8+j] = i;
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 2;
}
}
}
__syncthreads(); // __threadfence();
if (skip == 2) {
childd[locked] = patch;
}
}
// record maximum tree depth
atomicMax((int *)&maxdepthd, localmaxdepth);
}
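// Illustrative helper (hypothetical, not used by the kernels): the child slot j chosen
// in TreeBuildingKernel encodes the body's octant relative to the current cell centre,
// one bit per axis, exactly as sketched here.
static inline int OctantIndex(float cx, float cy, float cz,
                              float px, float py, float pz)
{
  int j = 0;
  if (cx < px) j |= 1;  // body lies in the +x half of the cell
  if (cy < py) j |= 2;  // body lies in the +y half of the cell
  if (cz < pz) j |= 4;  // body lies in the +z half of the cell
  return j;             // childd[cell*8 + j] holds the matching child pointer
}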
__global__
__launch_bounds__(1024, 1)
void ClearKernel2(int nnodesd, volatile int * __restrict startd, volatile float * __restrict massd)
{
register int k, inc, bottom;
bottom = bottomd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
// iterate over all cells assigned to thread
while (k < nnodesd) {
massd[k] = -1.0f;
startd[k] = -1;
k += inc;
}
}
/******************************************************************************/
/*** compute center of mass ***************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS3, FACTOR3)
void SummarizationKernel(const int nnodesd, const int nbodiesd, volatile int * __restrict countd, const int * __restrict childd, volatile float * __restrict massd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd)
{
register int i, j, k, ch, inc, cnt, bottom, flag;
register float m, cm, px, py, pz;
__shared__ int child[THREADS3 * 8];
__shared__ float mass[THREADS3 * 8];
bottom = bottomd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
register int restart = k;
for (j = 0; j < 5; j++) { // wait-free pre-passes
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (massd[k] < 0.0f) {
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
child[i*THREADS3+threadIdx.x] = ch; // cache children
if ((ch >= nbodiesd) && ((mass[i*THREADS3+threadIdx.x] = massd[ch]) < 0.0f)) {
break;
}
}
if (i == 8) {
// all children are ready
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if (ch >= 0) {
if (ch >= nbodiesd) { // count bodies (needed later)
m = mass[i*THREADS3+threadIdx.x];
cnt += countd[ch];
} else {
m = massd[ch];
cnt++;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
}
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
__threadfence(); // make sure data are visible before setting mass
massd[k] = cm;
}
}
k += inc; // move on to next cell
}
k = restart;
}
flag = 0;
j = 0;
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (massd[k] >= 0.0f) {
k += inc;
} else {
if (j == 0) {
j = 8;
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
child[i*THREADS3+threadIdx.x] = ch; // cache children
if ((ch < nbodiesd) || ((mass[i*THREADS3+threadIdx.x] = massd[ch]) >= 0.0f)) {
j--;
}
}
} else {
j = 8;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if ((ch < nbodiesd) || (mass[i*THREADS3+threadIdx.x] >= 0.0f) || ((mass[i*THREADS3+threadIdx.x] = massd[ch]) >= 0.0f)) {
j--;
}
}
}
if (j == 0) {
// all children are ready
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if (ch >= 0) {
if (ch >= nbodiesd) { // count bodies (needed later)
m = mass[i*THREADS3+threadIdx.x];
cnt += countd[ch];
} else {
m = massd[ch];
cnt++;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
}
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
flag = 1;
}
}
__syncthreads(); // __threadfence();
if (flag != 0) {
massd[k] = cm;
k += inc;
flag = 0;
}
}
}
/******************************************************************************/
/*** sort bodies **************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS4, FACTOR4)
void SortKernel(int nnodesd, int nbodiesd, int * __restrict sortd, int * __restrict countd, volatile int * __restrict startd, int * __restrict childd)
{
register int i, j, k, ch, dec, start, bottom;
bottom = bottomd;
dec = blockDim.x * gridDim.x;
k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all cells assigned to thread
while (k >= bottom) {
start = startd[k];
if (start >= 0) {
j = 0;
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= 0) {
if (i != j) {
// move children to front (needed later for speed)
childd[k*8+i] = -1;
childd[k*8+j] = ch;
}
j++;
if (ch >= nbodiesd) {
// child is a cell
startd[ch] = start; // set start ID of child
start += countd[ch]; // add #bodies in subtree
} else {
// child is a body
sortd[start] = ch; // record body in 'sorted' array
start++;
}
}
}
k -= dec; // move on to next cell
}
}
}
/******************************************************************************/
/*** compute force ************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS5, FACTOR5)
void ForceCalculationKernel(int nnodesd, int nbodiesd, volatile int * __restrict errd, float dthfd, float itolsqd, float epssqd, volatile int * __restrict sortd, volatile int * __restrict childd, volatile float * __restrict massd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd, volatile float * __restrict velxd, volatile float * __restrict velyd, volatile float * __restrict velzd, volatile float * __restrict accxd, volatile float * __restrict accyd, volatile float * __restrict acczd)
{
register int i, j, k, n, depth, base, sbase, diff, pd, nd;
register float px, py, pz, ax, ay, az, dx, dy, dz, tmp;
__shared__ volatile int pos[MAXDEPTH * THREADS5/WARPSIZE], node[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ float dq[MAXDEPTH * THREADS5/WARPSIZE];
if (0 == threadIdx.x) {
tmp = radiusd * 2;
// precompute values that depend only on tree level
dq[0] = tmp * tmp * itolsqd;
for (i = 1; i < maxdepthd; i++) {
dq[i] = dq[i - 1] * 0.25f;
dq[i - 1] += epssqd;
}
dq[i - 1] += epssqd;
if (maxdepthd > MAXDEPTH) {
*errd = maxdepthd;
}
}
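  // For reference: dq[d] holds the squared Barnes-Hut acceptance threshold for tree
  // level d -- itolsqd is 1/theta^2 and a cell at that level has side (2*radius)/2^d,
  // so dq[d] = side^2/theta^2 plus the softening epssqd. A cell is summarised as a
  // single far-away mass when the squared distance tmp >= dq at its level.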
__syncthreads();
if (maxdepthd <= MAXDEPTH) {
// figure out first thread in each warp (lane 0)
base = threadIdx.x / WARPSIZE;
sbase = base * WARPSIZE;
j = base * MAXDEPTH;
diff = threadIdx.x - sbase;
// make multiple copies to avoid index calculations later
if (diff < MAXDEPTH) {
dq[diff+j] = dq[diff];
}
__syncthreads();
__threadfence_block();
// iterate over all bodies assigned to thread
for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) {
i = sortd[k]; // get permuted/sorted index
// cache position info
px = posxd[i];
py = posyd[i];
pz = poszd[i];
ax = 0.0f;
ay = 0.0f;
az = 0.0f;
// initialize iteration stack, i.e., push root node onto stack
depth = j;
if (sbase == threadIdx.x) {
pos[j] = 0;
node[j] = nnodesd * 8;
}
do {
// stack is not empty
pd = pos[depth];
nd = node[depth];
while (pd < 8) {
// node on top of stack has more children to process
n = childd[nd + pd]; // load child pointer
pd++;
if (n >= 0) {
dx = posxd[n] - px;
dy = posyd[n] - py;
dz = poszd[n] - pz;
tmp = dx*dx + (dy*dy + (dz*dz + epssqd)); // compute distance squared (plus softening)
if ((n < nbodiesd) || __all(tmp >= dq[depth])) { // check if all threads agree that cell is far enough away (or is a body)
tmp = rsqrtf(tmp); // compute distance
tmp = massd[n] * tmp * tmp * tmp;
ax += dx * tmp;
ay += dy * tmp;
az += dz * tmp;
} else {
// push cell onto stack
if (sbase == threadIdx.x) { // maybe don't push and inc if last child
pos[depth] = pd;
node[depth] = nd;
}
depth++;
pd = 0;
nd = n * 8;
}
} else {
pd = 8; // early out because all remaining children are also zero
}
}
depth--; // done with this level
} while (depth >= j);
if (stepd > 0) {
// update velocity
velxd[i] += (ax - accxd[i]) * dthfd;
velyd[i] += (ay - accyd[i]) * dthfd;
velzd[i] += (az - acczd[i]) * dthfd;
}
// save computed acceleration
accxd[i] = ax;
accyd[i] = ay;
acczd[i] = az;
}
}
}
/******************************************************************************/
/*** advance bodies ***********************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS6, FACTOR6)
void IntegrationKernel(int nbodiesd, float dtimed, float dthfd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd, volatile float * __restrict velxd, volatile float * __restrict velyd, volatile float * __restrict velzd, volatile float * __restrict accxd, volatile float * __restrict accyd, volatile float * __restrict acczd)
{
register int i, inc;
register float dvelx, dvely, dvelz;
register float velhx, velhy, velhz;
// iterate over all bodies assigned to thread
inc = blockDim.x * gridDim.x;
for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) {
// integrate
dvelx = accxd[i] * dthfd;
dvely = accyd[i] * dthfd;
dvelz = acczd[i] * dthfd;
velhx = velxd[i] + dvelx;
velhy = velyd[i] + dvely;
velhz = velzd[i] + dvelz;
posxd[i] += velhx * dtimed;
posyd[i] += velhy * dtimed;
poszd[i] += velhz * dtimed;
velxd[i] = velhx + dvelx;
velyd[i] = velhy + dvely;
velzd[i] = velhz + dvelz;
}
}
/******************************************************************************/
static void CudaTest(char *msg)
{
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", hipGetErrorString(e));
exit(-1);
}
}
/******************************************************************************/
// random number generator
#define MULT 1103515245
#define ADD 12345
#define MASK 0x7FFFFFFF
#define TWOTO31 2147483648.0
static int A = 1;
static int B = 0;
static int randx = 1;
static int lastrand;
static void drndset(int seed)
{
A = 1;
B = 0;
randx = (A * seed + B) & MASK;
A = (MULT * A) & MASK;
B = (MULT * B + ADD) & MASK;
}
static double drnd()
{
lastrand = randx;
randx = (A * randx + B) & MASK;
return (double)lastrand / TWOTO31;
}
/******************************************************************************/
int main(int argc, char *argv[])
{
register int i, run, blocks;
int nnodes, nbodies, step, timesteps;
register double runtime;
int error;
register float dtime, dthf, epssq, itolsq;
float time, timing[7];
hipEvent_t start, stop;
float *mass, *posx, *posy, *posz, *velx, *vely, *velz;
int *errl, *sortl, *childl, *countl, *startl;
float *massl;
float *posxl, *posyl, *poszl;
float *velxl, *velyl, *velzl;
float *accxl, *accyl, *acczl;
float *maxxl, *maxyl, *maxzl;
float *minxl, *minyl, *minzl;
register double rsc, vsc, r, v, x, y, z, sq, scale;
// perform some checks
printf("CUDA BarnesHut v3.1 ");
#ifdef __KEPLER__
printf("[Kepler]\n");
#else
printf("[Fermi]\n");
#endif
printf("Copyright (c) 2013, Texas State University-San Marcos. All rights reserved.\n");
fflush(stdout);
if (argc != 4) {
fprintf(stderr, "\n");
fprintf(stderr, "arguments: number_of_bodies number_of_timesteps device\n");
exit(-1);
}
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
const int dev = atoi(argv[3]);
if ((dev < 0) || (deviceCount <= dev)) {
fprintf(stderr, "There is no device %d\n", dev);
exit(-1);
}
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {
fprintf(stderr, "There is no CUDA capable device\n");
exit(-1);
}
if (deviceProp.major < 2) {
fprintf(stderr, "Need at least compute capability 2.0\n");
exit(-1);
}
if (deviceProp.warpSize != WARPSIZE) {
fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize);
exit(-1);
}
blocks = deviceProp.multiProcessorCount;
// fprintf(stderr, "blocks = %d\n", blocks);
  if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
if (MAXDEPTH > WARPSIZE) {
fprintf(stderr, "MAXDEPTH must be less than or equal to WARPSIZE\n");
exit(-1);
}
  if ((THREADS1 <= 0) || ((THREADS1 & (THREADS1-1)) != 0)) {
fprintf(stderr, "THREADS1 must be greater than zero and a power of two\n");
exit(-1);
}
// set L1/shared memory configuration
hipFuncSetCacheConfig(BoundingBoxKernel, hipFuncCachePreferShared);
hipFuncSetCacheConfig(TreeBuildingKernel, hipFuncCachePreferL1);
hipFuncSetCacheConfig(ClearKernel1, hipFuncCachePreferL1);
hipFuncSetCacheConfig(ClearKernel2, hipFuncCachePreferL1);
hipFuncSetCacheConfig(SummarizationKernel, hipFuncCachePreferShared);
hipFuncSetCacheConfig(SortKernel, hipFuncCachePreferL1);
#ifdef __KEPLER__
hipFuncSetCacheConfig(ForceCalculationKernel, hipFuncCachePreferEqual);
#else
hipFuncSetCacheConfig(ForceCalculationKernel, hipFuncCachePreferL1);
#endif
hipFuncSetCacheConfig(IntegrationKernel, hipFuncCachePreferL1);
hipGetLastError(); // reset error value
for (run = 0; run < 3; run++) {
for (i = 0; i < 7; i++) timing[i] = 0.0f;
nbodies = atoi(argv[1]);
if (nbodies < 1) {
fprintf(stderr, "nbodies is too small: %d\n", nbodies);
exit(-1);
}
if (nbodies > (1 << 30)) {
fprintf(stderr, "nbodies is too large: %d\n", nbodies);
exit(-1);
}
nnodes = nbodies * 2;
if (nnodes < 1024*blocks) nnodes = 1024*blocks;
while ((nnodes & (WARPSIZE-1)) != 0) nnodes++;
nnodes--;
timesteps = atoi(argv[2]);
dtime = 0.025; dthf = dtime * 0.5f;
epssq = 0.05 * 0.05;
itolsq = 1.0f / (0.5 * 0.5);
// allocate memory
if (run == 0) {
printf("configuration: %d bodies, %d time steps\n", nbodies, timesteps);
mass = (float *)malloc(sizeof(float) * nbodies);
if (mass == NULL) {fprintf(stderr, "cannot allocate mass\n"); exit(-1);}
posx = (float *)malloc(sizeof(float) * nbodies);
if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
posy = (float *)malloc(sizeof(float) * nbodies);
if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
posz = (float *)malloc(sizeof(float) * nbodies);
if (posz == NULL) {fprintf(stderr, "cannot allocate posz\n"); exit(-1);}
velx = (float *)malloc(sizeof(float) * nbodies);
if (velx == NULL) {fprintf(stderr, "cannot allocate velx\n"); exit(-1);}
vely = (float *)malloc(sizeof(float) * nbodies);
if (vely == NULL) {fprintf(stderr, "cannot allocate vely\n"); exit(-1);}
velz = (float *)malloc(sizeof(float) * nbodies);
if (velz == NULL) {fprintf(stderr, "cannot allocate velz\n"); exit(-1);}
if (hipSuccess != hipMalloc((void **)&errl, sizeof(int))) fprintf(stderr, "could not allocate errd\n"); CudaTest("couldn't allocate errd");
if (hipSuccess != hipMalloc((void **)&childl, sizeof(int) * (nnodes+1) * 8)) fprintf(stderr, "could not allocate childd\n"); CudaTest("couldn't allocate childd");
if (hipSuccess != hipMalloc((void **)&massl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate massd\n"); CudaTest("couldn't allocate massd");
if (hipSuccess != hipMalloc((void **)&posxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posxd\n"); CudaTest("couldn't allocate posxd");
if (hipSuccess != hipMalloc((void **)&posyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posyd\n"); CudaTest("couldn't allocate posyd");
if (hipSuccess != hipMalloc((void **)&poszl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate poszd\n"); CudaTest("couldn't allocate poszd");
if (hipSuccess != hipMalloc((void **)&velxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate velxd\n"); CudaTest("couldn't allocate velxd");
if (hipSuccess != hipMalloc((void **)&velyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate velyd\n"); CudaTest("couldn't allocate velyd");
if (hipSuccess != hipMalloc((void **)&velzl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate velzd\n"); CudaTest("couldn't allocate velzd");
if (hipSuccess != hipMalloc((void **)&accxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate accxd\n"); CudaTest("couldn't allocate accxd");
if (hipSuccess != hipMalloc((void **)&accyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate accyd\n"); CudaTest("couldn't allocate accyd");
if (hipSuccess != hipMalloc((void **)&acczl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate acczd\n"); CudaTest("couldn't allocate acczd");
if (hipSuccess != hipMalloc((void **)&countl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate countd\n"); CudaTest("couldn't allocate countd");
if (hipSuccess != hipMalloc((void **)&startl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate startd\n"); CudaTest("couldn't allocate startd");
if (hipSuccess != hipMalloc((void **)&sortl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate sortd\n"); CudaTest("couldn't allocate sortd");
if (hipSuccess != hipMalloc((void **)&maxxl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate maxxd\n"); CudaTest("couldn't allocate maxxd");
if (hipSuccess != hipMalloc((void **)&maxyl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate maxyd\n"); CudaTest("couldn't allocate maxyd");
if (hipSuccess != hipMalloc((void **)&maxzl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate maxzd\n"); CudaTest("couldn't allocate maxzd");
if (hipSuccess != hipMalloc((void **)&minxl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate minxd\n"); CudaTest("couldn't allocate minxd");
if (hipSuccess != hipMalloc((void **)&minyl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate minyd\n"); CudaTest("couldn't allocate minyd");
if (hipSuccess != hipMalloc((void **)&minzl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate minzd\n"); CudaTest("couldn't allocate minzd");
}
// generate input
drndset(7);
rsc = (3 * 3.1415926535897932384626433832795) / 16;
vsc = sqrt(1.0 / rsc);
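  // The rejection sampling below follows the classic Plummer-model setup used by
  // Barnes-Hut n-body benchmarks: rsc rescales the sampled radii to standard N-body
  // units and vsc rescales the corresponding velocities.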
for (i = 0; i < nbodies; i++) {
mass[i] = 1.0 / nbodies;
r = 1.0 / sqrt(pow(drnd()*0.999, -2.0/3.0) - 1);
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = rsc * r / sqrt(sq);
posx[i] = x * scale;
posy[i] = y * scale;
posz[i] = z * scale;
do {
x = drnd();
y = drnd() * 0.1;
} while (y > x*x * pow(1 - x*x, 3.5));
v = x * sqrt(2.0 / sqrt(1 + r*r));
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = vsc * v / sqrt(sq);
velx[i] = x * scale;
vely[i] = y * scale;
velz[i] = z * scale;
}
if (hipSuccess != hipMemcpy(massl, mass, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of mass to device failed\n"); CudaTest("mass copy to device failed");
if (hipSuccess != hipMemcpy(posxl, posx, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n"); CudaTest("posx copy to device failed");
if (hipSuccess != hipMemcpy(posyl, posy, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n"); CudaTest("posy copy to device failed");
if (hipSuccess != hipMemcpy(poszl, posz, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posz to device failed\n"); CudaTest("posz copy to device failed");
if (hipSuccess != hipMemcpy(velxl, velx, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of velx to device failed\n"); CudaTest("velx copy to device failed");
if (hipSuccess != hipMemcpy(velyl, vely, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of vely to device failed\n"); CudaTest("vely copy to device failed");
if (hipSuccess != hipMemcpy(velzl, velz, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of velz to device failed\n"); CudaTest("velz copy to device failed");
// run timesteps (launch GPU kernels)
hipEventCreate(&start); hipEventCreate(&stop);
struct timeval starttime, endtime;
gettimeofday(&starttime, NULL);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( InitializationKernel), dim3(1), dim3(1), 0, 0, errl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[0] += time;
CudaTest("kernel 0 launch failed");
for (step = 0; step < timesteps; step++) {
hipEventRecord(start, 0);
hipLaunchKernelGGL(( BoundingBoxKernel), dim3(blocks * FACTOR1), dim3(THREADS1), 0, 0, nnodes, nbodies, startl, childl, massl, posxl, posyl, poszl, maxxl, maxyl, maxzl, minxl, minyl, minzl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[1] += time;
CudaTest("kernel 1 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( ClearKernel1), dim3(blocks * 1), dim3(1024), 0, 0, nnodes, nbodies, childl);
hipLaunchKernelGGL(( TreeBuildingKernel), dim3(blocks * FACTOR2), dim3(THREADS2), 0, 0, nnodes, nbodies, errl, childl, posxl, posyl, poszl);
hipLaunchKernelGGL(( ClearKernel2), dim3(blocks * 1), dim3(1024), 0, 0, nnodes, startl, massl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[2] += time;
CudaTest("kernel 2 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( SummarizationKernel), dim3(blocks * FACTOR3), dim3(THREADS3), 0, 0, nnodes, nbodies, countl, childl, massl, posxl, posyl, poszl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[3] += time;
CudaTest("kernel 3 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( SortKernel), dim3(blocks * FACTOR4), dim3(THREADS4), 0, 0, nnodes, nbodies, sortl, countl, startl, childl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[4] += time;
CudaTest("kernel 4 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( ForceCalculationKernel), dim3(blocks * FACTOR5), dim3(THREADS5), 0, 0, nnodes, nbodies, errl, dthf, itolsq, epssq, sortl, childl, massl, posxl, posyl, poszl, velxl, velyl, velzl, accxl, accyl, acczl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[5] += time;
CudaTest("kernel 5 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( IntegrationKernel), dim3(blocks * FACTOR6), dim3(THREADS6), 0, 0, nbodies, dtime, dthf, posxl, posyl, poszl, velxl, velyl, velzl, accxl, accyl, acczl);
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[6] += time;
CudaTest("kernel 6 launch failed");
}
CudaTest("kernel launch failed");
hipEventDestroy(start); hipEventDestroy(stop);
// transfer result back to CPU
if (hipSuccess != hipMemcpy(&error, errl, sizeof(int), hipMemcpyDeviceToHost)) fprintf(stderr, "copying of err from device failed\n"); CudaTest("err copy from device failed");
if (hipSuccess != hipMemcpy(posx, posxl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posx from device failed\n"); CudaTest("posx copy from device failed");
if (hipSuccess != hipMemcpy(posy, posyl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posy from device failed\n"); CudaTest("posy copy from device failed");
if (hipSuccess != hipMemcpy(posz, poszl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posz from device failed\n"); CudaTest("posz copy from device failed");
if (hipSuccess != hipMemcpy(velx, velxl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of velx from device failed\n"); CudaTest("velx copy from device failed");
if (hipSuccess != hipMemcpy(vely, velyl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of vely from device failed\n"); CudaTest("vely copy from device failed");
if (hipSuccess != hipMemcpy(velz, velzl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of velz from device failed\n"); CudaTest("velz copy from device failed");
gettimeofday(&endtime, NULL);
runtime = endtime.tv_sec + endtime.tv_usec/1000000.0 - starttime.tv_sec - starttime.tv_usec/1000000.0;
printf("runtime: %.4lf s (", runtime);
time = 0;
for (i = 1; i < 7; i++) {
printf(" %.1f ", timing[i]);
time += timing[i];
}
if (error == 0) {
printf(") = %.1f ms\n", time);
} else {
printf(") = %.1f ms FAILED %d\n", time, error);
}
}
// print output
i = 0;
// for (i = 0; i < nbodies; i++) {
printf("%.2e %.2e %.2e\n", posx[i], posy[i], posz[i]);
// }
free(mass);
free(posx);
free(posy);
free(posz);
free(velx);
free(vely);
free(velz);
hipFree(errl);
hipFree(childl);
hipFree(massl);
hipFree(posxl);
hipFree(posyl);
hipFree(poszl);
hipFree(countl);
hipFree(startl);
hipFree(maxxl);
hipFree(maxyl);
hipFree(maxzl);
hipFree(minxl);
hipFree(minyl);
hipFree(minzl);
return 0;
}
| c83f0e0f642bb86e53ff13b1c483670a90838818.cu | /*
CUDA BarnesHut v3.1: Simulation of the gravitational forces
in a galactic cluster using the Barnes-Hut n-body algorithm
Copyright (c) 2013, Texas State University-San Marcos. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted for academic, research, experimental, or personal use provided that
the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Texas State University-San Marcos nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
For all other uses, please contact the Office for Commercialization and Industry
Relations at Texas State University-San Marcos <http://www.txstate.edu/ocir/>.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher <[email protected]>
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
#ifdef __KEPLER__
// thread count
#define THREADS1 1024 /* must be a power of 2 */
#define THREADS2 1024
#define THREADS3 768
#define THREADS4 128
#define THREADS5 1024
#define THREADS6 1024
// block count = factor * #SMs
#define FACTOR1 2
#define FACTOR2 2
#define FACTOR3 1 /* must all be resident at the same time */
#define FACTOR4 4 /* must all be resident at the same time */
#define FACTOR5 2
#define FACTOR6 2
#else
// thread count
#define THREADS1 512 /* must be a power of 2 */
#define THREADS2 512
#define THREADS3 128
#define THREADS4 64
#define THREADS5 256
#define THREADS6 1024
// block count = factor * #SMs
#define FACTOR1 3
#define FACTOR2 3
#define FACTOR3 6 /* must all be resident at the same time */
#define FACTOR4 6 /* must all be resident at the same time */
#define FACTOR5 5
#define FACTOR6 1
#endif
#define WARPSIZE 32
#define MAXDEPTH 32
__device__ volatile int stepd, bottomd, maxdepthd;
__device__ unsigned int blkcntd;
__device__ volatile float radiusd;
/******************************************************************************/
/*** initialize memory ********************************************************/
/******************************************************************************/
__global__ void InitializationKernel(int * __restrict errd)
{
*errd = 0;
stepd = -1;
maxdepthd = 1;
blkcntd = 0;
}
/******************************************************************************/
/*** compute center and radius ************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS1, FACTOR1)
void BoundingBoxKernel(int nnodesd, int nbodiesd, volatile int * __restrict startd, volatile int * __restrict childd, volatile float * __restrict massd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd, volatile float * __restrict maxxd, volatile float * __restrict maxyd, volatile float * __restrict maxzd, volatile float * __restrict minxd, volatile float * __restrict minyd, volatile float * __restrict minzd)
{
register int i, j, k, inc;
register float val, minx, maxx, miny, maxy, minz, maxz;
__shared__ volatile float sminx[THREADS1], smaxx[THREADS1], sminy[THREADS1], smaxy[THREADS1], sminz[THREADS1], smaxz[THREADS1];
// initialize with valid data (in case #bodies < #threads)
minx = maxx = posxd[0];
miny = maxy = posyd[0];
minz = maxz = poszd[0];
// scan all bodies
i = threadIdx.x;
inc = THREADS1 * gridDim.x;
for (j = i + blockIdx.x * THREADS1; j < nbodiesd; j += inc) {
val = posxd[j];
minx = fminf(minx, val);
maxx = fmaxf(maxx, val);
val = posyd[j];
miny = fminf(miny, val);
maxy = fmaxf(maxy, val);
val = poszd[j];
minz = fminf(minz, val);
maxz = fmaxf(maxz, val);
}
// reduction in shared memory
sminx[i] = minx;
smaxx[i] = maxx;
sminy[i] = miny;
smaxy[i] = maxy;
sminz[i] = minz;
smaxz[i] = maxz;
for (j = THREADS1 / 2; j > 0; j /= 2) {
__syncthreads();
if (i < j) {
k = i + j;
sminx[i] = minx = fminf(minx, sminx[k]);
smaxx[i] = maxx = fmaxf(maxx, smaxx[k]);
sminy[i] = miny = fminf(miny, sminy[k]);
smaxy[i] = maxy = fmaxf(maxy, smaxy[k]);
sminz[i] = minz = fminf(minz, sminz[k]);
smaxz[i] = maxz = fmaxf(maxz, smaxz[k]);
}
}
// write block result to global memory
if (i == 0) {
k = blockIdx.x;
minxd[k] = minx;
maxxd[k] = maxx;
minyd[k] = miny;
maxyd[k] = maxy;
minzd[k] = minz;
maxzd[k] = maxz;
__threadfence();
inc = gridDim.x - 1;
if (inc == atomicInc(&blkcntd, inc)) {
// I'm the last block, so combine all block results
for (j = 0; j <= inc; j++) {
minx = fminf(minx, minxd[j]);
maxx = fmaxf(maxx, maxxd[j]);
miny = fminf(miny, minyd[j]);
maxy = fmaxf(maxy, maxyd[j]);
minz = fminf(minz, minzd[j]);
maxz = fmaxf(maxz, maxzd[j]);
}
// compute 'radius'
val = fmaxf(maxx - minx, maxy - miny);
radiusd = fmaxf(val, maxz - minz) * 0.5f;
// create root node
k = nnodesd;
bottomd = k;
massd[k] = -1.0f;
startd[k] = 0;
posxd[k] = (minx + maxx) * 0.5f;
posyd[k] = (miny + maxy) * 0.5f;
poszd[k] = (minz + maxz) * 0.5f;
k *= 8;
for (i = 0; i < 8; i++) childd[k + i] = -1;
stepd++;
}
}
}
/******************************************************************************/
/*** build tree ***************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(1024, 1)
void ClearKernel1(int nnodesd, int nbodiesd, volatile int * __restrict childd)
{
register int k, inc, top, bottom;
top = 8 * nnodesd;
bottom = 8 * nbodiesd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
// iterate over all cells assigned to thread
while (k < top) {
childd[k] = -1;
k += inc;
}
}
__global__
__launch_bounds__(THREADS2, FACTOR2)
void TreeBuildingKernel(int nnodesd, int nbodiesd, volatile int * __restrict errd, volatile int * __restrict childd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd)
{
register int i, j, depth, localmaxdepth, skip, inc;
register float x, y, z, r;
register float px, py, pz;
register float dx, dy, dz;
register int ch, n, cell, locked, patch;
register float radius, rootx, rooty, rootz;
// cache root data
radius = radiusd;
rootx = posxd[nnodesd];
rooty = posyd[nnodesd];
rootz = poszd[nnodesd];
localmaxdepth = 1;
skip = 1;
inc = blockDim.x * gridDim.x;
i = threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all bodies assigned to thread
while (i < nbodiesd) {
if (skip != 0) {
// new body, so start traversing at root
skip = 0;
px = posxd[i];
py = posyd[i];
pz = poszd[i];
n = nnodesd;
depth = 1;
r = radius * 0.5f;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
if (rootx < px) {j = 1; dx = r;}
if (rooty < py) {j |= 2; dy = r;}
if (rootz < pz) {j |= 4; dz = r;}
x = rootx + dx;
y = rooty + dy;
z = rootz + dz;
}
// follow path to leaf cell
ch = childd[n*8+j];
while (ch >= nbodiesd) {
n = ch;
depth++;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
if (x < px) {j = 1; dx = r;}
if (y < py) {j |= 2; dy = r;}
if (z < pz) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
}
if (ch != -2) { // skip if child pointer is locked and try again later
locked = n*8+j;
if (ch == -1) {
if (-1 == atomicCAS((int *)&childd[locked], -1, i)) { // if null, just insert the new body
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 1;
}
} else { // there already is a body in this position
if (ch == atomicCAS((int *)&childd[locked], ch, -2)) { // try to lock
patch = -1;
// create new cell(s) and insert the old and new body
do {
depth++;
cell = atomicSub((int *)&bottomd, 1) - 1;
if (cell <= nbodiesd) {
*errd = 1;
bottomd = nnodesd;
}
if (patch != -1) {
childd[n*8+j] = cell;
}
patch = max(patch, cell);
j = 0;
if (x < posxd[ch]) j = 1;
if (y < posyd[ch]) j |= 2;
if (z < poszd[ch]) j |= 4;
childd[cell*8+j] = ch;
n = cell;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
if (x < px) {j = 1; dx = r;}
if (y < py) {j |= 2; dy = r;}
if (z < pz) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
// repeat until the two bodies are different children
} while (ch >= 0);
childd[n*8+j] = i;
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 2;
}
}
}
__syncthreads(); // __threadfence();
if (skip == 2) {
childd[locked] = patch;
}
}
// record maximum tree depth
atomicMax((int *)&maxdepthd, localmaxdepth);
}
__global__
__launch_bounds__(1024, 1)
void ClearKernel2(int nnodesd, volatile int * __restrict startd, volatile float * __restrict massd)
{
register int k, inc, bottom;
bottom = bottomd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
// iterate over all cells assigned to thread
while (k < nnodesd) {
massd[k] = -1.0f;
startd[k] = -1;
k += inc;
}
}
/******************************************************************************/
/*** compute center of mass ***************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS3, FACTOR3)
void SummarizationKernel(const int nnodesd, const int nbodiesd, volatile int * __restrict countd, const int * __restrict childd, volatile float * __restrict massd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd)
{
register int i, j, k, ch, inc, cnt, bottom, flag;
register float m, cm, px, py, pz;
__shared__ int child[THREADS3 * 8];
__shared__ float mass[THREADS3 * 8];
bottom = bottomd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
register int restart = k;
for (j = 0; j < 5; j++) { // wait-free pre-passes
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (massd[k] < 0.0f) {
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
child[i*THREADS3+threadIdx.x] = ch; // cache children
if ((ch >= nbodiesd) && ((mass[i*THREADS3+threadIdx.x] = massd[ch]) < 0.0f)) {
break;
}
}
if (i == 8) {
// all children are ready
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if (ch >= 0) {
if (ch >= nbodiesd) { // count bodies (needed later)
m = mass[i*THREADS3+threadIdx.x];
cnt += countd[ch];
} else {
m = massd[ch];
cnt++;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
}
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
__threadfence(); // make sure data are visible before setting mass
massd[k] = cm;
}
}
k += inc; // move on to next cell
}
k = restart;
}
flag = 0;
j = 0;
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (massd[k] >= 0.0f) {
k += inc;
} else {
if (j == 0) {
j = 8;
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
child[i*THREADS3+threadIdx.x] = ch; // cache children
if ((ch < nbodiesd) || ((mass[i*THREADS3+threadIdx.x] = massd[ch]) >= 0.0f)) {
j--;
}
}
} else {
j = 8;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if ((ch < nbodiesd) || (mass[i*THREADS3+threadIdx.x] >= 0.0f) || ((mass[i*THREADS3+threadIdx.x] = massd[ch]) >= 0.0f)) {
j--;
}
}
}
if (j == 0) {
// all children are ready
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
for (i = 0; i < 8; i++) {
ch = child[i*THREADS3+threadIdx.x];
if (ch >= 0) {
if (ch >= nbodiesd) { // count bodies (needed later)
m = mass[i*THREADS3+threadIdx.x];
cnt += countd[ch];
} else {
m = massd[ch];
cnt++;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
}
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
flag = 1;
}
}
__syncthreads(); // __threadfence();
if (flag != 0) {
massd[k] = cm;
k += inc;
flag = 0;
}
}
}
/******************************************************************************/
/*** sort bodies **************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS4, FACTOR4)
void SortKernel(int nnodesd, int nbodiesd, int * __restrict sortd, int * __restrict countd, volatile int * __restrict startd, int * __restrict childd)
{
register int i, j, k, ch, dec, start, bottom;
bottom = bottomd;
dec = blockDim.x * gridDim.x;
k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all cells assigned to thread
while (k >= bottom) {
start = startd[k];
if (start >= 0) {
j = 0;
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= 0) {
if (i != j) {
// move children to front (needed later for speed)
childd[k*8+i] = -1;
childd[k*8+j] = ch;
}
j++;
if (ch >= nbodiesd) {
// child is a cell
startd[ch] = start; // set start ID of child
start += countd[ch]; // add #bodies in subtree
} else {
// child is a body
sortd[start] = ch; // record body in 'sorted' array
start++;
}
}
}
k -= dec; // move on to next cell
}
}
}
/******************************************************************************/
/*** compute force ************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS5, FACTOR5)
void ForceCalculationKernel(int nnodesd, int nbodiesd, volatile int * __restrict errd, float dthfd, float itolsqd, float epssqd, volatile int * __restrict sortd, volatile int * __restrict childd, volatile float * __restrict massd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd, volatile float * __restrict velxd, volatile float * __restrict velyd, volatile float * __restrict velzd, volatile float * __restrict accxd, volatile float * __restrict accyd, volatile float * __restrict acczd)
{
register int i, j, k, n, depth, base, sbase, diff, pd, nd;
register float px, py, pz, ax, ay, az, dx, dy, dz, tmp;
__shared__ volatile int pos[MAXDEPTH * THREADS5/WARPSIZE], node[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ float dq[MAXDEPTH * THREADS5/WARPSIZE];
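// pos/node form a small per-warp traversal stack (only the warp's first thread, sbase, writes it).
// dq[d] caches the Barnes-Hut opening criterion for tree depth d: the squared cell diameter at that
// depth divided by theta^2 (itolsqd = 1/theta^2), plus the softening epssqd. A cell is treated as a
// single body when every thread in the warp sees a squared distance >= dq[depth] (the __all() test).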
if (0 == threadIdx.x) {
tmp = radiusd * 2;
// precompute values that depend only on tree level
dq[0] = tmp * tmp * itolsqd;
for (i = 1; i < maxdepthd; i++) {
dq[i] = dq[i - 1] * 0.25f;
dq[i - 1] += epssqd;
}
dq[i - 1] += epssqd;
if (maxdepthd > MAXDEPTH) {
*errd = maxdepthd;
}
}
__syncthreads();
if (maxdepthd <= MAXDEPTH) {
// figure out first thread in each warp (lane 0)
base = threadIdx.x / WARPSIZE;
sbase = base * WARPSIZE;
j = base * MAXDEPTH;
diff = threadIdx.x - sbase;
// make multiple copies to avoid index calculations later
if (diff < MAXDEPTH) {
dq[diff+j] = dq[diff];
}
__syncthreads();
__threadfence_block();
// iterate over all bodies assigned to thread
for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) {
i = sortd[k]; // get permuted/sorted index
// cache position info
px = posxd[i];
py = posyd[i];
pz = poszd[i];
ax = 0.0f;
ay = 0.0f;
az = 0.0f;
// initialize iteration stack, i.e., push root node onto stack
depth = j;
if (sbase == threadIdx.x) {
pos[j] = 0;
node[j] = nnodesd * 8;
}
do {
// stack is not empty
pd = pos[depth];
nd = node[depth];
while (pd < 8) {
// node on top of stack has more children to process
n = childd[nd + pd]; // load child pointer
pd++;
if (n >= 0) {
dx = posxd[n] - px;
dy = posyd[n] - py;
dz = poszd[n] - pz;
tmp = dx*dx + (dy*dy + (dz*dz + epssqd)); // compute distance squared (plus softening)
if ((n < nbodiesd) || __all(tmp >= dq[depth])) { // check if all threads agree that cell is far enough away (or is a body)
tmp = rsqrtf(tmp); // compute distance
tmp = massd[n] * tmp * tmp * tmp;
ax += dx * tmp;
ay += dy * tmp;
az += dz * tmp;
} else {
// push cell onto stack
if (sbase == threadIdx.x) { // maybe don't push and inc if last child
pos[depth] = pd;
node[depth] = nd;
}
depth++;
pd = 0;
nd = n * 8;
}
} else {
pd = 8; // early out because all remaining children are also zero
}
}
depth--; // done with this level
} while (depth >= j);
if (stepd > 0) {
// update velocity
velxd[i] += (ax - accxd[i]) * dthfd;
velyd[i] += (ay - accyd[i]) * dthfd;
velzd[i] += (az - acczd[i]) * dthfd;
}
// save computed acceleration
accxd[i] = ax;
accyd[i] = ay;
acczd[i] = az;
}
}
}
/******************************************************************************/
/*** advance bodies ***********************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS6, FACTOR6)
void IntegrationKernel(int nbodiesd, float dtimed, float dthfd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd, volatile float * __restrict velxd, volatile float * __restrict velyd, volatile float * __restrict velzd, volatile float * __restrict accxd, volatile float * __restrict accyd, volatile float * __restrict acczd)
{
register int i, inc;
register float dvelx, dvely, dvelz;
register float velhx, velhy, velhz;
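// Leapfrog-style update: velocities are advanced by half a step (dthfd = dtimed/2), the half-step
// velocities velh* drift the positions over the full step, and a second half kick is applied;
// ForceCalculationKernel later corrects the stored velocity once the new accelerations are known.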
// iterate over all bodies assigned to thread
inc = blockDim.x * gridDim.x;
for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) {
// integrate
dvelx = accxd[i] * dthfd;
dvely = accyd[i] * dthfd;
dvelz = acczd[i] * dthfd;
velhx = velxd[i] + dvelx;
velhy = velyd[i] + dvely;
velhz = velzd[i] + dvelz;
posxd[i] += velhx * dtimed;
posyd[i] += velhy * dtimed;
poszd[i] += velhz * dtimed;
velxd[i] = velhx + dvelx;
velyd[i] = velhy + dvely;
velzd[i] = velhz + dvelz;
}
}
/******************************************************************************/
static void CudaTest(char *msg)
{
cudaError_t e;
cudaThreadSynchronize();
if (cudaSuccess != (e = cudaGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", cudaGetErrorString(e));
exit(-1);
}
}
/******************************************************************************/
// random number generator
#define MULT 1103515245
#define ADD 12345
#define MASK 0x7FFFFFFF
#define TWOTO31 2147483648.0
static int A = 1;
static int B = 0;
static int randx = 1;
static int lastrand;
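// Simple linear congruential generator: drndset() seeds the state and folds the multiplier and
// increment into A and B, and drnd() returns the previous 31-bit state scaled to [0, 1) while
// advancing randx = (A * randx + B) mod 2^31.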
static void drndset(int seed)
{
A = 1;
B = 0;
randx = (A * seed + B) & MASK;
A = (MULT * A) & MASK;
B = (MULT * B + ADD) & MASK;
}
static double drnd()
{
lastrand = randx;
randx = (A * randx + B) & MASK;
return (double)lastrand / TWOTO31;
}
/******************************************************************************/
int main(int argc, char *argv[])
{
register int i, run, blocks;
int nnodes, nbodies, step, timesteps;
register double runtime;
int error;
register float dtime, dthf, epssq, itolsq;
float time, timing[7];
cudaEvent_t start, stop;
float *mass, *posx, *posy, *posz, *velx, *vely, *velz;
int *errl, *sortl, *childl, *countl, *startl;
float *massl;
float *posxl, *posyl, *poszl;
float *velxl, *velyl, *velzl;
float *accxl, *accyl, *acczl;
float *maxxl, *maxyl, *maxzl;
float *minxl, *minyl, *minzl;
register double rsc, vsc, r, v, x, y, z, sq, scale;
// perform some checks
printf("CUDA BarnesHut v3.1 ");
#ifdef __KEPLER__
printf("[Kepler]\n");
#else
printf("[Fermi]\n");
#endif
printf("Copyright (c) 2013, Texas State University-San Marcos. All rights reserved.\n");
fflush(stdout);
if (argc != 4) {
fprintf(stderr, "\n");
fprintf(stderr, "arguments: number_of_bodies number_of_timesteps device\n");
exit(-1);
}
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
const int dev = atoi(argv[3]);
if ((dev < 0) || (deviceCount <= dev)) {
fprintf(stderr, "There is no device %d\n", dev);
exit(-1);
}
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {
fprintf(stderr, "There is no CUDA capable device\n");
exit(-1);
}
if (deviceProp.major < 2) {
fprintf(stderr, "Need at least compute capability 2.0\n");
exit(-1);
}
if (deviceProp.warpSize != WARPSIZE) {
fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize);
exit(-1);
}
blocks = deviceProp.multiProcessorCount;
// fprintf(stderr, "blocks = %d\n", blocks);
if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
if (MAXDEPTH > WARPSIZE) {
fprintf(stderr, "MAXDEPTH must be less than or equal to WARPSIZE\n");
exit(-1);
}
if ((THREADS1 <= 0) || ((THREADS1 & (THREADS1-1)) != 0)) {
fprintf(stderr, "THREADS1 must be greater than zero and a power of two\n");
exit(-1);
}
// set L1/shared memory configuration
cudaFuncSetCacheConfig(BoundingBoxKernel, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(TreeBuildingKernel, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(ClearKernel1, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(ClearKernel2, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(SummarizationKernel, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(SortKernel, cudaFuncCachePreferL1);
#ifdef __KEPLER__
cudaFuncSetCacheConfig(ForceCalculationKernel, cudaFuncCachePreferEqual);
#else
cudaFuncSetCacheConfig(ForceCalculationKernel, cudaFuncCachePreferL1);
#endif
cudaFuncSetCacheConfig(IntegrationKernel, cudaFuncCachePreferL1);
cudaGetLastError(); // reset error value
for (run = 0; run < 3; run++) {
for (i = 0; i < 7; i++) timing[i] = 0.0f;
nbodies = atoi(argv[1]);
if (nbodies < 1) {
fprintf(stderr, "nbodies is too small: %d\n", nbodies);
exit(-1);
}
if (nbodies > (1 << 30)) {
fprintf(stderr, "nbodies is too large: %d\n", nbodies);
exit(-1);
}
nnodes = nbodies * 2;
if (nnodes < 1024*blocks) nnodes = 1024*blocks;
while ((nnodes & (WARPSIZE-1)) != 0) nnodes++;
nnodes--;
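// Tree layout: array slots [0, nbodies) hold bodies and the remaining slots hold internal cells,
// with the root at index nnodes and new cells allocated downward from bottomd. nnodes is chosen so
// that nnodes+1 is a multiple of the warp size and there are at least 1024 tree nodes per SM.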
timesteps = atoi(argv[2]);
dtime = 0.025; dthf = dtime * 0.5f;
epssq = 0.05 * 0.05;
itolsq = 1.0f / (0.5 * 0.5);
// allocate memory
if (run == 0) {
printf("configuration: %d bodies, %d time steps\n", nbodies, timesteps);
mass = (float *)malloc(sizeof(float) * nbodies);
if (mass == NULL) {fprintf(stderr, "cannot allocate mass\n"); exit(-1);}
posx = (float *)malloc(sizeof(float) * nbodies);
if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
posy = (float *)malloc(sizeof(float) * nbodies);
if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
posz = (float *)malloc(sizeof(float) * nbodies);
if (posz == NULL) {fprintf(stderr, "cannot allocate posz\n"); exit(-1);}
velx = (float *)malloc(sizeof(float) * nbodies);
if (velx == NULL) {fprintf(stderr, "cannot allocate velx\n"); exit(-1);}
vely = (float *)malloc(sizeof(float) * nbodies);
if (vely == NULL) {fprintf(stderr, "cannot allocate vely\n"); exit(-1);}
velz = (float *)malloc(sizeof(float) * nbodies);
if (velz == NULL) {fprintf(stderr, "cannot allocate velz\n"); exit(-1);}
if (cudaSuccess != cudaMalloc((void **)&errl, sizeof(int))) fprintf(stderr, "could not allocate errd\n"); CudaTest("couldn't allocate errd");
if (cudaSuccess != cudaMalloc((void **)&childl, sizeof(int) * (nnodes+1) * 8)) fprintf(stderr, "could not allocate childd\n"); CudaTest("couldn't allocate childd");
if (cudaSuccess != cudaMalloc((void **)&massl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate massd\n"); CudaTest("couldn't allocate massd");
if (cudaSuccess != cudaMalloc((void **)&posxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posxd\n"); CudaTest("couldn't allocate posxd");
if (cudaSuccess != cudaMalloc((void **)&posyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posyd\n"); CudaTest("couldn't allocate posyd");
if (cudaSuccess != cudaMalloc((void **)&poszl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate poszd\n"); CudaTest("couldn't allocate poszd");
if (cudaSuccess != cudaMalloc((void **)&velxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate velxd\n"); CudaTest("couldn't allocate velxd");
if (cudaSuccess != cudaMalloc((void **)&velyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate velyd\n"); CudaTest("couldn't allocate velyd");
if (cudaSuccess != cudaMalloc((void **)&velzl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate velzd\n"); CudaTest("couldn't allocate velzd");
if (cudaSuccess != cudaMalloc((void **)&accxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate accxd\n"); CudaTest("couldn't allocate accxd");
if (cudaSuccess != cudaMalloc((void **)&accyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate accyd\n"); CudaTest("couldn't allocate accyd");
if (cudaSuccess != cudaMalloc((void **)&acczl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate acczd\n"); CudaTest("couldn't allocate acczd");
if (cudaSuccess != cudaMalloc((void **)&countl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate countd\n"); CudaTest("couldn't allocate countd");
if (cudaSuccess != cudaMalloc((void **)&startl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate startd\n"); CudaTest("couldn't allocate startd");
if (cudaSuccess != cudaMalloc((void **)&sortl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate sortd\n"); CudaTest("couldn't allocate sortd");
if (cudaSuccess != cudaMalloc((void **)&maxxl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate maxxd\n"); CudaTest("couldn't allocate maxxd");
if (cudaSuccess != cudaMalloc((void **)&maxyl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate maxyd\n"); CudaTest("couldn't allocate maxyd");
if (cudaSuccess != cudaMalloc((void **)&maxzl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate maxzd\n"); CudaTest("couldn't allocate maxzd");
if (cudaSuccess != cudaMalloc((void **)&minxl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate minxd\n"); CudaTest("couldn't allocate minxd");
if (cudaSuccess != cudaMalloc((void **)&minyl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate minyd\n"); CudaTest("couldn't allocate minyd");
if (cudaSuccess != cudaMalloc((void **)&minzl, sizeof(float) * blocks * FACTOR1)) fprintf(stderr, "could not allocate minzd\n"); CudaTest("couldn't allocate minzd");
}
// generate input
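// The loop below fills the bodies with a Plummer-model distribution (the generator commonly used
// for galactic-cluster n-body benchmarks): equal masses, radii drawn from the Plummer profile, and
// velocity magnitudes accepted by rejection sampling, with directions drawn uniformly on the sphere.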
drndset(7);
rsc = (3 * 3.1415926535897932384626433832795) / 16;
vsc = sqrt(1.0 / rsc);
for (i = 0; i < nbodies; i++) {
mass[i] = 1.0 / nbodies;
r = 1.0 / sqrt(pow(drnd()*0.999, -2.0/3.0) - 1);
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = rsc * r / sqrt(sq);
posx[i] = x * scale;
posy[i] = y * scale;
posz[i] = z * scale;
do {
x = drnd();
y = drnd() * 0.1;
} while (y > x*x * pow(1 - x*x, 3.5));
v = x * sqrt(2.0 / sqrt(1 + r*r));
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = vsc * v / sqrt(sq);
velx[i] = x * scale;
vely[i] = y * scale;
velz[i] = z * scale;
}
if (cudaSuccess != cudaMemcpy(massl, mass, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of mass to device failed\n"); CudaTest("mass copy to device failed");
if (cudaSuccess != cudaMemcpy(posxl, posx, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n"); CudaTest("posx copy to device failed");
if (cudaSuccess != cudaMemcpy(posyl, posy, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n"); CudaTest("posy copy to device failed");
if (cudaSuccess != cudaMemcpy(poszl, posz, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posz to device failed\n"); CudaTest("posz copy to device failed");
if (cudaSuccess != cudaMemcpy(velxl, velx, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of velx to device failed\n"); CudaTest("velx copy to device failed");
if (cudaSuccess != cudaMemcpy(velyl, vely, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of vely to device failed\n"); CudaTest("vely copy to device failed");
if (cudaSuccess != cudaMemcpy(velzl, velz, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of velz to device failed\n"); CudaTest("velz copy to device failed");
// run timesteps (launch GPU kernels)
cudaEventCreate(&start); cudaEventCreate(&stop);
struct timeval starttime, endtime;
gettimeofday(&starttime, NULL);
cudaEventRecord(start, 0);
InitializationKernel<<<1, 1>>>(errl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[0] += time;
CudaTest("kernel 0 launch failed");
for (step = 0; step < timesteps; step++) {
cudaEventRecord(start, 0);
BoundingBoxKernel<<<blocks * FACTOR1, THREADS1>>>(nnodes, nbodies, startl, childl, massl, posxl, posyl, poszl, maxxl, maxyl, maxzl, minxl, minyl, minzl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[1] += time;
CudaTest("kernel 1 launch failed");
cudaEventRecord(start, 0);
ClearKernel1<<<blocks * 1, 1024>>>(nnodes, nbodies, childl);
TreeBuildingKernel<<<blocks * FACTOR2, THREADS2>>>(nnodes, nbodies, errl, childl, posxl, posyl, poszl);
ClearKernel2<<<blocks * 1, 1024>>>(nnodes, startl, massl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[2] += time;
CudaTest("kernel 2 launch failed");
cudaEventRecord(start, 0);
SummarizationKernel<<<blocks * FACTOR3, THREADS3>>>(nnodes, nbodies, countl, childl, massl, posxl, posyl, poszl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[3] += time;
CudaTest("kernel 3 launch failed");
cudaEventRecord(start, 0);
SortKernel<<<blocks * FACTOR4, THREADS4>>>(nnodes, nbodies, sortl, countl, startl, childl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[4] += time;
CudaTest("kernel 4 launch failed");
cudaEventRecord(start, 0);
ForceCalculationKernel<<<blocks * FACTOR5, THREADS5>>>(nnodes, nbodies, errl, dthf, itolsq, epssq, sortl, childl, massl, posxl, posyl, poszl, velxl, velyl, velzl, accxl, accyl, acczl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[5] += time;
CudaTest("kernel 5 launch failed");
cudaEventRecord(start, 0);
IntegrationKernel<<<blocks * FACTOR6, THREADS6>>>(nbodies, dtime, dthf, posxl, posyl, poszl, velxl, velyl, velzl, accxl, accyl, acczl);
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[6] += time;
CudaTest("kernel 6 launch failed");
}
CudaTest("kernel launch failed");
cudaEventDestroy(start); cudaEventDestroy(stop);
// transfer result back to CPU
if (cudaSuccess != cudaMemcpy(&error, errl, sizeof(int), cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of err from device failed\n"); CudaTest("err copy from device failed");
if (cudaSuccess != cudaMemcpy(posx, posxl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posx from device failed\n"); CudaTest("posx copy from device failed");
if (cudaSuccess != cudaMemcpy(posy, posyl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posy from device failed\n"); CudaTest("posy copy from device failed");
if (cudaSuccess != cudaMemcpy(posz, poszl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posz from device failed\n"); CudaTest("posz copy from device failed");
if (cudaSuccess != cudaMemcpy(velx, velxl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of velx from device failed\n"); CudaTest("velx copy from device failed");
if (cudaSuccess != cudaMemcpy(vely, velyl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of vely from device failed\n"); CudaTest("vely copy from device failed");
if (cudaSuccess != cudaMemcpy(velz, velzl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of velz from device failed\n"); CudaTest("velz copy from device failed");
gettimeofday(&endtime, NULL);
runtime = endtime.tv_sec + endtime.tv_usec/1000000.0 - starttime.tv_sec - starttime.tv_usec/1000000.0;
printf("runtime: %.4lf s (", runtime);
time = 0;
for (i = 1; i < 7; i++) {
printf(" %.1f ", timing[i]);
time += timing[i];
}
if (error == 0) {
printf(") = %.1f ms\n", time);
} else {
printf(") = %.1f ms FAILED %d\n", time, error);
}
}
// print output
i = 0;
// for (i = 0; i < nbodies; i++) {
printf("%.2e %.2e %.2e\n", posx[i], posy[i], posz[i]);
// }
free(mass);
free(posx);
free(posy);
free(posz);
free(velx);
free(vely);
free(velz);
cudaFree(errl);
cudaFree(childl);
cudaFree(massl);
cudaFree(posxl);
cudaFree(posyl);
cudaFree(poszl);
cudaFree(countl);
cudaFree(startl);
cudaFree(maxxl);
cudaFree(maxyl);
cudaFree(maxzl);
cudaFree(minxl);
cudaFree(minyl);
cudaFree(minzl);
return 0;
}
|
2f03fa5bc20a1c7a895baca0671cbcc950dc40ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ extern "C" int main(int argc, char** argv);
#define WARPSIZE 32
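// get_lane_id() masks the x thread index to the warp size, yielding the calling thread's lane within
// its warp; __device_start then records each lane's return value of main() in res[lane].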
__device__ static unsigned get_lane_id(void)
{
return __nvvm_read_ptx_sreg_tid_x() & (WARPSIZE - 1);
}
__global__ extern "C" void __device_start(int argc, void* argv, int* res)
{
res[get_lane_id()] = main(argc, (char**)argv);
}
| 2f03fa5bc20a1c7a895baca0671cbcc950dc40ae.cu | __device__ extern "C" int main(int argc, char** argv);
#define WARPSIZE 32
__device__ static unsigned get_lane_id(void)
{
return __nvvm_read_ptx_sreg_tid_x() & (WARPSIZE - 1);
}
__global__ extern "C" void __device_start(int argc, void* argv, int* res)
{
res[get_lane_id()] = main(argc, (char**)argv);
}
|
6cb00a86bb43204cb67b20569c2aafe4c6e1fd8a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (C) 2011, Federico Raimondo ([email protected])
* Modified to build under Windows by Yunhui Zhou.
*
* This file is part of Cudaica.
*
* Cudaica is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* any later version.
*
* Cudaica is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Cudaica. If not, see <http://www.gnu.org/licenses/>.
*/
#include <error.h>
#include <stdio.h>
#include <config.h>
#include <windows.h>
#include <hip/hip_runtime.h>
/*
* Reset the errors waiting for being fetched.
*/
void ResetError() {
hipError_t newerr = hipGetLastError();
if (newerr != hipSuccess) {
DPRINTF(1, "DEBUG::Last error %s (%x)\n", hipGetErrorString(newerr), newerr);
}
}
/*
* Handles an error
*/
void HandleError(hipError_t err, const char *file, int line ) {
if (err != hipSuccess) {
fprintf(stderr, "ERROR::%s (%x) in %s at line %d\n", hipGetErrorString( err ), err, file, line );
hipError_t newerr = hipGetLastError();
if (newerr != err && newerr != hipSuccess) {
DPRINTF(1, "DEBUG::Another error %s (%x) in %s at line %d\n", hipGetErrorString(newerr), newerr, file, line);
}
exit( EXIT_FAILURE );
}
}
| 6cb00a86bb43204cb67b20569c2aafe4c6e1fd8a.cu | /*
* Copyright (C) 2011, Federico Raimondo ([email protected])
* Modified to build under Windows by Yunhui Zhou.
*
* This file is part of Cudaica.
*
* Cudaica is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* any later version.
*
* Cudaica is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Cudaica. If not, see <http://www.gnu.org/licenses/>.
*/
#include <error.h>
#include <stdio.h>
#include <config.h>
#include <windows.h>
#include <cuda_runtime.h>
/*
* Reset the errors waiting for being fetched.
*/
void ResetError() {
cudaError_t newerr = cudaGetLastError();
if (newerr != cudaSuccess) {
DPRINTF(1, "DEBUG::Last error %s (%x)\n", cudaGetErrorString(newerr), newerr);
}
}
/*
* Handles an error
*/
void HandleError(cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
fprintf(stderr, "ERROR::%s (%x) in %s at line %d\n", cudaGetErrorString( err ), err, file, line );
cudaError_t newerr = cudaGetLastError();
if (newerr != err && newerr != cudaSuccess) {
DPRINTF(1, "DEBUG::Another error %s (%x) in %s at line %d\n", cudaGetErrorString(newerr), newerr, file, line);
}
exit( EXIT_FAILURE );
}
}
|
cu_sigmoid.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
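// Elementwise logistic sigmoid, dst[i] = 1 / (1 + exp(-src[i])), computed with a grid-stride loop.
// Note that __fmul_rd/__expf/__fadd_rd/__fdividef are single-precision intrinsics, so the result has
// float accuracy even though src and dst are double arrays.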
__global__ void cu_sigmoid(double* src, double* dst, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
double tmp = __fmul_rd(src[tid], -1.0);
tmp = __expf(tmp);
tmp = __fadd_rd(tmp, 1.0);
dst[tid] = __fdividef(1.0, tmp);
tid += stride;
}
} | cu_sigmoid.cu | #include "includes.h"
__global__ void cu_sigmoid(double* src, double* dst, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
double tmp = __fmul_rd(src[tid], -1.0);
tmp = __expf(tmp);
tmp = __fadd_rd(tmp, 1.0);
dst[tid] = __fdividef(1.0, tmp);
tid += stride;
}
} |
d4c7add8420f06356033e69ddd358aff2f3d01e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialClassNLLCriterion.cu"
#else
void THNN_(SpatialClassNLLCriterion_shapeCheck)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *weights)
{
THArgCheck(THCIndexTensor_(nDimension)(state, target) == 3, 1,
"only batches of spatial targets supported (3D tensors)" \
" but got targets of dimension: %d",
THCIndexTensor_(nDimension)(state, target));
THArgCheck(THCTensor_(nDimension)(state, input) == 4, 2,
"only batches of spatial inputs supported (4D tensors), " \
"but got input of dimension: %d", THCTensor_(nDimension)(state, input));
if (THCTensor_(size)(state, input, 0) != THCIndexTensor_(size)(state, target, 0) ||
THCTensor_(size)(state, input, 2) != THCIndexTensor_(size)(state, target, 1) ||
THCTensor_(size)(state, input, 3) != THCIndexTensor_(size)(state, target, 2)) {
THCDescBuff input_size = THCTensor_(sizeDesc)(state, input);
THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target);
THError("input and target batch or spatial sizes don't match: target %s, input %s",
target_size.str, input_size.str);
}
if (weights && THCTensor_(nElement)(state, weights) != THCTensor_(size)(state, input, 1)) {
THError("weight tensor should be defined either for all or no classes");
}
}
void THNN_(SpatialClassNLLCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
bool sizeAverage,
THCTensor *weights,
THCTensor *total_weight,
long ignore_index)
{
THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);
if (weights)
THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight);
else
THCUNN_assertSameGPU(state, 4, input, target, output, total_weight);
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
real *input_data = THCTensor_(data)(state, input);
real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
real *output_data = THCTensor_(data)(state, output);
real *total_weight_data = THCTensor_(data)(state, total_weight);
THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);
THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size;
int blocks_per_sample = GET_BLOCKS(map_nelem) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
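// Launch configuration: each sample in the batch gets its own group of blocks_per_sample blocks
// (at least one), so total_blocks = batch_size * blocks_per_sample. The kernel accumulates the
// weighted NLL and total weight; when sizeAverage is set, the single-thread follow-up kernel
// normalises the accumulated loss by the total weight.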
THCTensor_(fill)(state, output, ScalarConvert<int, real>::to(0));
THCTensor_(fill)(state, total_weight, ScalarConvert<int, real>::to(0));
hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateOutput_kernel<real, accreal>)
, dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
sizeAverage,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
THCTensor_(size)(state, input, 2) * THCTensor_(size)(state, input, 3),
blocks_per_sample,
ignore_index
);
THCudaCheck(hipGetLastError());
if (sizeAverage) {
hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_sizeAverage_kernel), dim3(1), dim3(1), 0, THCState_getCurrentStream(state),
output_data, total_weight_data
);
THCudaCheck(hipGetLastError());
}
if (weights)
THCTensor_(free)(state, weights);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
}
void THNN_(SpatialClassNLLCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradInput,
bool sizeAverage,
THCTensor *weights,
THCTensor *total_weight,
long ignore_index)
{
THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);
THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4,
"gradInput must be contiguous");
if (weights)
THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight);
else
THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight);
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
real *gradInput_data = THCTensor_(data)(state, gradInput);
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
real *total_weight_data = THCTensor_(data)(state, total_weight);
THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);
THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size;
int blocks_per_sample = GET_BLOCKS(map_nelem) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateGradInput_kernel)
, dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
gradInput_data,
target_data,
weights_data,
total_weight_data,
sizeAverage,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
THCTensor_(size)(state, input, 2) *THCTensor_(size)(state, input, 3),
blocks_per_sample,
ignore_index
);
THCudaCheck(hipGetLastError());
if (weights)
THCTensor_(free)(state, weights);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
}
#endif
| d4c7add8420f06356033e69ddd358aff2f3d01e0.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialClassNLLCriterion.cu"
#else
void THNN_(SpatialClassNLLCriterion_shapeCheck)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *weights)
{
THArgCheck(THCIndexTensor_(nDimension)(state, target) == 3, 1,
"only batches of spatial targets supported (3D tensors)" \
" but got targets of dimension: %d",
THCIndexTensor_(nDimension)(state, target));
THArgCheck(THCTensor_(nDimension)(state, input) == 4, 2,
"only batches of spatial inputs supported (4D tensors), " \
"but got input of dimension: %d", THCTensor_(nDimension)(state, input));
if (THCTensor_(size)(state, input, 0) != THCIndexTensor_(size)(state, target, 0) ||
THCTensor_(size)(state, input, 2) != THCIndexTensor_(size)(state, target, 1) ||
THCTensor_(size)(state, input, 3) != THCIndexTensor_(size)(state, target, 2)) {
THCDescBuff input_size = THCTensor_(sizeDesc)(state, input);
THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target);
THError("input and target batch or spatial sizes don't match: target %s, input %s",
target_size.str, input_size.str);
}
if (weights && THCTensor_(nElement)(state, weights) != THCTensor_(size)(state, input, 1)) {
THError("weight tensor should be defined either for all or no classes");
}
}
void THNN_(SpatialClassNLLCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
bool sizeAverage,
THCTensor *weights,
THCTensor *total_weight,
long ignore_index)
{
THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);
if (weights)
THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight);
else
THCUNN_assertSameGPU(state, 4, input, target, output, total_weight);
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
real *input_data = THCTensor_(data)(state, input);
real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
real *output_data = THCTensor_(data)(state, output);
real *total_weight_data = THCTensor_(data)(state, total_weight);
THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);
THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size;
int blocks_per_sample = GET_BLOCKS(map_nelem) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
THCTensor_(fill)(state, output, ScalarConvert<int, real>::to(0));
THCTensor_(fill)(state, total_weight, ScalarConvert<int, real>::to(0));
cunn_SpatialClassNLLCriterion_updateOutput_kernel<real, accreal>
<<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
output_data,
total_weight_data,
input_data,
target_data,
weights_data,
sizeAverage,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
THCTensor_(size)(state, input, 2) * THCTensor_(size)(state, input, 3),
blocks_per_sample,
ignore_index
);
THCudaCheck(cudaGetLastError());
if (sizeAverage) {
cunn_SpatialClassNLLCriterion_sizeAverage_kernel<<<1, 1, 0, THCState_getCurrentStream(state)>>>(
output_data, total_weight_data
);
THCudaCheck(cudaGetLastError());
}
if (weights)
THCTensor_(free)(state, weights);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
}
void THNN_(SpatialClassNLLCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradInput,
bool sizeAverage,
THCTensor *weights,
THCTensor *total_weight,
long ignore_index)
{
THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights);
THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4,
"gradInput must be contiguous");
if (weights)
THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight);
else
THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight);
input = THCTensor_(newContiguous)(state, input);
weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
target = THCIndexTensor_(newContiguous)(state, target);
real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
real *gradInput_data = THCTensor_(data)(state, gradInput);
THCIndex_t *target_data = THCIndexTensor_(data)(state, target);
real *total_weight_data = THCTensor_(data)(state, total_weight);
THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0);
THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size;
int blocks_per_sample = GET_BLOCKS(map_nelem) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
cunn_SpatialClassNLLCriterion_updateGradInput_kernel
<<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
gradInput_data,
target_data,
weights_data,
total_weight_data,
sizeAverage,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
THCTensor_(size)(state, input, 2) *THCTensor_(size)(state, input, 3),
blocks_per_sample,
ignore_index
);
THCudaCheck(cudaGetLastError());
if (weights)
THCTensor_(free)(state, weights);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, input);
}
#endif
|
a47d198f74dbf6d7f1b1bfb273db91f63347b295.hip | // !!! This is a file automatically generated by hipify!!!
/*
* spGPU - Sparse matrices on GPU library.
*
* Copyright (C) 2010 - 2015
* Davide Barbieri - University of Rome Tor Vergata
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 3 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "stdio.h"
#include "cudadebug.h"
#include "cudalang.h"
extern "C"
{
#include "core.h"
#include "vector.h"
}
#include "debug.h"
#define VALUE_TYPE hipDoubleComplex
#define TYPE_SYMBOL Z
#include "setscal_base.cuh"
| a47d198f74dbf6d7f1b1bfb273db91f63347b295.cu | /*
* spGPU - Sparse matrices on GPU library.
*
* Copyright (C) 2010 - 2015
* Davide Barbieri - University of Rome Tor Vergata
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 3 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "stdio.h"
#include "cudadebug.h"
#include "cudalang.h"
extern "C"
{
#include "core.h"
#include "vector.h"
}
#include "debug.h"
#define VALUE_TYPE cuDoubleComplex
#define TYPE_SYMBOL Z
#include "setscal_base.cuh"
|
8222c0570b461c07df5a70691fd5401b84aa3a0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* runmcmc.cu
Tim Behrens, Saad Jbabdi, Stam Sotiropoulos, Moises Hernandez - FMRIB Image Analysis Group
Copyright (C) 2005 University of Oxford */
/* Part of FSL - FMRIB's Software Library
http://www.fmrib.ox.ac.uk/fsl
[email protected]
Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
Imaging of the Brain), Department of Clinical Neurology, Oxford
University, Oxford, UK
LICENCE
FMRIB Software Library, Release 5.0 (c) 2012, The University of
Oxford (the "Software")
The Software remains the property of the University of Oxford ("the
University").
The Software is distributed "AS IS" under this Licence solely for
non-commercial use in the hope that it will be useful, but in order
that the University as a charitable foundation protects its assets for
the benefit of its educational and research purposes, the University
makes clear that no condition is made or to be implied, nor is any
warranty given or to be implied, as to the accuracy of the Software,
or that it will be suitable for any particular purpose or for use
under any specific conditions. Furthermore, the University disclaims
all responsibility for the use which is made of the Software. It
further disclaims any liability for the outcomes arising from using
the Software.
The Licensee agrees to indemnify the University and hold the
University harmless from and against any and all claims, damages and
liabilities asserted by third parties (including claims for
negligence) which arise directly or indirectly from the use of the
Software or the sale of any products based on the Software.
No part of the Software may be reproduced, modified, transmitted or
transferred in any form or by any means, electronic or mechanical,
without the express permission of the University. The permission of
the University is not required if the said reproduction, modification,
transmission or transference is done without financial return, the
conditions of this Licence are imposed upon the receiver of the
product, and all original and amended source code is included in any
transmitted product. You may be held legally responsible for any
copyright infringement that is caused or encouraged by your failure to
abide by these terms and conditions.
You are not permitted under this Licence to use this Software
commercially. Use for which any financial return is received shall be
defined as commercial use, and includes (1) integration of all or part
of the source code or the Software into a product for sale or license
by or on behalf of Licensee to third parties or (2) use of the
Software or any derivative of it for research with the final aim of
developing software products for sale or license to a third party or
(3) use of the Software or any derivative of it for research with the
final aim of developing non-software products for sale or license to a
third party, or (4) use of the Software to provide any service to an
external organisation for which payment is received. If you are
interested in using the Software commercially, please contact Isis
Innovation Limited ("Isis"), the technology transfer company of the
University, to negotiate a licence. Contact details are:
[email protected] quoting reference DE/9564. */
#include "xfibresoptions.h"
#include <hiprand/hiprand.h>
#include "runmcmc_kernels.hip"
#include "sync_check.h"
#include <host_vector.h>
#include <device_vector.h>
#include <time.h>
#include <sys/time.h>
#include "init_gpu.h"
using namespace Xfibres;
//////////////////////////////////////////////////////
// MCMC IN GPU
//////////////////////////////////////////////////////
void init_Fibres_Multifibres( //INPUT
thrust::device_vector<float> datam_gpu,
thrust::device_vector<float> params_gpu,
thrust::device_vector<float> tau_gpu,
thrust::device_vector<float> bvals_gpu,
thrust::device_vector<double> alpha_gpu,
thrust::device_vector<double> beta_gpu,
const int ndirections,
string output_file,
//OUTPUT
thrust::device_vector<FibreGPU>& fibres_gpu,
thrust::device_vector<MultifibreGPU>& multifibres_gpu,
thrust::device_vector<double>& signals_gpu,
thrust::device_vector<double>& isosignals_gpu)
{
std::ofstream myfile;
myfile.open (output_file.data(), ios::out | ios::app );
myfile << "----- MCMC ALGORITHM PART INITIALISATION IN GPU ----- " << "\n";
struct timeval t1,t2;
double time;
gettimeofday(&t1,NULL);
int nvox = multifibres_gpu.size();
xfibresOptions& opts = xfibresOptions::getInstance();
int nfib= opts.nfibres.value();
int nparams_fit = 2+3*opts.nfibres.value();
if(opts.modelnum.value()>=2) nparams_fit++;
if(opts.f0.value()) nparams_fit++;
thrust::device_vector<double> angtmp_gpu;
angtmp_gpu.resize(nvox*ndirections*nfib);
bool gradnonlin = opts.grad_file.set();
int blocks = nvox;
dim3 Dim_Grid_MCMC(blocks, 1);
dim3 Dim_Block_MCMC(THREADS_BLOCK_MCMC ,1); ///dimensions for MCMC
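// One block is launched per voxel: THREADS_BLOCK_MCMC threads cooperate on that voxel's ndirections
// measurements, and amount_shared below reserves one double per thread plus a few per-voxel floats.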
float *datam_ptr = thrust::raw_pointer_cast(datam_gpu.data());
float *params_ptr = thrust::raw_pointer_cast(params_gpu.data());
float *tau_ptr = thrust::raw_pointer_cast(tau_gpu.data());
float *bvals_ptr = thrust::raw_pointer_cast(bvals_gpu.data());
double *alpha_ptr = thrust::raw_pointer_cast(alpha_gpu.data());
double *beta_ptr = thrust::raw_pointer_cast(beta_gpu.data());
FibreGPU *fibres_ptr = thrust::raw_pointer_cast(fibres_gpu.data());
MultifibreGPU *multifibres_ptr = thrust::raw_pointer_cast(multifibres_gpu.data());
double *signals_ptr = thrust::raw_pointer_cast(signals_gpu.data());
double *isosignals_ptr = thrust::raw_pointer_cast(isosignals_gpu.data());
double *angtmp_ptr = thrust::raw_pointer_cast(angtmp_gpu.data());
int amount_shared = (THREADS_BLOCK_MCMC)*sizeof(double) + (3*nfib + 9)*sizeof(float) + sizeof(int);
myfile << "Shared Memory Used in init_Fibres_Multifibres: " << amount_shared << "\n";
hipLaunchKernelGGL(( init_Fibres_Multifibres_kernel), dim3(Dim_Grid_MCMC), dim3(Dim_Block_MCMC), amount_shared, 0, datam_ptr, params_ptr, tau_ptr, bvals_ptr, alpha_ptr, beta_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams_fit, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.rician.value(), opts.ardf0.value(), opts.all_ard.value(), opts.no_ard.value(), gradnonlin, angtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr);
sync_check("init_Fibres_Multifibres_kernel");
gettimeofday(&t2,NULL);
time=timeval_diff(&t2,&t1);
myfile << "TIME TOTAL: " << time << " seconds\n";
myfile << "-----------------------------------------------------" << "\n\n" ;
myfile.close();
}
void runmcmc_burnin( //INPUT
thrust::device_vector<float> datam_gpu,
thrust::device_vector<float> bvals_gpu,
thrust::device_vector<double> alpha_gpu,
thrust::device_vector<double> beta_gpu,
const int ndirections,
double seed,
string output_file,
//INPUT-OUTPUT
thrust::device_vector<FibreGPU>& fibres_gpu,
thrust::device_vector<MultifibreGPU>& multifibres_gpu,
thrust::device_vector<double>& signals_gpu,
thrust::device_vector<double>& isosignals_gpu)
{
xfibresOptions& opts = xfibresOptions::getInstance();
std::ofstream myfile;
myfile.open (output_file.data(), ios::out | ios::app );
myfile << "--------- MCMC ALGORITHM PART BURNIN IN GPU --------- " << "\n";
struct timeval t1,t2,t_tot1,t_tot2;
double time,timecurand,timemcmc;
time=0;
timecurand=0;
timemcmc=0;
gettimeofday(&t_tot1,NULL);
size_t free,total;
int nvox = multifibres_gpu.size();
int nfib= opts.nfibres.value();
int nparams;
bool gradnonlin=opts.grad_file.set();
if(opts.f0.value()) nparams=3+nfib*3;
else nparams=2+nfib*3;
if(opts.modelnum.value()>=2) nparams++;
if(opts.modelnum.value()==3) nparams++;
if(opts.rician.value()) nparams++;
thrust::device_vector<float> recors_null_gpu;
recors_null_gpu.resize(1);
thrust::device_vector<double> angtmp_gpu;
thrust::device_vector<double> oldangtmp_gpu;
thrust::device_vector<double> oldsignals_gpu;
thrust::device_vector<double> oldisosignals_gpu;
angtmp_gpu.resize(nvox*ndirections*nfib);
oldangtmp_gpu.resize(nvox*ndirections);
oldsignals_gpu.resize(nvox*ndirections*nfib);
oldisosignals_gpu.resize(nvox*ndirections);
unsigned int totalrandoms=(opts.nburn.value() * nvox * nparams);
cuMemGetInfo(&free,&total);
myfile << "Free memory Before Randoms: "<< free << " ---- Total memory: " << total << "\n";
//4 bytes per float, 2 random arrays, using 80% of the free memory available at this moment
unsigned int maxrandoms=((free*0.8)/(4*2));
myfile << "Total randoms: " << totalrandoms << "\n";
myfile << "Max randoms: " << maxrandoms << "\n";
int steps; //number of full steps needed when there is not enough memory for all the randoms at once
int minrandoms; //minimum number of randoms needed per iteration (nvox * nparams)
minrandoms= nvox * nparams;
int iters_step=0;
int nrandoms=0;
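//If all the randoms do not fit in 80% of the free GPU memory, the iterations are split into
//'steps' passes of 'iters_step' iterations each plus a final pass of 'last_step' iterations,
//regenerating the normal and uniform random pools before every pass.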
if(totalrandoms>maxrandoms){
iters_step = maxrandoms / minrandoms; //iterations in each step
nrandoms = iters_step*minrandoms; //nrandoms for each step
steps = (opts.nburn.value()/iters_step); //repeat the process 'steps' times; not enough memory for all the randoms at once
}else{
nrandoms = totalrandoms;
iters_step= opts.nburn.value();
steps = 0;
}
if(nrandoms%2){ //CURAND must generate a multiple of 2 random numbers
nrandoms++;
}
myfile << "Process " << opts.nburn.value() << " iterations divided in "<< steps << " steps with "<< iters_step << " iterations in each one" << "\n";
int last_step = opts.nburn.value() - (iters_step*steps);
int last_randoms = (last_step*minrandoms);
if(last_randoms%2){ //CURAND must generate a multiple of 2 random numbers
last_randoms++;
}
myfile << "Last step with " << last_step << " iterations" << "\n";
thrust::device_vector<float> randomsN_gpu;
thrust::device_vector<float> randomsU_gpu;
randomsN_gpu.resize(nrandoms);
randomsU_gpu.resize(nrandoms);
cuMemGetInfo(&free,&total);
myfile << "Free memory after Malloc Randoms: "<< free << " ---- Total memory: " << total << "\n";
int blocks = nvox;
dim3 Dim_Grid(blocks, 1);
dim3 Dim_Block(THREADS_BLOCK_MCMC,1); //dimensions for MCMC
myfile << "\n" << "NUM BLOCKS: " << blocks << "\n";
myfile << "THREADS PER BLOCK : " << THREADS_BLOCK_MCMC << "\n\n";
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen,HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen,seed);
//get pointers
float *datam_ptr = thrust::raw_pointer_cast(datam_gpu.data());
float *bvals_ptr = thrust::raw_pointer_cast(bvals_gpu.data());
double *alpha_ptr = thrust::raw_pointer_cast(alpha_gpu.data());
double *beta_ptr = thrust::raw_pointer_cast(beta_gpu.data());
float *randomsN_ptr = thrust::raw_pointer_cast(randomsN_gpu.data());
float *randomsU_ptr = thrust::raw_pointer_cast(randomsU_gpu.data());
FibreGPU *fibres_ptr = thrust::raw_pointer_cast(fibres_gpu.data());
MultifibreGPU *multifibres_ptr = thrust::raw_pointer_cast(multifibres_gpu.data());
double *signals_ptr = thrust::raw_pointer_cast(signals_gpu.data());
double *isosignals_ptr = thrust::raw_pointer_cast(isosignals_gpu.data());
double *angtmp_ptr = thrust::raw_pointer_cast(angtmp_gpu.data());
double *oldangtmp_ptr = thrust::raw_pointer_cast(oldangtmp_gpu.data());
double *oldsignals_ptr = thrust::raw_pointer_cast(oldsignals_gpu.data());
double *oldisosignals_ptr = thrust::raw_pointer_cast(oldisosignals_gpu.data());
float *records_null = thrust::raw_pointer_cast(recors_null_gpu.data());
int amount_shared = (THREADS_BLOCK_MCMC)*sizeof(double) + (10*nfib + 2*nparams + 27)*sizeof(float) + (7*nfib + 21)*sizeof(int);
myfile << "Shared Memory Used in runmcmc_burnin: " << amount_shared << "\n";
for(int i=0;i<steps;i++){
gettimeofday(&t1,NULL);
hiprandStatus_t status = hiprandGenerateNormal(gen,randomsN_ptr,nrandoms,0,1);
if (status != HIPRAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
status = hiprandGenerateUniform(gen,randomsU_ptr,nrandoms); //generate randoms
if (status != HIPRAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
gettimeofday(&t2,NULL);
timecurand+=timeval_diff(&t2,&t1);
gettimeofday(&t1,NULL);
hipLaunchKernelGGL(( runmcmc_kernel), dim3(Dim_Grid), dim3(Dim_Block), amount_shared , 0, datam_ptr, bvals_ptr, alpha_ptr, beta_ptr, randomsN_ptr, randomsU_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.ardf0.value(), !opts.no_ard.value(), opts.rician.value(), gradnonlin, opts.updateproposalevery.value(), iters_step, (i*iters_step), 0, 0, 0, oldsignals_ptr, oldisosignals_ptr, angtmp_ptr, oldangtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr,records_null,records_null,records_null,records_null,records_null,records_null,records_null,records_null, records_null);
sync_check("runmcmc_burnin_kernel");
gettimeofday(&t2,NULL);
timemcmc+=timeval_diff(&t2,&t1);
}
gettimeofday(&t1,NULL);
if(nvox!=0){
hiprandStatus_t status = hiprandGenerateNormal(gen,randomsN_ptr,last_randoms,0,1);
if (status != HIPRAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
status = hiprandGenerateUniform(gen,randomsU_ptr,last_randoms); //generate randoms
if (status != HIPRAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
}
gettimeofday(&t2,NULL);
timecurand+=timeval_diff(&t2,&t1);
gettimeofday(&t1,NULL);
if(nvox!=0){
hipLaunchKernelGGL(( runmcmc_kernel), dim3(Dim_Grid), dim3(Dim_Block), amount_shared , 0, datam_ptr, bvals_ptr, alpha_ptr, beta_ptr, randomsN_ptr, randomsU_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.ardf0.value(), !opts.no_ard.value(), opts.rician.value(), gradnonlin, opts.updateproposalevery.value(), last_step, (steps*iters_step), 0, 0, 0, oldsignals_ptr, oldisosignals_ptr, angtmp_ptr, oldangtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr,records_null,records_null,records_null,records_null,records_null,records_null,records_null, records_null,records_null);
sync_check("runmcmc_burnin_kernel");
}
gettimeofday(&t2,NULL);
timemcmc+=timeval_diff(&t2,&t1);
myfile << "TIME CURAND: " << timecurand << " seconds\n";
myfile << "TIME RUNMCMC: " << timemcmc << " seconds\n";
hiprandDestroyGenerator(gen);
gettimeofday(&t_tot2,NULL);
time=timeval_diff(&t_tot2,&t_tot1);
myfile << "TIME TOTAL: " << time << " seconds\n";
myfile << "-----------------------------------------------------" << "\n\n" ;
myfile.close();
sync_check("runmcmc_burnin");
}
void runmcmc_record( //INPUT
thrust::device_vector<float> datam_gpu,
thrust::device_vector<float> bvals_gpu,
thrust::device_vector<double> alpha_gpu,
thrust::device_vector<double> beta_gpu,
thrust::device_vector<FibreGPU> fibres_gpu,
thrust::device_vector<MultifibreGPU> multifibres_gpu,
thrust::device_vector<double> signals_gpu,
thrust::device_vector<double> isosignals_gpu,
const int ndirections,
double seed,
string output_file,
//OUTPUT
thrust::device_vector<float>& rf0_gpu,
thrust::device_vector<float>& rtau_gpu,
thrust::device_vector<float>& rs0_gpu,
thrust::device_vector<float>& rd_gpu,
thrust::device_vector<float>& rdstd_gpu,
thrust::device_vector<float>& rR_gpu,
thrust::device_vector<float>& rth_gpu,
thrust::device_vector<float>& rph_gpu,
thrust::device_vector<float>& rf_gpu)
{
xfibresOptions& opts = xfibresOptions::getInstance();
std::ofstream myfile;
myfile.open (output_file.data(), ios::out | ios::app );
myfile << "--------- MCMC ALGORITHM PART RECORD IN GPU --------- " << "\n";
struct timeval t1,t2,t_tot1,t_tot2;
double time,timecurand,timemcmc;
time=0;
timecurand=0;
timemcmc=0;
gettimeofday(&t_tot1,NULL);
size_t free,total;
int totalrecords = (opts.njumps.value()/opts.sampleevery.value());
int nvox = multifibres_gpu.size();
int nfib= opts.nfibres.value();
int nparams;
bool gradnonlin=opts.grad_file.set();
if(opts.f0.value()) nparams=3+nfib*3;
else nparams=2+nfib*3;
if(opts.modelnum.value()>=2) nparams++;
if(opts.modelnum.value()==3) nparams++;
if(opts.rician.value()) nparams++;
thrust::device_vector<double> angtmp_gpu;
thrust::device_vector<double> oldangtmp_gpu;
thrust::device_vector<double> oldsignals_gpu;
thrust::device_vector<double> oldisosignals_gpu;
angtmp_gpu.resize(nvox*ndirections*nfib);
oldangtmp_gpu.resize(nvox*ndirections);
oldsignals_gpu.resize(nvox*ndirections*nfib);
oldisosignals_gpu.resize(nvox*ndirections);
unsigned int totalrandoms=(opts.njumps.value() * nvox * nparams);
cuMemGetInfo(&free,&total);
myfile << "Free memory Before Randoms: "<< free << " ---- Total memory: " << total << "\n";
//4 bytes per float, 2 random arrays, using 80% of the free memory available at this moment
unsigned int maxrandoms=((free*0.8)/(4*2));
myfile << "Total randoms: " << totalrandoms << "\n";
myfile << "Max randoms: " << maxrandoms << "\n";
int steps; //number of full steps needed when there is not enough memory for all the randoms at once
int minrandoms; //minimum number of randoms needed per iteration (nvox * nparams)
minrandoms= nvox * nparams;
int iters_step=0;
int nrandoms=0;
if(totalrandoms>maxrandoms){
iters_step = maxrandoms / minrandoms; //iterations in each step
nrandoms = iters_step*minrandoms; //nrandoms for each step
steps = (opts.njumps.value()/iters_step); //repeat the process 'steps' times; not enough memory for all the randoms at once
}else{
nrandoms = totalrandoms;
iters_step= opts.njumps.value();
steps = 0;
}
if(nrandoms%2){ //CURAND must generate a multiple of 2 random numbers
nrandoms++;
}
myfile << "Process " << opts.njumps.value() << " iterations divided in "<< steps << " steps with "<< iters_step << " iterations in each one" << "\n";
int last_step = opts.njumps.value() - (iters_step*steps);
int last_randoms = (last_step*minrandoms);
if(last_randoms%2){ //CURAND must generate a multiple of 2 random numbers
last_randoms++;
}
myfile << "Last step with " << last_step << " iterations" << "\n";
thrust::device_vector<float> randomsN_gpu;
thrust::device_vector<float> randomsU_gpu;
randomsN_gpu.resize(nrandoms);
randomsU_gpu.resize(nrandoms);
cuMemGetInfo(&free,&total);
myfile << "Free memory after Malloc Randoms: "<< free << " ---- Total memory: " << total << "\n";
int blocks = nvox;
dim3 Dim_Grid(blocks, 1);
dim3 Dim_Block(THREADS_BLOCK_MCMC,1); //dimensions for MCMC
myfile << "\n" << "NUM BLOCKS: " << blocks << "\n";
myfile << "THREADS PER BLOCK : " << THREADS_BLOCK_MCMC << "\n\n";
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen,HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen,seed);
//get pointers
float *datam_ptr = thrust::raw_pointer_cast(datam_gpu.data());
float *bvals_ptr = thrust::raw_pointer_cast(bvals_gpu.data());
double *alpha_ptr = thrust::raw_pointer_cast(alpha_gpu.data());
double *beta_ptr = thrust::raw_pointer_cast(beta_gpu.data());
float *randomsN_ptr = thrust::raw_pointer_cast(randomsN_gpu.data());
float *randomsU_ptr = thrust::raw_pointer_cast(randomsU_gpu.data());
FibreGPU *fibres_ptr = thrust::raw_pointer_cast(fibres_gpu.data());
MultifibreGPU *multifibres_ptr = thrust::raw_pointer_cast(multifibres_gpu.data());
double *signals_ptr = thrust::raw_pointer_cast(signals_gpu.data());
double *isosignals_ptr = thrust::raw_pointer_cast(isosignals_gpu.data());
double *angtmp_ptr = thrust::raw_pointer_cast(angtmp_gpu.data());
double *oldangtmp_ptr = thrust::raw_pointer_cast(oldangtmp_gpu.data());
double *oldsignals_ptr = thrust::raw_pointer_cast(oldsignals_gpu.data());
double *oldisosignals_ptr = thrust::raw_pointer_cast(oldisosignals_gpu.data());
float *rf0_ptr = thrust::raw_pointer_cast(rf0_gpu.data());
float *rtau_ptr = thrust::raw_pointer_cast(rtau_gpu.data());
float *rs0_ptr = thrust::raw_pointer_cast(rs0_gpu.data());
float *rd_ptr = thrust::raw_pointer_cast(rd_gpu.data());
float *rdstd_ptr = thrust::raw_pointer_cast(rdstd_gpu.data());
float *rR_ptr = thrust::raw_pointer_cast(rR_gpu.data());
float *rth_ptr = thrust::raw_pointer_cast(rth_gpu.data());
float *rph_ptr = thrust::raw_pointer_cast(rph_gpu.data());
float *rf_ptr = thrust::raw_pointer_cast(rf_gpu.data());
int amount_shared = (THREADS_BLOCK_MCMC)*sizeof(double) + (10*nfib + 2*nparams + 27)*sizeof(float) + (7*nfib + 21)*sizeof(int);
myfile << "Shared Memory Used in runmcmc_record: " << amount_shared << "\n";
for(int i=0;i<steps;i++){
gettimeofday(&t1,NULL);
hiprandStatus_t status = hiprandGenerateNormal(gen,randomsN_ptr,nrandoms,0,1);
if (status != HIPRAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
status = hiprandGenerateUniform(gen,randomsU_ptr,nrandoms); //generate randoms
if (status != HIPRAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
gettimeofday(&t2,NULL);
timecurand+=timeval_diff(&t2,&t1);
gettimeofday(&t1,NULL);
hipLaunchKernelGGL(( runmcmc_kernel), dim3(Dim_Grid), dim3(Dim_Block), amount_shared , 0, datam_ptr, bvals_ptr, alpha_ptr, beta_ptr, randomsN_ptr, randomsU_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.ardf0.value(), !opts.no_ard.value(), opts.rician.value(), gradnonlin, opts.updateproposalevery.value(), iters_step, (i*iters_step), opts.nburn.value(), opts.sampleevery.value(), totalrecords, oldsignals_ptr, oldisosignals_ptr, angtmp_ptr, oldangtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr, rf0_ptr, rtau_ptr, rs0_ptr, rd_ptr, rdstd_ptr, rR_ptr, rth_ptr, rph_ptr, rf_ptr);
sync_check("runmcmc_record_kernel");
gettimeofday(&t2,NULL);
timemcmc+=timeval_diff(&t2,&t1);
}
gettimeofday(&t1,NULL);
if(nvox!=0){
hiprandStatus_t status = hiprandGenerateNormal(gen,randomsN_ptr,last_randoms,0,1);
if (status != HIPRAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
status = hiprandGenerateUniform(gen,randomsU_ptr,last_randoms); //generate randoms
if (status != HIPRAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
}
gettimeofday(&t2,NULL);
timecurand+=timeval_diff(&t2,&t1);
gettimeofday(&t1,NULL);
if(nvox!=0){
hipLaunchKernelGGL(( runmcmc_kernel), dim3(Dim_Grid), dim3(Dim_Block), amount_shared , 0, datam_ptr, bvals_ptr, alpha_ptr, beta_ptr,randomsN_ptr, randomsU_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.ardf0.value(), !opts.no_ard.value(), opts.rician.value(), gradnonlin, opts.updateproposalevery.value(), last_step, (steps*iters_step), opts.nburn.value(), opts.sampleevery.value(), totalrecords, oldsignals_ptr, oldisosignals_ptr, angtmp_ptr, oldangtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr, rf0_ptr, rtau_ptr, rs0_ptr, rd_ptr, rdstd_ptr, rR_ptr, rth_ptr, rph_ptr, rf_ptr);
sync_check("runmcmc_record_kernel");
}
gettimeofday(&t2,NULL);
timemcmc+=timeval_diff(&t2,&t1);
myfile << "TIME CURAND: " << timecurand << " seconds\n";
myfile << "TIME RUNMCMC: " << timemcmc << " seconds\n";
hiprandDestroyGenerator(gen);
gettimeofday(&t_tot2,NULL);
time=timeval_diff(&t_tot2,&t_tot1);
myfile << "TIME TOTAL: " << time << " seconds\n";
myfile << "-----------------------------------------------------" << "\n" ;
myfile.close();
sync_check("runmcmc_record");
}
| 8222c0570b461c07df5a70691fd5401b84aa3a0f.cu | /* runmcmc.cu
Tim Behrens, Saad Jbabdi, Stam Sotiropoulos, Moises Hernandez - FMRIB Image Analysis Group
Copyright (C) 2005 University of Oxford */
/* Part of FSL - FMRIB's Software Library
http://www.fmrib.ox.ac.uk/fsl
[email protected]
Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
Imaging of the Brain), Department of Clinical Neurology, Oxford
University, Oxford, UK
LICENCE
FMRIB Software Library, Release 5.0 (c) 2012, The University of
Oxford (the "Software")
The Software remains the property of the University of Oxford ("the
University").
The Software is distributed "AS IS" under this Licence solely for
non-commercial use in the hope that it will be useful, but in order
that the University as a charitable foundation protects its assets for
the benefit of its educational and research purposes, the University
makes clear that no condition is made or to be implied, nor is any
warranty given or to be implied, as to the accuracy of the Software,
or that it will be suitable for any particular purpose or for use
under any specific conditions. Furthermore, the University disclaims
all responsibility for the use which is made of the Software. It
further disclaims any liability for the outcomes arising from using
the Software.
The Licensee agrees to indemnify the University and hold the
University harmless from and against any and all claims, damages and
liabilities asserted by third parties (including claims for
negligence) which arise directly or indirectly from the use of the
Software or the sale of any products based on the Software.
No part of the Software may be reproduced, modified, transmitted or
transferred in any form or by any means, electronic or mechanical,
without the express permission of the University. The permission of
the University is not required if the said reproduction, modification,
transmission or transference is done without financial return, the
conditions of this Licence are imposed upon the receiver of the
product, and all original and amended source code is included in any
transmitted product. You may be held legally responsible for any
copyright infringement that is caused or encouraged by your failure to
abide by these terms and conditions.
You are not permitted under this Licence to use this Software
commercially. Use for which any financial return is received shall be
defined as commercial use, and includes (1) integration of all or part
of the source code or the Software into a product for sale or license
by or on behalf of Licensee to third parties or (2) use of the
Software or any derivative of it for research with the final aim of
developing software products for sale or license to a third party or
(3) use of the Software or any derivative of it for research with the
final aim of developing non-software products for sale or license to a
third party, or (4) use of the Software to provide any service to an
external organisation for which payment is received. If you are
interested in using the Software commercially, please contact Isis
Innovation Limited ("Isis"), the technology transfer company of the
University, to negotiate a licence. Contact details are:
[email protected] quoting reference DE/9564. */
#include "xfibresoptions.h"
#include <curand.h>
#include "runmcmc_kernels.cu"
#include "sync_check.h"
#include <host_vector.h>
#include <device_vector.h>
#include <time.h>
#include <sys/time.h>
#include "init_gpu.h"
using namespace Xfibres;
//////////////////////////////////////////////////////
// MCMC IN GPU
//////////////////////////////////////////////////////
void init_Fibres_Multifibres( //INPUT
thrust::device_vector<float> datam_gpu,
thrust::device_vector<float> params_gpu,
thrust::device_vector<float> tau_gpu,
thrust::device_vector<float> bvals_gpu,
thrust::device_vector<double> alpha_gpu,
thrust::device_vector<double> beta_gpu,
const int ndirections,
string output_file,
//OUTPUT
thrust::device_vector<FibreGPU>& fibres_gpu,
thrust::device_vector<MultifibreGPU>& multifibres_gpu,
thrust::device_vector<double>& signals_gpu,
thrust::device_vector<double>& isosignals_gpu)
{
std::ofstream myfile;
myfile.open (output_file.data(), ios::out | ios::app );
myfile << "----- MCMC ALGORITHM PART INITIALITATION IN GPU ----- " << "\n";
struct timeval t1,t2;
double time;
gettimeofday(&t1,NULL);
int nvox = multifibres_gpu.size();
xfibresOptions& opts = xfibresOptions::getInstance();
int nfib= opts.nfibres.value();
int nparams_fit = 2+3*opts.nfibres.value();
if(opts.modelnum.value()>=2) nparams_fit++;
if(opts.f0.value()) nparams_fit++;
thrust::device_vector<double> angtmp_gpu;
angtmp_gpu.resize(nvox*ndirections*nfib);
bool gradnonlin = opts.grad_file.set();
int blocks = nvox;
dim3 Dim_Grid_MCMC(blocks, 1);
dim3 Dim_Block_MCMC(THREADS_BLOCK_MCMC ,1); ///dimensions for MCMC
float *datam_ptr = thrust::raw_pointer_cast(datam_gpu.data());
float *params_ptr = thrust::raw_pointer_cast(params_gpu.data());
float *tau_ptr = thrust::raw_pointer_cast(tau_gpu.data());
float *bvals_ptr = thrust::raw_pointer_cast(bvals_gpu.data());
double *alpha_ptr = thrust::raw_pointer_cast(alpha_gpu.data());
double *beta_ptr = thrust::raw_pointer_cast(beta_gpu.data());
FibreGPU *fibres_ptr = thrust::raw_pointer_cast(fibres_gpu.data());
MultifibreGPU *multifibres_ptr = thrust::raw_pointer_cast(multifibres_gpu.data());
double *signals_ptr = thrust::raw_pointer_cast(signals_gpu.data());
double *isosignals_ptr = thrust::raw_pointer_cast(isosignals_gpu.data());
double *angtmp_ptr = thrust::raw_pointer_cast(angtmp_gpu.data());
int amount_shared = (THREADS_BLOCK_MCMC)*sizeof(double) + (3*nfib + 9)*sizeof(float) + sizeof(int);
myfile << "Shared Memory Used in init_Fibres_Multifibres: " << amount_shared << "\n";
init_Fibres_Multifibres_kernel<<< Dim_Grid_MCMC, Dim_Block_MCMC, amount_shared>>>(datam_ptr, params_ptr, tau_ptr, bvals_ptr, alpha_ptr, beta_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams_fit, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.rician.value(), opts.ardf0.value(), opts.all_ard.value(), opts.no_ard.value(), gradnonlin, angtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr);
sync_check("init_Fibres_Multifibres_kernel");
gettimeofday(&t2,NULL);
time=timeval_diff(&t2,&t1);
myfile << "TIME TOTAL: " << time << " seconds\n";
myfile << "-----------------------------------------------------" << "\n\n" ;
myfile.close();
}
void runmcmc_burnin( //INPUT
thrust::device_vector<float> datam_gpu,
thrust::device_vector<float> bvals_gpu,
thrust::device_vector<double> alpha_gpu,
thrust::device_vector<double> beta_gpu,
const int ndirections,
double seed,
string output_file,
//INPUT-OUTPUT
thrust::device_vector<FibreGPU>& fibres_gpu,
thrust::device_vector<MultifibreGPU>& multifibres_gpu,
thrust::device_vector<double>& signals_gpu,
thrust::device_vector<double>& isosignals_gpu)
{
xfibresOptions& opts = xfibresOptions::getInstance();
std::ofstream myfile;
myfile.open (output_file.data(), ios::out | ios::app );
myfile << "--------- MCMC ALGORITHM PART BURNIN IN GPU --------- " << "\n";
struct timeval t1,t2,t_tot1,t_tot2;
double time,timecurand,timemcmc;
time=0;
timecurand=0;
timemcmc=0;
gettimeofday(&t_tot1,NULL);
size_t free,total;
int nvox = multifibres_gpu.size();
int nfib= opts.nfibres.value();
int nparams;
bool gradnonlin=opts.grad_file.set();
if(opts.f0.value()) nparams=3+nfib*3;
else nparams=2+nfib*3;
if(opts.modelnum.value()>=2) nparams++;
if(opts.modelnum.value()==3) nparams++;
if(opts.rician.value()) nparams++;
thrust::device_vector<float> recors_null_gpu;
recors_null_gpu.resize(1);
thrust::device_vector<double> angtmp_gpu;
thrust::device_vector<double> oldangtmp_gpu;
thrust::device_vector<double> oldsignals_gpu;
thrust::device_vector<double> oldisosignals_gpu;
angtmp_gpu.resize(nvox*ndirections*nfib);
oldangtmp_gpu.resize(nvox*ndirections);
oldsignals_gpu.resize(nvox*ndirections*nfib);
oldisosignals_gpu.resize(nvox*ndirections);
unsigned int totalrandoms=(opts.nburn.value() * nvox * nparams);
cuMemGetInfo(&free,&total);
myfile << "Free memory Before Randoms: "<< free << " ---- Total memory: " << total << "\n";
//4 bytes per float, 2 random arrays, using 80% of the free memory available at this moment
unsigned int maxrandoms=((free*0.8)/(4*2));
myfile << "Total randoms: " << totalrandoms << "\n";
myfile << "Max randoms: " << maxrandoms << "\n";
int steps; //number of full steps needed when there is not enough memory for all the randoms at once
int minrandoms; //minimum number of randoms needed per iteration (nvox * nparams)
minrandoms= nvox * nparams;
int iters_step=0;
int nrandoms=0;
if(totalrandoms>maxrandoms){
iters_step = maxrandoms / minrandoms; //iterations in each step
nrandoms = iters_step*minrandoms; //nrandoms for each step
steps = (opts.nburn.value()/iters_step); //repeat the process 'steps' times; not enough memory for all the randoms at once
}else{
nrandoms = totalrandoms;
iters_step= opts.nburn.value();
steps = 0;
}
if(nrandoms%2){ //CURAND must generate a multiple of 2 random numbers
nrandoms++;
}
myfile << "Process " << opts.nburn.value() << " iterations divided in "<< steps << " steps with "<< iters_step << " iterations in each one" << "\n";
int last_step = opts.nburn.value() - (iters_step*steps);
int last_randoms = (last_step*minrandoms);
if(last_randoms%2){ //CURAND must generate a multiple of 2 random numbers
last_randoms++;
}
myfile << "Last step with " << last_step << " iterations" << "\n";
thrust::device_vector<float> randomsN_gpu;
thrust::device_vector<float> randomsU_gpu;
randomsN_gpu.resize(nrandoms);
randomsU_gpu.resize(nrandoms);
cuMemGetInfo(&free,&total);
myfile << "Free memory after Malloc Randoms: "<< free << " ---- Total memory: " << total << "\n";
int blocks = nvox;
dim3 Dim_Grid(blocks, 1);
dim3 Dim_Block(THREADS_BLOCK_MCMC,1); //dimensions for MCMC
myfile << "\n" << "NUM BLOCKS: " << blocks << "\n";
myfile << "THREADS PER BLOCK : " << THREADS_BLOCK_MCMC << "\n\n";
curandGenerator_t gen;
curandCreateGenerator(&gen,CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen,seed);
//get pointers
float *datam_ptr = thrust::raw_pointer_cast(datam_gpu.data());
float *bvals_ptr = thrust::raw_pointer_cast(bvals_gpu.data());
double *alpha_ptr = thrust::raw_pointer_cast(alpha_gpu.data());
double *beta_ptr = thrust::raw_pointer_cast(beta_gpu.data());
float *randomsN_ptr = thrust::raw_pointer_cast(randomsN_gpu.data());
float *randomsU_ptr = thrust::raw_pointer_cast(randomsU_gpu.data());
FibreGPU *fibres_ptr = thrust::raw_pointer_cast(fibres_gpu.data());
MultifibreGPU *multifibres_ptr = thrust::raw_pointer_cast(multifibres_gpu.data());
double *signals_ptr = thrust::raw_pointer_cast(signals_gpu.data());
double *isosignals_ptr = thrust::raw_pointer_cast(isosignals_gpu.data());
double *angtmp_ptr = thrust::raw_pointer_cast(angtmp_gpu.data());
double *oldangtmp_ptr = thrust::raw_pointer_cast(oldangtmp_gpu.data());
double *oldsignals_ptr = thrust::raw_pointer_cast(oldsignals_gpu.data());
double *oldisosignals_ptr = thrust::raw_pointer_cast(oldisosignals_gpu.data());
float *records_null = thrust::raw_pointer_cast(recors_null_gpu.data());
int amount_shared = (THREADS_BLOCK_MCMC)*sizeof(double) + (10*nfib + 2*nparams + 27)*sizeof(float) + (7*nfib + 21)*sizeof(int);
myfile << "Shared Memory Used in runmcmc_burnin: " << amount_shared << "\n";
for(int i=0;i<steps;i++){
gettimeofday(&t1,NULL);
curandStatus_t status = curandGenerateNormal(gen,randomsN_ptr,nrandoms,0,1);
if (status != CURAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
status = curandGenerateUniform(gen,randomsU_ptr,nrandoms); //generate randoms
if (status != CURAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
gettimeofday(&t2,NULL);
timecurand+=timeval_diff(&t2,&t1);
gettimeofday(&t1,NULL);
runmcmc_kernel<<< Dim_Grid, Dim_Block, amount_shared >>>(datam_ptr, bvals_ptr, alpha_ptr, beta_ptr, randomsN_ptr, randomsU_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.ardf0.value(), !opts.no_ard.value(), opts.rician.value(), gradnonlin, opts.updateproposalevery.value(), iters_step, (i*iters_step), 0, 0, 0, oldsignals_ptr, oldisosignals_ptr, angtmp_ptr, oldangtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr,records_null,records_null,records_null,records_null,records_null,records_null,records_null,records_null, records_null);
sync_check("runmcmc_burnin_kernel");
gettimeofday(&t2,NULL);
timemcmc+=timeval_diff(&t2,&t1);
}
gettimeofday(&t1,NULL);
if(nvox!=0){
curandStatus_t status = curandGenerateNormal(gen,randomsN_ptr,last_randoms,0,1);
if (status != CURAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
status = curandGenerateUniform(gen,randomsU_ptr,last_randoms); //generate randoms
if (status != CURAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
}
gettimeofday(&t2,NULL);
timecurand+=timeval_diff(&t2,&t1);
gettimeofday(&t1,NULL);
if(nvox!=0){
runmcmc_kernel<<< Dim_Grid, Dim_Block, amount_shared >>>(datam_ptr, bvals_ptr, alpha_ptr, beta_ptr, randomsN_ptr, randomsU_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.ardf0.value(), !opts.no_ard.value(), opts.rician.value(), gradnonlin, opts.updateproposalevery.value(), last_step, (steps*iters_step), 0, 0, 0, oldsignals_ptr, oldisosignals_ptr, angtmp_ptr, oldangtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr,records_null,records_null,records_null,records_null,records_null,records_null,records_null, records_null,records_null);
sync_check("runmcmc_burnin_kernel");
}
gettimeofday(&t2,NULL);
timemcmc+=timeval_diff(&t2,&t1);
myfile << "TIME CURAND: " << timecurand << " seconds\n";
myfile << "TIME RUNMCMC: " << timemcmc << " seconds\n";
curandDestroyGenerator(gen);
gettimeofday(&t_tot2,NULL);
time=timeval_diff(&t_tot2,&t_tot1);
myfile << "TIME TOTAL: " << time << " seconds\n";
myfile << "-----------------------------------------------------" << "\n\n" ;
myfile.close();
sync_check("runmcmc_burnin");
}
void runmcmc_record( //INPUT
thrust::device_vector<float> datam_gpu,
thrust::device_vector<float> bvals_gpu,
thrust::device_vector<double> alpha_gpu,
thrust::device_vector<double> beta_gpu,
thrust::device_vector<FibreGPU> fibres_gpu,
thrust::device_vector<MultifibreGPU> multifibres_gpu,
thrust::device_vector<double> signals_gpu,
thrust::device_vector<double> isosignals_gpu,
const int ndirections,
double seed,
string output_file,
//OUTPUT
thrust::device_vector<float>& rf0_gpu,
thrust::device_vector<float>& rtau_gpu,
thrust::device_vector<float>& rs0_gpu,
thrust::device_vector<float>& rd_gpu,
thrust::device_vector<float>& rdstd_gpu,
thrust::device_vector<float>& rR_gpu,
thrust::device_vector<float>& rth_gpu,
thrust::device_vector<float>& rph_gpu,
thrust::device_vector<float>& rf_gpu)
{
xfibresOptions& opts = xfibresOptions::getInstance();
std::ofstream myfile;
myfile.open (output_file.data(), ios::out | ios::app );
myfile << "--------- MCMC ALGORITHM PART RECORD IN GPU --------- " << "\n";
struct timeval t1,t2,t_tot1,t_tot2;
double time,timecurand,timemcmc;
time=0;
timecurand=0;
timemcmc=0;
gettimeofday(&t_tot1,NULL);
size_t free,total;
int totalrecords = (opts.njumps.value()/opts.sampleevery.value());
int nvox = multifibres_gpu.size();
int nfib= opts.nfibres.value();
int nparams;
bool gradnonlin=opts.grad_file.set();
if(opts.f0.value()) nparams=3+nfib*3;
else nparams=2+nfib*3;
if(opts.modelnum.value()>=2) nparams++;
if(opts.modelnum.value()==3) nparams++;
if(opts.rician.value()) nparams++;
thrust::device_vector<double> angtmp_gpu;
thrust::device_vector<double> oldangtmp_gpu;
thrust::device_vector<double> oldsignals_gpu;
thrust::device_vector<double> oldisosignals_gpu;
angtmp_gpu.resize(nvox*ndirections*nfib);
oldangtmp_gpu.resize(nvox*ndirections);
oldsignals_gpu.resize(nvox*ndirections*nfib);
oldisosignals_gpu.resize(nvox*ndirections);
unsigned int totalrandoms=(opts.njumps.value() * nvox * nparams);
cuMemGetInfo(&free,&total);
myfile << "Free memory Before Randoms: "<< free << " ---- Total memory: " << total << "\n";
//4 bytes per float, 2 random arrays, using 80% of the free memory available at this moment
unsigned int maxrandoms=((free*0.8)/(4*2));
myfile << "Total randoms: " << totalrandoms << "\n";
myfile << "Max randoms: " << maxrandoms << "\n";
int steps; //number of full steps needed when there is not enough memory for all the randoms at once
int minrandoms; //minimum number of randoms needed per iteration (nvox * nparams)
minrandoms= nvox * nparams;
int iters_step=0;
int nrandoms=0;
if(totalrandoms>maxrandoms){
iters_step = maxrandoms / minrandoms; //iterations in each step
nrandoms = iters_step*minrandoms; //nrandoms for each step
steps = (opts.njumps.value()/iters_step); //repeat the process 'steps' times; not enough memory for all the randoms at once
}else{
nrandoms = totalrandoms;
iters_step= opts.njumps.value();
steps = 0;
}
if(nrandoms%2){ //CURAND must generate a multiple of 2 random numbers
nrandoms++;
}
myfile << "Process " << opts.njumps.value() << " iterations divided in "<< steps << " steps with "<< iters_step << " iterations in each one" << "\n";
int last_step = opts.njumps.value() - (iters_step*steps);
int last_randoms = (last_step*minrandoms);
if(last_randoms%2){ //CURAND must generate a multiple of 2 random numbers
last_randoms++;
}
myfile << "Last step with " << last_step << " iterations" << "\n";
thrust::device_vector<float> randomsN_gpu;
thrust::device_vector<float> randomsU_gpu;
randomsN_gpu.resize(nrandoms);
randomsU_gpu.resize(nrandoms);
cuMemGetInfo(&free,&total);
myfile << "Free memory after Malloc Randoms: "<< free << " ---- Total memory: " << total << "\n";
int blocks = nvox;
dim3 Dim_Grid(blocks, 1);
dim3 Dim_Block(THREADS_BLOCK_MCMC,1); //dimensions for MCMC
myfile << "\n" << "NUM BLOCKS: " << blocks << "\n";
myfile << "THREADS PER BLOCK : " << THREADS_BLOCK_MCMC << "\n\n";
curandGenerator_t gen;
curandCreateGenerator(&gen,CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen,seed);
//get pointers
float *datam_ptr = thrust::raw_pointer_cast(datam_gpu.data());
float *bvals_ptr = thrust::raw_pointer_cast(bvals_gpu.data());
double *alpha_ptr = thrust::raw_pointer_cast(alpha_gpu.data());
double *beta_ptr = thrust::raw_pointer_cast(beta_gpu.data());
float *randomsN_ptr = thrust::raw_pointer_cast(randomsN_gpu.data());
float *randomsU_ptr = thrust::raw_pointer_cast(randomsU_gpu.data());
FibreGPU *fibres_ptr = thrust::raw_pointer_cast(fibres_gpu.data());
MultifibreGPU *multifibres_ptr = thrust::raw_pointer_cast(multifibres_gpu.data());
double *signals_ptr = thrust::raw_pointer_cast(signals_gpu.data());
double *isosignals_ptr = thrust::raw_pointer_cast(isosignals_gpu.data());
double *angtmp_ptr = thrust::raw_pointer_cast(angtmp_gpu.data());
double *oldangtmp_ptr = thrust::raw_pointer_cast(oldangtmp_gpu.data());
double *oldsignals_ptr = thrust::raw_pointer_cast(oldsignals_gpu.data());
double *oldisosignals_ptr = thrust::raw_pointer_cast(oldisosignals_gpu.data());
float *rf0_ptr = thrust::raw_pointer_cast(rf0_gpu.data());
float *rtau_ptr = thrust::raw_pointer_cast(rtau_gpu.data());
float *rs0_ptr = thrust::raw_pointer_cast(rs0_gpu.data());
float *rd_ptr = thrust::raw_pointer_cast(rd_gpu.data());
float *rdstd_ptr = thrust::raw_pointer_cast(rdstd_gpu.data());
float *rR_ptr = thrust::raw_pointer_cast(rR_gpu.data());
float *rth_ptr = thrust::raw_pointer_cast(rth_gpu.data());
float *rph_ptr = thrust::raw_pointer_cast(rph_gpu.data());
float *rf_ptr = thrust::raw_pointer_cast(rf_gpu.data());
int amount_shared = (THREADS_BLOCK_MCMC)*sizeof(double) + (10*nfib + 2*nparams + 27)*sizeof(float) + (7*nfib + 21)*sizeof(int);
myfile << "Shared Memory Used in runmcmc_record: " << amount_shared << "\n";
for(int i=0;i<steps;i++){
gettimeofday(&t1,NULL);
curandStatus_t status = curandGenerateNormal(gen,randomsN_ptr,nrandoms,0,1);
if (status != CURAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
status = curandGenerateUniform(gen,randomsU_ptr,nrandoms); //generate randoms
if (status != CURAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
gettimeofday(&t2,NULL);
timecurand+=timeval_diff(&t2,&t1);
gettimeofday(&t1,NULL);
runmcmc_kernel<<< Dim_Grid, Dim_Block, amount_shared >>>(datam_ptr, bvals_ptr, alpha_ptr, beta_ptr, randomsN_ptr, randomsU_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.ardf0.value(), !opts.no_ard.value(), opts.rician.value(), gradnonlin, opts.updateproposalevery.value(), iters_step, (i*iters_step), opts.nburn.value(), opts.sampleevery.value(), totalrecords, oldsignals_ptr, oldisosignals_ptr, angtmp_ptr, oldangtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr, rf0_ptr, rtau_ptr, rs0_ptr, rd_ptr, rdstd_ptr, rR_ptr, rth_ptr, rph_ptr, rf_ptr);
sync_check("runmcmc_record_kernel");
gettimeofday(&t2,NULL);
timemcmc+=timeval_diff(&t2,&t1);
}
gettimeofday(&t1,NULL);
if(nvox!=0){
curandStatus_t status = curandGenerateNormal(gen,randomsN_ptr,last_randoms,0,1);
if (status != CURAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
status = curandGenerateUniform(gen,randomsU_ptr,last_randoms); //generate randoms
if (status != CURAND_STATUS_SUCCESS)
{
printf("Failure generating cuda random numbers: %d\n",status);
exit(1);
}
}
gettimeofday(&t2,NULL);
timecurand+=timeval_diff(&t2,&t1);
gettimeofday(&t1,NULL);
if(nvox!=0){
runmcmc_kernel<<< Dim_Grid, Dim_Block, amount_shared >>>(datam_ptr, bvals_ptr, alpha_ptr, beta_ptr,randomsN_ptr, randomsU_ptr, opts.R_prior_mean.value(), opts.R_prior_std.value(),opts.R_prior_fudge.value(), ndirections, nfib, nparams, opts.modelnum.value(), opts.fudge.value(), opts.f0.value(), opts.ardf0.value(), !opts.no_ard.value(), opts.rician.value(), gradnonlin, opts.updateproposalevery.value(), last_step, (steps*iters_step), opts.nburn.value(), opts.sampleevery.value(), totalrecords, oldsignals_ptr, oldisosignals_ptr, angtmp_ptr, oldangtmp_ptr, fibres_ptr, multifibres_ptr, signals_ptr, isosignals_ptr, rf0_ptr, rtau_ptr, rs0_ptr, rd_ptr, rdstd_ptr, rR_ptr, rth_ptr, rph_ptr, rf_ptr);
sync_check("runmcmc_record_kernel");
}
gettimeofday(&t2,NULL);
timemcmc+=timeval_diff(&t2,&t1);
myfile << "TIME CURAND: " << timecurand << " seconds\n";
myfile << "TIME RUNMCMC: " << timemcmc << " seconds\n";
curandDestroyGenerator(gen);
gettimeofday(&t_tot2,NULL);
time=timeval_diff(&t_tot2,&t_tot1);
myfile << "TIME TOTAL: " << time << " seconds\n";
myfile << "-----------------------------------------------------" << "\n" ;
myfile.close();
sync_check("runmcmc_record");
}
|
e4da23c1e29f9c18d0230574b0035886df312501.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <torch/extension.h>
#include "dropout.cuh"
#include "softmax.cuh"
// symbol to be automatically resolved by PyTorch libs
namespace multihead_attn {
namespace fused_softmax {
namespace additive_mask_softmax_dropout {
std::vector<torch::Tensor> fwd_cuda(bool is_training, int heads,
torch::Tensor const &input,
const half *pad_mask, float dropout_prob) {
const int attn_batches = input.size(0);
const int sequences = attn_batches / heads;
const int q_seq_len = input.size(1);
const int k_seq_len = q_seq_len;
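// Input is laid out as [attn_batches, q_seq_len, k_seq_len] with attn_batches = sequences * heads;
// key and query lengths are assumed equal (self-attention).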
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
// 3 Intermediate Results + Output (Note: dropout intermediates are generated
// by ATen library code)
auto act_options = input.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor softmax_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask =
torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void *input_ptr = static_cast<void *>(input.data_ptr());
void *softmax_results_ptr = static_cast<void *>(softmax_results.data_ptr());
// Padded Softmax
bool softmax_success = false;
if (pad_mask == nullptr) {
softmax_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half *>(softmax_results_ptr),
reinterpret_cast<const half *>(input_ptr), k_seq_len, k_seq_len,
attn_batches * q_seq_len);
} else {
softmax_success = dispatch_additive_masked_softmax<half, half, float>(
reinterpret_cast<half *>(softmax_results_ptr),
reinterpret_cast<const half *>(input_ptr), pad_mask, k_seq_len,
k_seq_len, attn_batches * q_seq_len,
attn_batches * q_seq_len / sequences);
}
if (is_training) {
// use at:: function so that C++ version generates the same random mask as
// python version
auto dropout_tuple =
at::_fused_dropout(softmax_results, 1.0f - dropout_prob);
dropout_results = std::get<0>(dropout_tuple);
dropout_mask = std::get<1>(dropout_tuple);
}
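// at::_fused_dropout is called with the keep probability (1 - dropout_prob); it returns the
// already-rescaled output together with a uint8 keep-mask, which is all the backward pass needs.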
// Matmul2
return {dropout_results, dropout_mask, softmax_results};
}
torch::Tensor bwd_cuda(int heads, torch::Tensor const &output_grads,
torch::Tensor const &softmax_results,
torch::Tensor const &dropout_mask, float dropout_prob) {
const int attn_batches = output_grads.size(0);
const int q_seq_len = output_grads.size(1);
const int k_seq_len = q_seq_len;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
// Output Tensor Allocations
// torch::Tensor input_grads = torch::empty_like(output_grads);
// Apply Dropout Mask and Scale by Dropout Probability
// Softmax Grad
dispatch_masked_scale_softmax_backward_stream<half, half, float, false>(
static_cast<half *>(output_grads.data_ptr()),
static_cast<half *>(output_grads.data_ptr()),
reinterpret_cast<half const *>(softmax_results.data_ptr()),
static_cast<uint8_t const *>(dropout_mask.data_ptr()),
1.0 / (1.0 - dropout_prob), k_seq_len, k_seq_len,
attn_batches * q_seq_len, stream);
// backward pass is completely in-place
return output_grads;
}
} // namespace additive_mask_softmax_dropout
} // namespace fused_softmax
} // namespace multihead_attn
| e4da23c1e29f9c18d0230574b0035886df312501.cu | #include <iostream>
#include <math.h>
#include <vector>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_profiler_api.h>
#include <cuda_runtime.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include "dropout.cuh"
#include "softmax.cuh"
// symbol to be automatically resolved by PyTorch libs
namespace multihead_attn {
namespace fused_softmax {
namespace additive_mask_softmax_dropout {
std::vector<torch::Tensor> fwd_cuda(bool is_training, int heads,
torch::Tensor const &input,
const half *pad_mask, float dropout_prob) {
const int attn_batches = input.size(0);
const int sequences = attn_batches / heads;
const int q_seq_len = input.size(1);
const int k_seq_len = q_seq_len;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// 3 Intermediate Results + Output (Note: dropout intermediates are generated
// by ATen library code)
auto act_options = input.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor softmax_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask =
torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void *input_ptr = static_cast<void *>(input.data_ptr());
void *softmax_results_ptr = static_cast<void *>(softmax_results.data_ptr());
// Padded Softmax
bool softmax_success = false;
if (pad_mask == nullptr) {
softmax_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half *>(softmax_results_ptr),
reinterpret_cast<const half *>(input_ptr), k_seq_len, k_seq_len,
attn_batches * q_seq_len);
} else {
softmax_success = dispatch_additive_masked_softmax<half, half, float>(
reinterpret_cast<half *>(softmax_results_ptr),
reinterpret_cast<const half *>(input_ptr), pad_mask, k_seq_len,
k_seq_len, attn_batches * q_seq_len,
attn_batches * q_seq_len / sequences);
}
if (is_training) {
// use at:: function so that C++ version generates the same random mask as
// python version
auto dropout_tuple =
at::_fused_dropout(softmax_results, 1.0f - dropout_prob);
dropout_results = std::get<0>(dropout_tuple);
dropout_mask = std::get<1>(dropout_tuple);
}
// Matmul2
return {dropout_results, dropout_mask, softmax_results};
}
torch::Tensor bwd_cuda(int heads, torch::Tensor const &output_grads,
torch::Tensor const &softmax_results,
torch::Tensor const &dropout_mask, float dropout_prob) {
const int attn_batches = output_grads.size(0);
const int q_seq_len = output_grads.size(1);
const int k_seq_len = q_seq_len;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// Output Tensor Allocations
// torch::Tensor input_grads = torch::empty_like(output_grads);
// Apply Dropout Mask and Scale by Dropout Probability
// Softmax Grad
dispatch_masked_scale_softmax_backward_stream<half, half, float, false>(
static_cast<half *>(output_grads.data_ptr()),
static_cast<half *>(output_grads.data_ptr()),
reinterpret_cast<half const *>(softmax_results.data_ptr()),
static_cast<uint8_t const *>(dropout_mask.data_ptr()),
1.0 / (1.0 - dropout_prob), k_seq_len, k_seq_len,
attn_batches * q_seq_len, stream);
// backward pass is completely in-place
return output_grads;
}
} // namespace additive_mask_softmax_dropout
} // namespace fused_softmax
} // namespace multihead_attn
|
ce461f89c1be009cd665f0dd7b17501b7eb1daa2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "matrix.h"
#include <assert.h>
#include <limits>
#include <cassert>
#include <cstring>
#include <cmath>
#include <iostream>
//the global parameters
const int sampleNum=2;
const int filterDem=4;
//#define debug
struct Bounds
{
int offset;
float hstart, hend, wstart, wend ;
bool isEmpty ;
} ;
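// Maps a flat output index to its (bin, channel, ROI, image) coordinates and computes the bin's
// sub-pixel boundaries in feature-map space after scaling the ROI corners by 'transform'.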
__device__ __forceinline__ static Bounds getBounds
(int outputIndex,
int height, int width, int numChannels, int size,
const float* rois, int numROIs,
int subd,float transform)
{
Bounds b ;
int ph = outputIndex ;
int pw = ph / subd ;
int pc = pw / subd;
int pr = pc / numChannels ;
ph %= subd ;
pw %= subd ;
pc %= numChannels ;
rois += 5 * pr ;
// Apply scale and offset to each ROI coordinate.
float u1_ = rois[1] ;
float v1_ = rois[2] ;
float u2_ = rois[3] ;
float v2_ = rois[4] ;
float u1 = transform * (u1_ - 1) + 1;
float v1 = transform * (v1_ - 1) + 1;
float u2 = transform * (u2_ - 1) + 1;
float v2 = transform * (v2_ - 1) + 1;
int roi_image = (int) rois[0];
float roi_start_h = v1 - 1 ;
float roi_start_w = u1 - 1 ;
float roi_end_h = v2 - 1 ;
float roi_end_w = u2 - 1 ;
float roi_height =max(roi_end_h - roi_start_h + 1.0, 1.0) ;
float roi_width = max(roi_end_w - roi_start_w + 1.0, 1.0) ;
float bin_size_h = (float)roi_height / subd ;
float bin_size_w = (float)roi_width / subd ;
roi_image = min(max(roi_image - 1,0), (int)size - 1) ;
b.offset = (roi_image * numChannels + pc) * (width * height) ;
b.wstart = (float)(((float)pw) * bin_size_w) ;
b.wend = (float)(((float)(pw + 1)) * bin_size_w) ;
b.wstart = min(max(b.wstart + roi_start_w,(float) 0.0), (float)width) ;
b.wend =min(max(b.wend + roi_start_w,(float)0.0), (float)width) ;
b.hstart = (float)(((float)ph) * bin_size_h) ;
b.hend = (float)((float)(ph + 1) * bin_size_h) ;
b.hstart = min(max(b.hstart + roi_start_h, (float)0.0), (float)height) ;
b.hend = min(max(b.hend + roi_start_h, (float)0.0), (float)height) ;
b.isEmpty = (b.hend <= b.hstart) || (b.wend <= b.wstart) ;
return b ;
}
void __global__ roialign_max_froward
(float* output,
const float* data, int height, int width, int numChannels, int size,
const float* rois, int numROIs,
int subd,float transform)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x ;
int outputVolume = subd * subd * numChannels * numROIs ;
// if (outputIndex < outputVolume)
if (outputIndex < outputVolume)
{
Bounds b = getBounds(outputIndex,
height,width,numChannels,size,
rois,numROIs,
subd,transform) ;
if (! b.isEmpty)
{
data += b.offset ;
// Define an empty pooling region to be zero
float maxval = -FLT_MAX;
float maxidx_x = 0.0;
float maxidx_y = 0.0;
float w_bin=(b.wend-b.wstart)/(float)(sampleNum+1);
float h_bin=(b.hend-b.hstart)/(float)(sampleNum+1);
int iter_x=0;
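//Each output bin is sampled at sampleNum x sampleNum regularly spaced points; every sample is
//bilinearly interpolated from its four neighbouring cells and the bin value is the maximum over all samples.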
for (float w = b.wstart+w_bin; ;w=w+w_bin )
{
iter_x++;
int iter_y=0;
if(iter_x > sampleNum)
{
break;
}
for (float h = b.hstart+h_bin; ;h=h+h_bin)
{
iter_y++;
if(iter_y > sampleNum)
{
break;
}
// Selecting four regular locations for bilinear interpolation
int x_left = floor(w);
int x_right = ceil(w);
int y_top = floor(h);
int y_bottom = ceil(h);
int top_left_index = x_left * height + y_top;
int bottom_left_index = x_left * height + y_bottom;
int top_right_index = x_right * height + y_top;
int bottom_right_index = x_right * height + y_bottom;
bool is_top_left_in = x_left >= 0 && x_left <= width - 1
&& y_top >= 0 && y_top <= height - 1;
bool is_top_right_in = x_right >= 0 && x_right <= width - 1
&& y_top >= 0 && y_top <= height - 1;
bool is_bottom_left_in = x_left >= 0 && x_left <= width - 1
&& y_bottom >= 0 && y_bottom <= height - 1;
bool is_bottom_right_in = x_right >= 0 && x_right <= width - 1
&& y_bottom >= 0 && y_bottom <= height - 1;
float val = 0.0;
if (is_top_left_in)
{
val += (1 - (w - x_left)) * (1 - (h-y_top)) * data[top_left_index];
}
if (is_top_right_in)
{
val += (1 - (x_right-w)) * (1 - (h-y_top)) * data[top_right_index];
}
if (is_bottom_left_in)
{
val += (1 - (w-x_left)) * (1 - (y_bottom-h)) * data[bottom_left_index];
}
if (is_bottom_right_in)
{
val += (1-(x_right-w)) * (1-(y_bottom-h)) * data[bottom_right_index];
}
if (val > maxval)
{
maxval = val;
maxidx_x = w;
maxidx_y = h;
}
}
}
output[outputIndex] = maxval ;
}
else
{
output[outputIndex] = 0 ;
}
}
}
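//Backward pass: recomputes the argmax sample of each output bin and distributes the incoming
//gradient to the four cells that contributed to it, weighted by the bilinear coefficients
//(atomicAdd is used because several bins can hit the same input cell).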
void __global__ roialign_max_backward
(const float* derData,
const float* data, int height, int width, int numChannels, int size,
const float* rois, int numROIs,
float* derOutput, int subd,float transform)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x ;
int outputVolume = subd * subd * numChannels * numROIs ;
// if (outputIndex < outputVolume)
if (outputIndex < outputVolume)
{
Bounds b = getBounds(outputIndex,
height,width,numChannels,size,
rois,numROIs,
subd,transform) ;
if (! b.isEmpty)
{
data += b.offset ;
//derData holds the incoming gradient w.r.t. the pooled output and is indexed directly by outputIndex below, so no offset is applied to it
// Define an empty pooling region to be zero
float maxval = -FLT_MAX;
int index_left_top;
int index_right_top;
int index_left_bottom;
int index_right_bottom ;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
float maxidx_x = 0.0;
float maxidx_y = 0.0;
float w_bin=(b.wend-b.wstart) / (float)(sampleNum+1);
float h_bin=(b.hend-b.hstart) / (float)(sampleNum+1);
int iter_x=0;
// Selecting 2*2 regular locations for bilinear interpolation
for (float w = b.wstart + w_bin; ;w = w + w_bin )
{
iter_x++;
int iter_y = 0;
if(iter_x > sampleNum)
{
break;
}
for (float h = b.hstart + h_bin; ;h = h + h_bin)
{
iter_y++;
if(iter_y > sampleNum)
{
break;
}
int x_left = floor(w);
int x_right = ceil(w);
int y_top = floor(h);
int y_bottom = ceil(h);
int top_left_index = x_left * height + y_top;
int bottom_left_index = x_left * height + y_bottom;
int top_right_index = x_right * height + y_top;
int bottom_right_index = x_right * height + y_bottom;
bool is_top_left_in = x_left >= 0 && x_left <= width - 1
&& y_top >= 0 && y_top <= height - 1;
bool is_top_right_in = x_right >= 0 && x_right <= width - 1
&& y_top >= 0 && y_top <= height - 1;
bool is_bottom_left_in = x_left >= 0 && x_left <= width - 1
&& y_bottom >= 0 && y_bottom <= height - 1;
bool is_bottom_right_in = x_right >= 0 && x_right <= width - 1
&& y_bottom >= 0 && y_bottom <= height - 1;
float val = 0.0;
if (is_top_left_in)
{
val += (1 - (w - x_left)) * (1 - (h-y_top)) * data[top_left_index];
}
if (is_top_right_in)
{
val += (1 - (x_right-w)) * (1 - (h-y_top)) * data[top_right_index];
}
if (is_bottom_left_in)
{
val += (1 - (w-x_left)) * (1 - (y_bottom-h)) * data[bottom_left_index];
}
if (is_bottom_right_in)
{
val += (1-(x_right-w)) * (1-(y_bottom-h)) * data[bottom_right_index];
}
if (val > maxval)
{
maxval = val;
maxidx_x = w;
maxidx_y = h;
index_left_top = floor(maxidx_x) * height + floor(maxidx_y);
index_right_top = ceil(maxidx_x) * height + floor(maxidx_y);
index_left_bottom = floor(maxidx_x) * height + ceil(maxidx_y);
index_right_bottom = ceil(maxidx_x) * height + ceil(maxidx_y);
}
}
}
//accumulate the gradient into the four neighbours of the recomputed argmax location
atomicAdd(derOutput + b.offset + index_left_top, (1-(maxidx_x-floor(maxidx_x))) * (1-(maxidx_y-floor(maxidx_y))) * derData[outputIndex]) ;
atomicAdd(derOutput + b.offset + index_left_bottom, (1-(maxidx_x-floor(maxidx_x))) * (1-(ceil(maxidx_y)-maxidx_y)) * derData[outputIndex]) ;
atomicAdd(derOutput + b.offset + index_right_top, (1-(ceil(maxidx_x)-maxidx_x)) * (1-(maxidx_y-floor(maxidx_y))) * derData[outputIndex]) ;
atomicAdd(derOutput + b.offset + index_right_bottom, (1-(ceil(maxidx_x)-maxidx_x)) * (1-(ceil(maxidx_y)-maxidx_y)) * derData[outputIndex]) ;
}
}
}
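// MEX entry point: with 4 inputs (data, rois, subd, transform) it runs the forward pass and returns
// the pooled features; with 5 inputs (data, rois, output gradient, subd, transform) it runs the
// backward pass and returns the gradient with respect to the input feature map.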
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
/* Declare all variables.*/
mxGPUArray const *in;
mxGPUArray *out;
mxGPUArray const *rois;
int subd;
float transform;
float const *p_in;
float const *p_rois;
float *p_out;
float const *p_derin;
//int N;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
char const * const errInput = "the input is not suitable.";
/* Choose a reasonably sized number of threads for the block. */
int threadsPerBlock = 1024;
int blocksPerGrid;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
if (nrhs == 4)
{
in = mxGPUCreateFromMxArray(prhs[0]);
rois = mxGPUCreateFromMxArray(prhs[1]);
subd = mxGetScalar(prhs[2]);
transform = mxGetScalar(prhs[3]);
/*
* Verify that in really is a single array before extracting the pointer.
*/
if ((mxGPUGetClassID(in) != mxSINGLE_CLASS)||(mxGPUGetClassID(rois) != mxSINGLE_CLASS))
{
mexErrMsgIdAndTxt(errId, errMsg);
}
if((int)(mxGPUGetNumberOfDimensions(in)) != 4 && (int)(mxGPUGetNumberOfDimensions(in)) != 3)
{
mexErrMsgIdAndTxt(errId, errInput);
}
if((int)(mxGPUGetNumberOfDimensions(rois)) != 2)
{
mexErrMsgIdAndTxt(errId, errInput);
}
p_in = (float const *)(mxGPUGetDataReadOnly(in));
p_rois = (float const *)(mxGPUGetDataReadOnly(rois));
mwSize const *in_dim = mxGPUGetDimensions(in);
mwSize const *rois_dim = mxGPUGetDimensions(rois);
int height = in_dim[0];
int width = in_dim[1];
int numChannels = in_dim[2];
int numROIs = rois_dim[1];
int size = 1;
if((int)(mxGPUGetNumberOfDimensions(in)) == 4)
{
size = in_dim[3];
}
mwSize dim[4] = {(mwSize)subd,(mwSize)subd,in_dim[2],rois_dim[1]};
blocksPerGrid=(subd * subd * numChannels * numROIs + threadsPerBlock -1 ) / threadsPerBlock;
//mxGPUGetNumberOfDimensions(in)
out = mxGPUCreateGPUArray((mwSize)filterDem,
dim,
mxGPUGetClassID(in),
mxGPUGetComplexity(in),
MX_GPU_DO_NOT_INITIALIZE );
p_out = (float *)(mxGPUGetData(out));
/*
* Call the kernel using the CUDA runtime API. We are using a 1-d grid here,
* and it would be possible for the number of elements to be too large for
* the grid. For this example we are not guarding against this possibility.
*/
hipLaunchKernelGGL(( roialign_max_froward)
, dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0,
p_out,
p_in, height, width, numChannels, size,
p_rois, numROIs,
subd,transform);
/* Wrap the result up as a MATLAB gpuArray for return. */
plhs[0] = mxGPUCreateMxArrayOnGPU(out);
/*
* The mxGPUArray pointers are host-side structures that refer to device
* data. These must be destroyed before leaving the MEX function.
*/
mxGPUDestroyGPUArray(in);
mxGPUDestroyGPUArray(out);
mxGPUDestroyGPUArray(rois);
}
else if(nrhs==5)
{
mxGPUArray const *derin;
in = mxGPUCreateFromMxArray(prhs[0]);
rois = mxGPUCreateFromMxArray(prhs[1]);
derin = mxGPUCreateFromMxArray(prhs[2]);
subd = mxGetScalar(prhs[3]);
transform = mxGetScalar(prhs[4]);
if ((mxGPUGetClassID(in) != mxSINGLE_CLASS)||(mxGPUGetClassID(rois) != mxSINGLE_CLASS)
||(mxGPUGetClassID(derin) != mxSINGLE_CLASS) )
{
mexErrMsgIdAndTxt(errId, errMsg);
}
if((int)(mxGPUGetNumberOfDimensions(in)) != 4 && (int)(mxGPUGetNumberOfDimensions(in)) != 3)
{
mexErrMsgIdAndTxt(errId, errInput);
}
if((int)(mxGPUGetNumberOfDimensions(rois)) !=2 )
{
mexErrMsgIdAndTxt(errId, errInput);
}
p_in = (float const *)(mxGPUGetDataReadOnly(in));
p_rois = (float const *)(mxGPUGetDataReadOnly(rois));
p_derin = (float const *)(mxGPUGetDataReadOnly(derin));
mwSize const* in_dim = mxGPUGetDimensions(in);
mwSize const* rois_dim = mxGPUGetDimensions(rois);
int height = in_dim[0];
int width = in_dim[1];
int numChannels = in_dim[2];
int numROIs = rois_dim[1];
int size = 1;
if((int)(mxGPUGetNumberOfDimensions(in)) == 4)
{
size = in_dim[3];
}
blocksPerGrid = (subd * subd * numChannels * numROIs + threadsPerBlock - 1) / threadsPerBlock;
out = mxGPUCreateGPUArray((mwSize)filterDem ,
mxGPUGetDimensions(in),
mxGPUGetClassID(in),
mxGPUGetComplexity(in),
MX_GPU_INITIALIZE_VALUES );
p_out = (float *)(mxGPUGetData(out));
hipLaunchKernelGGL(( roialign_max_backward)
, dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0,
p_derin, p_in,
height, width, numChannels, size,
p_rois, numROIs,
p_out,
subd,transform) ;
/* Wrap the result up as a MATLAB gpuArray for return. */
plhs[0] = mxGPUCreateMxArrayOnGPU(out);
/*
* The mxGPUArray pointers are host-side structures that refer to device
* data. These must be destroyed before leaving the MEX function.
*/
mxGPUDestroyGPUArray(in);
mxGPUDestroyGPUArray(out);
mxGPUDestroyGPUArray(rois);
mxGPUDestroyGPUArray(derin);
}
else
{
mexErrMsgIdAndTxt(errId, errMsg);
}
}
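/* Illustrative sketch only (not called by the MEX entry point above): the launch in
   mexFunction uses a plain 1-d grid with no guard. A grid-stride loop makes a kernel
   correct for any grid size, so blocksPerGrid can be clamped to a conservative bound
   (the 65535 below is an assumption, not a queried device limit). */
static __global__ void roialign_grid_stride_sketch(float *out, int n, float value)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x)
    {
        out[i] = value; /* each thread strides over the whole index range */
    }
}
/* usage sketch:
     int blocks = min((n + threadsPerBlock - 1) / threadsPerBlock, 65535);
     hipLaunchKernelGGL(roialign_grid_stride_sketch, dim3(blocks), dim3(threadsPerBlock), 0, 0, p_out, n, 0.f);
*/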
| ce461f89c1be009cd665f0dd7b17501b7eb1daa2.cu | #include <stdio.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "matrix.h"
#include <assert.h>
#include <limits>
#include <cassert>
#include <cstring>
#include <cmath>
#include <iostream>
//the global parameters
const int sampleNum=2;
const int filterDem=4;
//#define debug
struct Bounds
{
int offset;
float hstart, hend, wstart, wend ;
bool isEmpty ;
} ;
__device__ __forceinline__ static Bounds getBounds
(int outputIndex,
int height, int width, int numChannels, int size,
const float* rois, int numROIs,
int subd,float transform)
{
Bounds b ;
int ph = outputIndex ;
int pw = ph / subd ;
int pc = pw / subd;
int pr = pc / numChannels ;
ph %= subd ;
pw %= subd ;
pc %= numChannels ;
rois += 5 * pr ;
  // Apply scale and offset to each ROI coordinate.
float u1_ = rois[1] ;
float v1_ = rois[2] ;
float u2_ = rois[3] ;
float v2_ = rois[4] ;
float u1 = transform * (u1_ - 1) + 1;
float v1 = transform * (v1_ - 1) + 1;
float u2 = transform * (u2_ - 1) + 1;
float v2 = transform * (v2_ - 1) + 1;
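        // e.g. with transform = 0.5 (one 2x downsampling step), an image-space
        // coordinate u1_ = 3 lands at feature-map coordinate 0.5*(3-1)+1 = 2.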
int roi_image = (int) rois[0];
float roi_start_h = v1 - 1 ;
float roi_start_w = u1 - 1 ;
float roi_end_h = v2 - 1 ;
float roi_end_w = u2 - 1 ;
float roi_height =max(roi_end_h - roi_start_h + 1.0, 1.0) ;
float roi_width = max(roi_end_w - roi_start_w + 1.0, 1.0) ;
float bin_size_h = (float)roi_height / subd ;
float bin_size_w = (float)roi_width / subd ;
roi_image = min(max(roi_image - 1,0), (int)size - 1) ;
b.offset = (roi_image * numChannels + pc) * (width * height) ;
b.wstart = (float)(((float)pw) * bin_size_w) ;
b.wend = (float)(((float)(pw + 1)) * bin_size_w) ;
b.wstart = min(max(b.wstart + roi_start_w,(float) 0.0), (float)width) ;
b.wend =min(max(b.wend + roi_start_w,(float)0.0), (float)width) ;
b.hstart = (float)(((float)ph) * bin_size_h) ;
b.hend = (float)((float)(ph + 1) * bin_size_h) ;
b.hstart = min(max(b.hstart + roi_start_h, (float)0.0), (float)height) ;
b.hend = min(max(b.hend + roi_start_h, (float)0.0), (float)height) ;
b.isEmpty = (b.hend <= b.hstart) || (b.wend <= b.wstart) ;
return b ;
}
void __global__ roialign_max_froward
(float* output,
const float* data, int height, int width, int numChannels, int size,
const float* rois, int numROIs,
int subd,float transform)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x ;
int outputVolume = subd * subd * numChannels * numROIs ;
// if (outputIndex < outputVolume)
if (outputIndex < outputVolume)
{
Bounds b = getBounds(outputIndex,
height,width,numChannels,size,
rois,numROIs,
subd,transform) ;
if (! b.isEmpty)
{
data += b.offset ;
// Define an empty pooling region to be zero
float maxval = -FLT_MAX;
float maxidx_x = 0.0;
float maxidx_y = 0.0;
float w_bin=(b.wend-b.wstart)/(float)(sampleNum+1);
float h_bin=(b.hend-b.hstart)/(float)(sampleNum+1);
int iter_x=0;
for (float w = b.wstart+w_bin; ;w=w+w_bin )
{
iter_x++;
int iter_y=0;
if(iter_x > sampleNum)
{
break;
}
for (float h = b.hstart+h_bin; ;h=h+h_bin)
{
iter_y++;
if(iter_y > sampleNum)
{
break;
}
// Selecting four regular locations for bilinear interpolation
int x_left = floor(w);
int x_right = ceil(w);
int y_top = floor(h);
int y_bottom = ceil(h);
int top_left_index = x_left * height + y_top;
int bottom_left_index = x_left * height + y_bottom;
int top_right_index = x_right * height + y_top;
int bottom_right_index = x_right * height + y_bottom;
bool is_top_left_in = x_left >= 0 && x_left <= width - 1
&& y_top >= 0 && y_top <= height - 1;
bool is_top_right_in = x_right >= 0 && x_right <= width - 1
&& y_top >= 0 && y_top <= height - 1;
bool is_bottom_left_in = x_left >= 0 && x_left <= width - 1
&& y_bottom >= 0 && y_bottom <= height - 1;
bool is_bottom_right_in = x_right >= 0 && x_right <= width - 1
&& y_bottom >= 0 && y_bottom <= height - 1;
float val = 0.0;
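              // Bilinear interpolation: each in-bounds corner contributes with weight
              // (1 - horizontal distance to w) * (1 - vertical distance to h).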
if (is_top_left_in)
{
val += (1 - (w - x_left)) * (1 - (h-y_top)) * data[top_left_index];
}
if (is_top_right_in)
{
val += (1 - (x_right-w)) * (1 - (h-y_top)) * data[top_right_index];
}
if (is_bottom_left_in)
{
val += (1 - (w-x_left)) * (1 - (y_bottom-h)) * data[bottom_left_index];
}
if (is_bottom_right_in)
{
val += (1-(x_right-w)) * (1-(y_bottom-h)) * data[bottom_right_index];
}
if (val > maxval)
{
maxval = val;
maxidx_x = w;
maxidx_y = h;
}
}
}
output[outputIndex] = maxval ;
}
else
{
output[outputIndex] = 0 ;
}
}
}
void __global__ roialign_max_backward
(const float* derData,
const float* data, int height, int width, int numChannels, int size,
const float* rois, int numROIs,
float* derOutput, int subd,float transform)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x ;
int outputVolume = subd * subd * numChannels * numROIs ;
// if (outputIndex < outputVolume)
if (outputIndex < outputVolume)
{
Bounds b = getBounds(outputIndex,
height,width,numChannels,size,
rois,numROIs,
subd,transform) ;
if (! b.isEmpty)
{
data += b.offset ;
      // derData has the pooled-output layout and is indexed by outputIndex directly.
// Define an empty pooling region to be zero
float maxval = -FLT_MAX;
int index_left_top;
int index_right_top;
int index_left_bottom;
int index_right_bottom ;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
float maxidx_x = 0.0;
float maxidx_y = 0.0;
float w_bin=(b.wend-b.wstart) / (float)(sampleNum+1);
float h_bin=(b.hend-b.hstart) / (float)(sampleNum+1);
int iter_x=0;
// Selecting 2*2 regular locations for bilinear interpolation
for (float w = b.wstart + w_bin; ;w = w + w_bin )
{
iter_x++;
int iter_y = 0;
if(iter_x > sampleNum)
{
break;
}
for (float h = b.hstart + h_bin; ;h = h + h_bin)
{
iter_y++;
if(iter_y > sampleNum)
{
break;
}
int x_left = floor(w);
int x_right = ceil(w);
int y_top = floor(h);
int y_bottom = ceil(h);
int top_left_index = x_left * height + y_top;
int bottom_left_index = x_left * height + y_bottom;
int top_right_index = x_right * height + y_top;
int bottom_right_index = x_right * height + y_bottom;
bool is_top_left_in = x_left >= 0 && x_left <= width - 1
&& y_top >= 0 && y_top <= height - 1;
bool is_top_right_in = x_right >= 0 && x_right <= width - 1
&& y_top >= 0 && y_top <= height - 1;
bool is_bottom_left_in = x_left >= 0 && x_left <= width - 1
&& y_bottom >= 0 && y_bottom <= height - 1;
bool is_bottom_right_in = x_right >= 0 && x_right <= width - 1
&& y_bottom >= 0 && y_bottom <= height - 1;
float val = 0.0;
if (is_top_left_in)
{
val += (1 - (w - x_left)) * (1 - (h-y_top)) * data[top_left_index];
}
if (is_top_right_in)
{
val += (1 - (x_right-w)) * (1 - (h-y_top)) * data[top_right_index];
}
if (is_bottom_left_in)
{
val += (1 - (w-x_left)) * (1 - (y_bottom-h)) * data[bottom_left_index];
}
if (is_bottom_right_in)
{
val += (1-(x_right-w)) * (1-(y_bottom-h)) * data[bottom_right_index];
}
if (val > maxval)
{
maxval = val;
maxidx_x = w;
maxidx_y = h;
index_left_top = floor(maxidx_x) * height + floor(maxidx_y);
index_right_top = ceil(maxidx_x) * height + floor(maxidx_y);
index_left_bottom = floor(maxidx_x) * height + ceil(maxidx_y);
index_right_bottom = ceil(maxidx_x) * height + ceil(maxidx_y);
}
}
}
      const float grad = derData[outputIndex];
      atomicAdd(derOutput + b.offset + index_left_top, (1-(maxidx_x-floor(maxidx_x))) * (1-(maxidx_y-floor(maxidx_y))) * grad) ;
      atomicAdd(derOutput + b.offset + index_left_bottom, (1-(maxidx_x-floor(maxidx_x))) * (1-(ceil(maxidx_y)-maxidx_y)) * grad) ;
      atomicAdd(derOutput + b.offset + index_right_top, (1-(ceil(maxidx_x)-maxidx_x)) * (1-(maxidx_y-floor(maxidx_y))) * grad) ;
      atomicAdd(derOutput + b.offset + index_right_bottom, (1-(ceil(maxidx_x)-maxidx_x)) * (1-(ceil(maxidx_y)-maxidx_y)) * grad) ;
}
}
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
/* Declare all variables.*/
mxGPUArray const *in;
mxGPUArray *out;
mxGPUArray const *rois;
int subd;
float transform;
float const *p_in;
float const *p_rois;
float *p_out;
float const *p_derin;
//int N;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
  char const * const errInput = "the input is not suitable.";
/* Choose a reasonably sized number of threads for the block. */
int threadsPerBlock = 1024;
int blocksPerGrid;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
if (nrhs == 4)
{
in = mxGPUCreateFromMxArray(prhs[0]);
rois = mxGPUCreateFromMxArray(prhs[1]);
subd = mxGetScalar(prhs[2]);
transform = mxGetScalar(prhs[3]);
/*
* Verify that in really is a single array before extracting the pointer.
*/
if ((mxGPUGetClassID(in) != mxSINGLE_CLASS)||(mxGPUGetClassID(rois) != mxSINGLE_CLASS))
{
mexErrMsgIdAndTxt(errId, errMsg);
}
if((int)(mxGPUGetNumberOfDimensions(in)) != 4 && (int)(mxGPUGetNumberOfDimensions(in)) != 3)
{
mexErrMsgIdAndTxt(errId, errInput);
}
if((int)(mxGPUGetNumberOfDimensions(rois)) != 2)
{
mexErrMsgIdAndTxt(errId, errInput);
}
p_in = (float const *)(mxGPUGetDataReadOnly(in));
p_rois = (float const *)(mxGPUGetDataReadOnly(rois));
mwSize const *in_dim = mxGPUGetDimensions(in);
mwSize const *rois_dim = mxGPUGetDimensions(rois);
int height = in_dim[0];
int width = in_dim[1];
int numChannels = in_dim[2];
int numROIs = rois_dim[1];
int size = 1;
if((int)(mxGPUGetNumberOfDimensions(in)) == 4)
{
size = in_dim[3];
}
mwSize dim[4] = {(mwSize)subd,(mwSize)subd,in_dim[2],rois_dim[1]};
blocksPerGrid=(subd * subd * numChannels * numROIs + threadsPerBlock -1 ) / threadsPerBlock;
//mxGPUGetNumberOfDimensions(in)
out = mxGPUCreateGPUArray((mwSize)filterDem,
dim,
mxGPUGetClassID(in),
mxGPUGetComplexity(in),
MX_GPU_DO_NOT_INITIALIZE );
p_out = (float *)(mxGPUGetData(out));
/*
* Call the kernel using the CUDA runtime API. We are using a 1-d grid here,
* and it would be possible for the number of elements to be too large for
* the grid. For this example we are not guarding against this possibility.
*/
roialign_max_froward
<<< blocksPerGrid,threadsPerBlock>>>
(p_out,
p_in, height, width, numChannels, size,
p_rois, numROIs,
subd,transform);
/* Wrap the result up as a MATLAB gpuArray for return. */
plhs[0] = mxGPUCreateMxArrayOnGPU(out);
/*
* The mxGPUArray pointers are host-side structures that refer to device
* data. These must be destroyed before leaving the MEX function.
*/
mxGPUDestroyGPUArray(in);
mxGPUDestroyGPUArray(out);
mxGPUDestroyGPUArray(rois);
}
else if(nrhs==5)
{
mxGPUArray const *derin;
in = mxGPUCreateFromMxArray(prhs[0]);
rois = mxGPUCreateFromMxArray(prhs[1]);
derin = mxGPUCreateFromMxArray(prhs[2]);
subd = mxGetScalar(prhs[3]);
transform = mxGetScalar(prhs[4]);
if ((mxGPUGetClassID(in) != mxSINGLE_CLASS)||(mxGPUGetClassID(rois) != mxSINGLE_CLASS)
||(mxGPUGetClassID(derin) != mxSINGLE_CLASS) )
{
mexErrMsgIdAndTxt(errId, errMsg);
}
if((int)(mxGPUGetNumberOfDimensions(in)) != 4 && (int)(mxGPUGetNumberOfDimensions(in)) != 3)
{
mexErrMsgIdAndTxt(errId, errInput);
}
if((int)(mxGPUGetNumberOfDimensions(rois)) !=2 )
{
mexErrMsgIdAndTxt(errId, errInput);
}
p_in = (float const *)(mxGPUGetDataReadOnly(in));
p_rois = (float const *)(mxGPUGetDataReadOnly(rois));
p_derin = (float const *)(mxGPUGetDataReadOnly(derin));
mwSize const* in_dim = mxGPUGetDimensions(in);
mwSize const* rois_dim = mxGPUGetDimensions(rois);
int height = in_dim[0];
int width = in_dim[1];
int numChannels = in_dim[2];
int numROIs = rois_dim[1];
int size = 1;
if((int)(mxGPUGetNumberOfDimensions(in)) == 4)
{
size = in_dim[3];
}
blocksPerGrid = (subd * subd * numChannels * numROIs + threadsPerBlock - 1) / threadsPerBlock;
out = mxGPUCreateGPUArray((mwSize)filterDem ,
mxGPUGetDimensions(in),
mxGPUGetClassID(in),
mxGPUGetComplexity(in),
MX_GPU_INITIALIZE_VALUES );
p_out = (float *)(mxGPUGetData(out));
roialign_max_backward
<<< blocksPerGrid,threadsPerBlock>>>
(p_derin, p_in,
height, width, numChannels, size,
p_rois, numROIs,
p_out,
subd,transform) ;
/* Wrap the result up as a MATLAB gpuArray for return. */
plhs[0] = mxGPUCreateMxArrayOnGPU(out);
/*
* The mxGPUArray pointers are host-side structures that refer to device
* data. These must be destroyed before leaving the MEX function.
*/
mxGPUDestroyGPUArray(in);
mxGPUDestroyGPUArray(out);
mxGPUDestroyGPUArray(rois);
mxGPUDestroyGPUArray(derin);
}
else
{
mexErrMsgIdAndTxt(errId, errMsg);
}
}
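/* Illustrative sketch only (not called above): the kernel launches in mexFunction are
   not checked for errors. A minimal post-launch check in a MEX context could look like
   this; the error identifier string is an assumption, not taken from the original file. */
static void checkKernelLaunch(void)
{
    cudaError_t err = cudaGetLastError();      /* launch-configuration errors */
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();         /* asynchronous execution errors */
    if (err != cudaSuccess)
        mexErrMsgIdAndTxt("parallel:gpu:roialign:kernelFailed", cudaGetErrorString(err));
}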
|
259ccc2054417ecbde3143625280640955861889.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/gatedeuclidean_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
template<typename Dtype>
void GatedEuclideanLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int_tp count = bottom[0]->count();
Dtype dot;
const Dtype* gateFlag = bottom[3]->cpu_data();
//LOG(INFO) << "=====>GatedEuclideanLoss.forward gateFlag: " << gateFlag[0];
//LOG(INFO) << "====>bottom[0]->num(): " << bottom[0]->num();
  // compute the loss only if the gate flag is on
if (gateFlag[0] == 0) {
return;
}
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
caffe_gpu_sub<Dtype>(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
diff_.mutable_gpu_data());
// Scale the error element-wise
//if (bottom.size() == 3) {
if (bottom.size() == 4) {
caffe_gpu_mul<Dtype>(count, diff_.mutable_gpu_data(),
bottom[2]->gpu_data(), diff_.mutable_gpu_data());
}
caffe_gpu_dot<Dtype>(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
greentea_gpu_sub<Dtype>(this->device_->id(), count,
(cl_mem) (bottom[0]->gpu_data()), 0,
(cl_mem) (bottom[1]->gpu_data()), 0,
(cl_mem) (diff_.mutable_gpu_data()), 0);
// Scale the error element-wise
if (bottom.size() == 3) {
greentea_gpu_mul<Dtype>(this->device_->id(), count,
(cl_mem) (diff_.mutable_gpu_data()), 0,
(cl_mem) (bottom[2]->gpu_data()), 0,
(cl_mem) (diff_.mutable_gpu_data()), 0);
}
greentea_gpu_dot<Dtype>(this->device_->id(), count,
(cl_mem) (diff_.gpu_data()), 0,
(cl_mem) (diff_.gpu_data()), 0, &dot);
#endif // USE_GREENTEA
}
Dtype loss = dot / static_cast<Dtype>(bottom[0]->count(0)) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
template<typename Dtype>
void GatedEuclideanLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* gateFlag = bottom[3]->cpu_data();
//LOG(INFO) << "=====>GatedEuclideanLoss.backward gateFlag: " << gateFlag[0];
  // backpropagate only if the gate flag is on
if (gateFlag[0] == 0) {
return;
}
for (int_tp i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0]
/ static_cast<Dtype>(bottom[0]->count(0));
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
caffe_gpu_axpby(bottom[i]->count(), // count
alpha, // alpha
diff_.gpu_data(), // a
Dtype(0), // beta
bottom[i]->mutable_gpu_diff()); // b
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
greentea_gpu_axpby(this->device_->id(), bottom[i]->count(), alpha,
(cl_mem) (diff_.gpu_data()), 0, Dtype(0),
(cl_mem) (bottom[i]->mutable_gpu_diff()), 0);
#endif // USE_GREENTEA
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(GatedEuclideanLossLayer);
} // namespace caffe
| 259ccc2054417ecbde3143625280640955861889.cu | #include <vector>
#include "caffe/layers/gatedeuclidean_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
template<typename Dtype>
void GatedEuclideanLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int_tp count = bottom[0]->count();
Dtype dot;
const Dtype* gateFlag = bottom[3]->cpu_data();
//LOG(INFO) << "=====>GatedEuclideanLoss.forward gateFlag: " << gateFlag[0];
//LOG(INFO) << "====>bottom[0]->num(): " << bottom[0]->num();
  // compute the loss only if the gate flag is on
if (gateFlag[0] == 0) {
return;
}
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
caffe_gpu_sub<Dtype>(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
diff_.mutable_gpu_data());
// Scale the error element-wise
//if (bottom.size() == 3) {
if (bottom.size() == 4) {
caffe_gpu_mul<Dtype>(count, diff_.mutable_gpu_data(),
bottom[2]->gpu_data(), diff_.mutable_gpu_data());
}
caffe_gpu_dot<Dtype>(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
greentea_gpu_sub<Dtype>(this->device_->id(), count,
(cl_mem) (bottom[0]->gpu_data()), 0,
(cl_mem) (bottom[1]->gpu_data()), 0,
(cl_mem) (diff_.mutable_gpu_data()), 0);
// Scale the error element-wise
if (bottom.size() == 3) {
greentea_gpu_mul<Dtype>(this->device_->id(), count,
(cl_mem) (diff_.mutable_gpu_data()), 0,
(cl_mem) (bottom[2]->gpu_data()), 0,
(cl_mem) (diff_.mutable_gpu_data()), 0);
}
greentea_gpu_dot<Dtype>(this->device_->id(), count,
(cl_mem) (diff_.gpu_data()), 0,
(cl_mem) (diff_.gpu_data()), 0, &dot);
#endif // USE_GREENTEA
}
Dtype loss = dot / static_cast<Dtype>(bottom[0]->count(0)) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
template<typename Dtype>
void GatedEuclideanLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* gateFlag = bottom[3]->cpu_data();
//LOG(INFO) << "=====>GatedEuclideanLoss.backward gateFlag: " << gateFlag[0];
  // backpropagate only if the gate flag is on
if (gateFlag[0] == 0) {
return;
}
for (int_tp i = 0; i < 2; ++i) {
if (propagate_down[i]) {
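      // diff_ stores bottom[0] - bottom[1] (optionally scaled element-wise) and the loss
      // is ||diff_||^2 / (2N), so bottom[0] receives +diff_/N and bottom[1] receives
      // -diff_/N times the incoming top gradient; sign below encodes that flip.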
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0]
/ static_cast<Dtype>(bottom[0]->count(0));
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
caffe_gpu_axpby(bottom[i]->count(), // count
alpha, // alpha
diff_.gpu_data(), // a
Dtype(0), // beta
bottom[i]->mutable_gpu_diff()); // b
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
greentea_gpu_axpby(this->device_->id(), bottom[i]->count(), alpha,
(cl_mem) (diff_.gpu_data()), 0, Dtype(0),
(cl_mem) (bottom[i]->mutable_gpu_diff()), 0);
#endif // USE_GREENTEA
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(GatedEuclideanLossLayer);
} // namespace caffe
|
b5027115b7ce975eb338d2de0a2fc837571bc548.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Oleh Semeniv ([email protected])
//
#include <helpers/PointersManager.h>
#include <math/platformmath.h>
#include <math/templatemath.h>
#include <ops/declarable/helpers/updatersHelpers.h>
#include <system/op_boilerplate.h>
#include "execution/cuda/LaunchDims.h"
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL void nesterovsUpdaterCuda(const void* vx, const sd::LongType* xShapeInfo, const void* vin,
const sd::LongType* inShapeInfo, void* vz, const sd::LongType* zShapeInfo,
void* vst, const sd::LongType* stShapeInfo, const T lr, const T momentum) {
const auto grad = reinterpret_cast<const T*>(vx);
const auto init = reinterpret_cast<const T*>(vin);
auto up = reinterpret_cast<T*>(vz);
auto st = reinterpret_cast<T*>(vst);
__shared__ sd::LongType xLen;
__shared__ T momentumT;
__shared__ bool bEWS, bOrdering, bXZsame, bXInSame, bXStSame;
if (threadIdx.x == 0) {
xLen = shape::length(xShapeInfo);
momentumT = (-momentum - 1);
bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) &&
1 == shape::elementWiseStride(stShapeInfo) && 1 == shape::elementWiseStride(inShapeInfo);
bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) &&
shape::order(xShapeInfo) == shape::order(inShapeInfo) &&
shape::order(xShapeInfo) == shape::order(stShapeInfo);
bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
bXInSame = shape::haveSameShapeAndStrides(xShapeInfo, inShapeInfo);
bXStSame = shape::haveSameShapeAndStrides(xShapeInfo, stShapeInfo);
}
__syncthreads();
sd::LongType coords[SD_MAX_RANK];
for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {
sd::LongType xOffset = i, zOffset = i, initOffset = i, stOffset = i;
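    // Fast path: the flat index i is a valid offset when every buffer has unit
    // element-wise stride and the same ordering; otherwise the offsets are rebuilt
    // from multi-dimensional coordinates below.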
if (!bEWS || !bOrdering) {
shape::index2coords(i, xShapeInfo, coords);
xOffset = shape::getOffset(xShapeInfo, coords);
zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords);
initOffset = bXInSame ? xOffset : shape::getOffset(inShapeInfo, coords);
stOffset = bXStSame ? xOffset : shape::getOffset(stShapeInfo, coords);
}
T prevState = momentum * init[initOffset];
st[stOffset] = prevState - lr * grad[xOffset];
up[zOffset] = prevState + momentumT * st[stOffset];
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
void nesterovsUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMemory,
const hipStream_t* stream, const void* vx, const sd::LongType* xShapeInfo,
const void* vin, const sd::LongType* inShapeInfo, void* vz,
const sd::LongType* zShapeInfo, void* vst, const sd::LongType* stShapeInfo,
const double dLr, const double dMomentum) {
const T lr = static_cast<T>(dLr);
const T momentum = static_cast<T>(dMomentum);
hipLaunchKernelGGL(( nesterovsUpdaterCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMemory, *stream, vx, xShapeInfo, vin, inShapeInfo, vz,
zShapeInfo, vst, stShapeInfo, lr, momentum);
}
///////////////////////////////////////////////////////////////////
void updaterNesterovs(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initState, NDArray& update,
NDArray& stateV, const double dLr, const double dMomentum) {
PointersManager manager(context, "nesterovsUpdater");
dim3 launchDims = updaterDims(gradient.lengthOf());
NDArray::prepareSpecialUse({&update, &stateV}, {&gradient, &initState});
BUILD_SINGLE_SELECTOR(
gradient.dataType(), nesterovsUpdaterCudaLauncher,
(launchDims.y, launchDims.x,launchDims.z, context->getCudaStream(), gradient.specialBuffer(), gradient.specialShapeInfo(),
initState.specialBuffer(), initState.specialShapeInfo(), update.specialBuffer(), update.specialShapeInfo(),
stateV.specialBuffer(), stateV.specialShapeInfo(), dLr, dMomentum),
SD_FLOAT_TYPES);
NDArray::registerSpecialUse({&update, &stateV}, {&gradient, &initState});
manager.synchronize();
}
} // namespace helpers
} // namespace ops
} // namespace sd
| b5027115b7ce975eb338d2de0a2fc837571bc548.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Oleh Semeniv ([email protected])
//
#include <helpers/PointersManager.h>
#include <math/platformmath.h>
#include <math/templatemath.h>
#include <ops/declarable/helpers/updatersHelpers.h>
#include <system/op_boilerplate.h>
#include "execution/cuda/LaunchDims.h"
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template <typename T>
SD_KERNEL void nesterovsUpdaterCuda(const void* vx, const sd::LongType* xShapeInfo, const void* vin,
const sd::LongType* inShapeInfo, void* vz, const sd::LongType* zShapeInfo,
void* vst, const sd::LongType* stShapeInfo, const T lr, const T momentum) {
const auto grad = reinterpret_cast<const T*>(vx);
const auto init = reinterpret_cast<const T*>(vin);
auto up = reinterpret_cast<T*>(vz);
auto st = reinterpret_cast<T*>(vst);
__shared__ sd::LongType xLen;
__shared__ T momentumT;
__shared__ bool bEWS, bOrdering, bXZsame, bXInSame, bXStSame;
if (threadIdx.x == 0) {
xLen = shape::length(xShapeInfo);
momentumT = (-momentum - 1);
bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) &&
1 == shape::elementWiseStride(stShapeInfo) && 1 == shape::elementWiseStride(inShapeInfo);
bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) &&
shape::order(xShapeInfo) == shape::order(inShapeInfo) &&
shape::order(xShapeInfo) == shape::order(stShapeInfo);
bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
bXInSame = shape::haveSameShapeAndStrides(xShapeInfo, inShapeInfo);
bXStSame = shape::haveSameShapeAndStrides(xShapeInfo, stShapeInfo);
}
__syncthreads();
sd::LongType coords[SD_MAX_RANK];
for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {
sd::LongType xOffset = i, zOffset = i, initOffset = i, stOffset = i;
if (!bEWS || !bOrdering) {
shape::index2coords(i, xShapeInfo, coords);
xOffset = shape::getOffset(xShapeInfo, coords);
zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords);
initOffset = bXInSame ? xOffset : shape::getOffset(inShapeInfo, coords);
stOffset = bXStSame ? xOffset : shape::getOffset(stShapeInfo, coords);
}
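    // Nesterov step: vNew = momentum*vOld - lr*grad, and the applied update is
    // momentum*vOld - (1 + momentum)*vNew (momentumT was precomputed as -(momentum + 1)).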
T prevState = momentum * init[initOffset];
st[stOffset] = prevState - lr * grad[xOffset];
up[zOffset] = prevState + momentumT * st[stOffset];
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
void nesterovsUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMemory,
const cudaStream_t* stream, const void* vx, const sd::LongType* xShapeInfo,
const void* vin, const sd::LongType* inShapeInfo, void* vz,
const sd::LongType* zShapeInfo, void* vst, const sd::LongType* stShapeInfo,
const double dLr, const double dMomentum) {
const T lr = static_cast<T>(dLr);
const T momentum = static_cast<T>(dMomentum);
nesterovsUpdaterCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMemory, *stream>>>(vx, xShapeInfo, vin, inShapeInfo, vz,
zShapeInfo, vst, stShapeInfo, lr, momentum);
}
///////////////////////////////////////////////////////////////////
void updaterNesterovs(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initState, NDArray& update,
NDArray& stateV, const double dLr, const double dMomentum) {
PointersManager manager(context, "nesterovsUpdater");
dim3 launchDims = updaterDims(gradient.lengthOf());
NDArray::prepareSpecialUse({&update, &stateV}, {&gradient, &initState});
BUILD_SINGLE_SELECTOR(
gradient.dataType(), nesterovsUpdaterCudaLauncher,
(launchDims.y, launchDims.x,launchDims.z, context->getCudaStream(), gradient.specialBuffer(), gradient.specialShapeInfo(),
initState.specialBuffer(), initState.specialShapeInfo(), update.specialBuffer(), update.specialShapeInfo(),
stateV.specialBuffer(), stateV.specialShapeInfo(), dLr, dMomentum),
SD_FLOAT_TYPES);
NDArray::registerSpecialUse({&update, &stateV}, {&gradient, &initState});
manager.synchronize();
}
} // namespace helpers
} // namespace ops
} // namespace sd
|
69ec7e39b407a626cdf38a864911205541fe4c8b.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <hip/hip_runtime.h>
#include <gauge_field.h>
#include <tune_quda.h>
#include <tune_quda.h>
#include <quda_matrix.h>
#include <gauge_field_order.h>
#ifdef GPU_HISQ_FORCE
// work around for CUDA 7.0 bug on OSX
#if defined(__APPLE__) && TORCH_HIP_VERSION >= 7000 && TORCH_HIP_VERSION < 7050
#define EXPONENT_TYPE Real
#else
#define EXPONENT_TYPE int
#endif
namespace quda{
namespace { // anonymous
#include <svd_quda.h>
}
#define HISQ_UNITARIZE_PI 3.14159265358979323846
#define HISQ_UNITARIZE_PI23 HISQ_UNITARIZE_PI*2.0/3.0
static double unitarize_eps;
static double force_filter;
static double max_det_error;
static bool allow_svd;
static bool svd_only;
static double svd_rel_error;
static double svd_abs_error;
namespace fermion_force {
template <typename F, typename G>
struct UnitarizeForceArg {
int threads;
F force;
F force_old;
G gauge;
int *fails;
const double unitarize_eps;
const double force_filter;
const double max_det_error;
const int allow_svd;
const int svd_only;
const double svd_rel_error;
const double svd_abs_error;
UnitarizeForceArg(const F &force, const F &force_old, const G &gauge, const GaugeField &meta, int *fails,
double unitarize_eps, double force_filter, double max_det_error, int allow_svd,
int svd_only, double svd_rel_error, double svd_abs_error)
: threads(1), force(force), force_old(force_old), gauge(gauge), fails(fails), unitarize_eps(unitarize_eps),
force_filter(force_filter), max_det_error(max_det_error), allow_svd(allow_svd),
svd_only(svd_only), svd_rel_error(svd_rel_error), svd_abs_error(svd_abs_error)
{
for(int dir=0; dir<4; ++dir) threads *= meta.X()[dir];
}
};
void setUnitarizeForceConstants(double unitarize_eps_, double force_filter_,
double max_det_error_, bool allow_svd_, bool svd_only_,
double svd_rel_error_, double svd_abs_error_)
{
unitarize_eps = unitarize_eps_;
force_filter = force_filter_;
max_det_error = max_det_error_;
allow_svd = allow_svd_;
svd_only = svd_only_;
svd_rel_error = svd_rel_error_;
svd_abs_error = svd_abs_error_;
}
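    /* Usage sketch only; the numbers below are illustrative assumptions, not project
       defaults:
         setUnitarizeForceConstants(1e-6,   // unitarize_eps
                                    5e-5,   // force_filter
                                    1e-10,  // max_det_error
                                    true,   // allow_svd
                                    false,  // svd_only
                                    1e-6,   // svd_rel_error
                                    1e-8);  // svd_abs_error
    */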
template<class Real>
class DerivativeCoefficients{
private:
Real b[6];
__device__ __host__
Real computeC00(const Real &, const Real &, const Real &);
__device__ __host__
Real computeC01(const Real &, const Real &, const Real &);
__device__ __host__
Real computeC02(const Real &, const Real &, const Real &);
__device__ __host__
Real computeC11(const Real &, const Real &, const Real &);
__device__ __host__
Real computeC12(const Real &, const Real &, const Real &);
__device__ __host__
Real computeC22(const Real &, const Real &, const Real &);
public:
__device__ __host__ void set(const Real & u, const Real & v, const Real & w);
__device__ __host__
Real getB00() const { return b[0]; }
__device__ __host__
Real getB01() const { return b[1]; }
__device__ __host__
Real getB02() const { return b[2]; }
__device__ __host__
Real getB11() const { return b[3]; }
__device__ __host__
Real getB12() const { return b[4]; }
__device__ __host__
Real getB22() const { return b[5]; }
};
template<class Real>
__device__ __host__
Real DerivativeCoefficients<Real>::computeC00(const Real & u, const Real & v, const Real & w){
Real result = -pow(w,static_cast<EXPONENT_TYPE>(3)) * pow(u,static_cast<EXPONENT_TYPE>(6))
+ 3*v*pow(w,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(4))
+ 3*pow(v,static_cast<EXPONENT_TYPE>(4))*w*pow(u,static_cast<EXPONENT_TYPE>(4))
- pow(v,static_cast<EXPONENT_TYPE>(6))*pow(u,static_cast<EXPONENT_TYPE>(3))
- 4*pow(w,static_cast<EXPONENT_TYPE>(4))*pow(u,static_cast<EXPONENT_TYPE>(3))
- 12*pow(v,static_cast<EXPONENT_TYPE>(3))*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 16*pow(v,static_cast<EXPONENT_TYPE>(2))*pow(w,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(2))
+ 3*pow(v,static_cast<EXPONENT_TYPE>(5))*w*pow(u,static_cast<EXPONENT_TYPE>(2))
- 8*v*pow(w,static_cast<EXPONENT_TYPE>(4))*u
- 3*pow(v,static_cast<EXPONENT_TYPE>(4))*pow(w,static_cast<EXPONENT_TYPE>(2))*u
+ pow(w,static_cast<EXPONENT_TYPE>(5))
+ pow(v,static_cast<EXPONENT_TYPE>(3))*pow(w,static_cast<EXPONENT_TYPE>(3));
return result;
}
template<class Real>
__device__ __host__
Real DerivativeCoefficients<Real>::computeC01(const Real & u, const Real & v, const Real & w){
Real result = - pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(7))
- pow(v,static_cast<EXPONENT_TYPE>(2))*w*pow(u,static_cast<EXPONENT_TYPE>(6))
+ pow(v,static_cast<EXPONENT_TYPE>(4))*pow(u,static_cast<EXPONENT_TYPE>(5)) // This was corrected!
+ 6*v*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(5))
- 5*pow(w,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(4)) // This was corrected!
- pow(v,static_cast<EXPONENT_TYPE>(3))*w*pow(u,static_cast<EXPONENT_TYPE>(4))
- 2*pow(v,static_cast<EXPONENT_TYPE>(5))*pow(u,static_cast<EXPONENT_TYPE>(3))
- 6*pow(v,static_cast<EXPONENT_TYPE>(2))*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 10*v*pow(w,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(2))
+ 6*pow(v,static_cast<EXPONENT_TYPE>(4))*w*pow(u,static_cast<EXPONENT_TYPE>(2))
- 3*pow(w,static_cast<EXPONENT_TYPE>(4))*u
- 6*pow(v,static_cast<EXPONENT_TYPE>(3))*pow(w,static_cast<EXPONENT_TYPE>(2))*u
+ 2*pow(v,static_cast<EXPONENT_TYPE>(2))*pow(w,static_cast<EXPONENT_TYPE>(3));
return result;
}
template<class Real>
__device__ __host__
Real DerivativeCoefficients<Real>::computeC02(const Real & u, const Real & v, const Real & w){
Real result = pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(5))
+ pow(v,static_cast<EXPONENT_TYPE>(2))*w*pow(u,static_cast<EXPONENT_TYPE>(4))
- pow(v,static_cast<EXPONENT_TYPE>(4))*pow(u,static_cast<EXPONENT_TYPE>(3))
- 4*v*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 4*pow(w,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(2))
+ 3*pow(v,static_cast<EXPONENT_TYPE>(3))*w*pow(u,static_cast<EXPONENT_TYPE>(2))
- 3*pow(v,static_cast<EXPONENT_TYPE>(2))*pow(w,static_cast<EXPONENT_TYPE>(2))*u
+ v*pow(w,static_cast<EXPONENT_TYPE>(3));
return result;
}
template<class Real>
__device__ __host__
Real DerivativeCoefficients<Real>::computeC11(const Real & u, const Real & v, const Real & w){
Real result = - w*pow(u,static_cast<EXPONENT_TYPE>(8))
- pow(v,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(7))
+ 7*v*w*pow(u,static_cast<EXPONENT_TYPE>(6))
+ 4*pow(v,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(5))
- 5*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(5))
- 16*pow(v,static_cast<EXPONENT_TYPE>(2))*w*pow(u,static_cast<EXPONENT_TYPE>(4))
- 4*pow(v,static_cast<EXPONENT_TYPE>(4))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 16*v*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(3))
- 3*pow(w,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(2))
+ 12*pow(v,static_cast<EXPONENT_TYPE>(3))*w*pow(u,static_cast<EXPONENT_TYPE>(2))
- 12*pow(v,static_cast<EXPONENT_TYPE>(2))*pow(w,static_cast<EXPONENT_TYPE>(2))*u
+ 3*v*pow(w,static_cast<EXPONENT_TYPE>(3));
return result;
}
template<class Real>
__device__ __host__
Real DerivativeCoefficients<Real>::computeC12(const Real & u, const Real & v, const Real & w){
Real result = w*pow(u,static_cast<EXPONENT_TYPE>(6))
+ pow(v,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(5)) // Fixed this!
- 5*v*w*pow(u,static_cast<EXPONENT_TYPE>(4)) // Fixed this!
- 2*pow(v,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 4*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 6*pow(v,static_cast<EXPONENT_TYPE>(2))*w*pow(u,static_cast<EXPONENT_TYPE>(2))
- 6*v*pow(w,static_cast<EXPONENT_TYPE>(2))*u
+ pow(w,static_cast<EXPONENT_TYPE>(3));
return result;
}
template<class Real>
__device__ __host__
Real DerivativeCoefficients<Real>::computeC22(const Real & u, const Real & v, const Real & w){
Real result = - w*pow(u,static_cast<EXPONENT_TYPE>(4))
- pow(v,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 3*v*w*pow(u,static_cast<EXPONENT_TYPE>(2))
- 3*pow(w,static_cast<EXPONENT_TYPE>(2))*u;
return result;
}
template <class Real>
__device__ __host__
void DerivativeCoefficients<Real>::set(const Real & u, const Real & v, const Real & w){
const Real & denominator = 2.0*pow(w*(u*v-w),static_cast<EXPONENT_TYPE>(3));
b[0] = computeC00(u,v,w)/denominator;
b[1] = computeC01(u,v,w)/denominator;
b[2] = computeC02(u,v,w)/denominator;
b[3] = computeC11(u,v,w)/denominator;
b[4] = computeC12(u,v,w)/denominator;
b[5] = computeC22(u,v,w)/denominator;
return;
}
template<class Float>
__device__ __host__
void accumBothDerivatives(Matrix<complex<Float>,3>* result, const Matrix<complex<Float>,3> &left,
const Matrix<complex<Float>,3> &right, const Matrix<complex<Float>,3> &outer_prod)
{
      const Float temp = (2.0*getTrace(left*outer_prod)).real();
for(int k=0; k<3; ++k){
for(int l=0; l<3; ++l){
// Need to write it this way to get it to work
// on the CPU. Not sure why.
// FIXME check this is true
result->operator()(k,l).x += temp*right(k,l).x;
result->operator()(k,l).y += temp*right(k,l).y;
}
}
return;
}
template<class Cmplx>
__device__ __host__
void accumDerivatives(Matrix<Cmplx,3>* result, const Matrix<Cmplx,3> & left, const Matrix<Cmplx,3> & right, const Matrix<Cmplx,3> & outer_prod)
{
Cmplx temp = getTrace(left*outer_prod);
for(int k=0; k<3; ++k){
for(int l=0; l<3; ++l){
result->operator()(k,l) = temp*right(k,l);
}
}
return;
}
template<class T>
__device__ __host__
T getAbsMin(const T* const array, int size){
T min = fabs(array[0]);
for (int i=1; i<size; ++i) {
T abs_val = fabs(array[i]);
if ((abs_val) < min){ min = abs_val; }
}
return min;
}
template<class Real>
__device__ __host__
inline bool checkAbsoluteError(Real a, Real b, Real epsilon)
{
if( fabs(a-b) < epsilon) return true;
return false;
}
template<class Real>
__device__ __host__
inline bool checkRelativeError(Real a, Real b, Real epsilon)
{
if( fabs((a-b)/b) < epsilon ) return true;
return false;
}
// Compute the reciprocal square root of the matrix q
// Also modify q if the eigenvalues are dangerously small.
template<class Float, typename Arg>
__device__ __host__
void reciprocalRoot(Matrix<complex<Float>,3>* res, DerivativeCoefficients<Float>* deriv_coeffs,
Float f[3], Matrix<complex<Float>,3> & q, Arg &arg) {
Matrix<complex<Float>,3> qsq, tempq;
Float c[3];
Float g[3];
if(!arg.svd_only){
qsq = q*q;
tempq = qsq*q;
c[0] = getTrace(q).x;
c[1] = getTrace(qsq).x/2.0;
c[2] = getTrace(tempq).x/3.0;
g[0] = g[1] = g[2] = c[0]/3.;
Float r,s,theta;
s = c[1]/3. - c[0]*c[0]/18;
r = c[2]/2. - (c[0]/3.)*(c[1] - c[0]*c[0]/9.);
Float cosTheta = r/sqrt(s*s*s);
if (fabs(s) < arg.unitarize_eps) {
cosTheta = 1.;
s = 0.0;
}
if(fabs(cosTheta)>1.0){ r>0 ? theta=0.0 : theta=HISQ_UNITARIZE_PI/3.0; }
else{ theta = acos(cosTheta)/3.0; }
s = 2.0*sqrt(s);
for(int i=0; i<3; ++i){
g[i] += s*cos(theta + (i-1)*HISQ_UNITARIZE_PI23);
}
} // !REUNIT_SVD_ONLY?
//
// Compare the product of the eigenvalues computed thus far to the
// absolute value of the determinant.
// If the determinant is very small or the relative error is greater than some predefined value
// then recompute the eigenvalues using a singular-value decomposition.
// Note that this particular calculation contains multiple branches,
// so it doesn't appear to be particularly well-suited to the GPU
// programming model. However, the analytic calculation of the
// unitarization is extremely fast, and if the SVD routine is not called
// too often, we expect pretty good performance.
//
if (arg.allow_svd) {
bool perform_svd = true;
if (!arg.svd_only) {
const Float det = getDeterminant(q).x;
if( fabs(det) >= arg.svd_abs_error) {
if( checkRelativeError(g[0]*g[1]*g[2],det,arg.svd_rel_error) ) perform_svd = false;
}
}
if(perform_svd){
Matrix<complex<Float>,3> tmp2;
// compute the eigenvalues using the singular value decomposition
computeSVD<Float>(q,tempq,tmp2,g);
// The array g contains the eigenvalues of the matrix q
// The determinant is the product of the eigenvalues, and I can use this
// to check the SVD
const Float determinant = getDeterminant(q).x;
const Float gprod = g[0]*g[1]*g[2];
// Check the svd result for errors
if (fabs(gprod - determinant) > arg.max_det_error) {
printf("Warning: Error in determinant computed by SVD : %g > %g\n", fabs(gprod-determinant), arg.max_det_error);
printLink(q);
#ifdef __CUDA_ARCH__
atomicAdd(arg.fails, 1);
#else
(*arg.fails)++;
#endif
}
} // perform_svd?
} // REUNIT_ALLOW_SVD?
Float delta = getAbsMin(g,3);
if (delta < arg.force_filter) {
for (int i=0; i<3; ++i) {
g[i] += arg.force_filter;
q(i,i).x += arg.force_filter;
}
qsq = q*q; // recalculate Q^2
}
// At this point we have finished with the c's
// use these to store sqrt(g)
for (int i=0; i<3; ++i) c[i] = sqrt(g[i]);
// done with the g's, use these to store u, v, w
g[0] = c[0]+c[1]+c[2];
g[1] = c[0]*c[1] + c[0]*c[2] + c[1]*c[2];
g[2] = c[0]*c[1]*c[2];
// set the derivative coefficients!
deriv_coeffs->set(g[0], g[1], g[2]);
const Float& denominator = g[2]*(g[0]*g[1]-g[2]);
c[0] = (g[0]*g[1]*g[1] - g[2]*(g[0]*g[0]+g[1]))/denominator;
c[1] = (-g[0]*g[0]*g[0] - g[2] + 2.*g[0]*g[1])/denominator;
c[2] = g[0]/denominator;
tempq = c[1]*q + c[2]*qsq;
// Add a real scalar
tempq(0,0).x += c[0];
tempq(1,1).x += c[0];
tempq(2,2).x += c[0];
f[0] = c[0];
f[1] = c[1];
f[2] = c[2];
*res = tempq;
return;
}
// "v" denotes a "fattened" link variable
template<class Float, typename Arg>
__device__ __host__
void getUnitarizeForceSite(Matrix<complex<Float>,3>& result, const Matrix<complex<Float>,3> & v,
const Matrix<complex<Float>,3> & outer_prod, Arg &arg)
{
typedef Matrix<complex<Float>,3> Link;
Float f[3];
Float b[6];
Link v_dagger = conj(v); // okay!
Link q = v_dagger*v; // okay!
Link rsqrt_q;
DerivativeCoefficients<Float> deriv_coeffs;
reciprocalRoot<Float>(&rsqrt_q, &deriv_coeffs, f, q, arg); // approx 529 flops (assumes no SVD)
// Pure hack here
b[0] = deriv_coeffs.getB00();
b[1] = deriv_coeffs.getB01();
b[2] = deriv_coeffs.getB02();
b[3] = deriv_coeffs.getB11();
b[4] = deriv_coeffs.getB12();
b[5] = deriv_coeffs.getB22();
result = rsqrt_q*outer_prod;
// We are now finished with rsqrt_q
Link qv_dagger = q*v_dagger;
Link vv_dagger = v*v_dagger;
Link vqv_dagger = v*qv_dagger;
Link temp = f[1]*vv_dagger + f[2]*vqv_dagger;
temp = f[1]*v_dagger + f[2]*qv_dagger;
Link conj_outer_prod = conj(outer_prod);
temp = f[1]*v + f[2]*v*q;
result = result + outer_prod*temp*v_dagger + f[2]*q*outer_prod*vv_dagger;
result = result + v_dagger*conj_outer_prod*conj(temp) + f[2]*qv_dagger*conj_outer_prod*v_dagger;
Link qsqv_dagger = q*qv_dagger;
Link pv_dagger = b[0]*v_dagger + b[1]*qv_dagger + b[2]*qsqv_dagger;
accumBothDerivatives(&result, v, pv_dagger, outer_prod); // 41 flops
Link rv_dagger = b[1]*v_dagger + b[3]*qv_dagger + b[4]*qsqv_dagger;
Link vq = v*q;
accumBothDerivatives(&result, vq, rv_dagger, outer_prod); // 41 flops
Link sv_dagger = b[2]*v_dagger + b[4]*qv_dagger + b[5]*qsqv_dagger;
Link vqsq = vq*q;
accumBothDerivatives(&result, vqsq, sv_dagger, outer_prod); // 41 flops
return;
// 4528 flops - 17 matrix multiplies (198 flops each) + reciprocal root (approx 529 flops) + accumBothDerivatives (41 each) + miscellaneous
} // get unit force term
template<typename Float, typename Arg>
__global__ void getUnitarizeForceField(Arg arg)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= arg.threads) return;
int parity = 0;
if(idx >= arg.threads/2) {
parity = 1;
idx -= arg.threads/2;
}
// This part of the calculation is always done in double precision
Matrix<complex<double>,3> v, result, oprod;
Matrix<complex<Float>,3> v_tmp, result_tmp, oprod_tmp;
for(int dir=0; dir<4; ++dir){
arg.force_old.load((Float*)(oprod_tmp.data), idx, dir, parity);
arg.gauge.load((Float*)(v_tmp.data), idx, dir, parity);
v = v_tmp;
oprod = oprod_tmp;
getUnitarizeForceSite<double>(result, v, oprod, arg);
result_tmp = result;
arg.force.save((Float*)(result_tmp.data), idx, dir, parity);
} // 4*4528 flops per site
return;
} // getUnitarizeForceField
template <typename Float, typename Arg>
void unitarizeForceCPU(Arg &arg) {
Matrix<complex<double>,3> v, result, oprod;
Matrix<complex<Float>,3> v_tmp, result_tmp, oprod_tmp;
for (int parity=0; parity<2; parity++) {
for (int i=0; i<arg.threads/2; i++) {
for (int dir=0; dir<4; dir++) {
arg.force_old.load((Float*)(oprod_tmp.data), i, dir, parity);
arg.gauge.load((Float*)(v_tmp.data), i, dir, parity);
v = v_tmp;
oprod = oprod_tmp;
getUnitarizeForceSite<double>(result, v, oprod, arg);
result_tmp = result;
arg.force.save((Float*)(result_tmp.data), i, dir, parity);
}
}
}
}
void unitarizeForceCPU(cpuGaugeField& newForce, const cpuGaugeField& oldForce, const cpuGaugeField& gauge)
{
int num_failures = 0;
Matrix<complex<double>,3> old_force, new_force, v;
if (gauge.Order() == QUDA_MILC_GAUGE_ORDER) {
if (gauge.Precision() == QUDA_DOUBLE_PRECISION) {
typedef gauge::MILCOrder<double,18> G;
UnitarizeForceArg<G,G> arg(G(newForce), G(oldForce), G(gauge), gauge, &num_failures, unitarize_eps, force_filter,
max_det_error, allow_svd, svd_only, svd_rel_error, svd_abs_error);
unitarizeForceCPU<double>(arg);
} else if (gauge.Precision() == QUDA_SINGLE_PRECISION) {
typedef gauge::MILCOrder<float,18> G;
UnitarizeForceArg<G,G> arg(G(newForce), G(oldForce), G(gauge), gauge, &num_failures, unitarize_eps, force_filter,
max_det_error, allow_svd, svd_only, svd_rel_error, svd_abs_error);
unitarizeForceCPU<float>(arg);
} else {
errorQuda("Precision = %d not supported", gauge.Precision());
}
} else if (gauge.Order() == QUDA_QDP_GAUGE_ORDER) {
if (gauge.Precision() == QUDA_DOUBLE_PRECISION) {
typedef gauge::QDPOrder<double,18> G;
UnitarizeForceArg<G,G> arg(G(newForce), G(oldForce), G(gauge), gauge, &num_failures, unitarize_eps, force_filter,
max_det_error, allow_svd, svd_only, svd_rel_error, svd_abs_error);
unitarizeForceCPU<double>(arg);
} else if (gauge.Precision() == QUDA_SINGLE_PRECISION) {
typedef gauge::QDPOrder<float,18> G;
UnitarizeForceArg<G,G> arg(G(newForce), G(oldForce), G(gauge), gauge, &num_failures, unitarize_eps, force_filter,
max_det_error, allow_svd, svd_only, svd_rel_error, svd_abs_error);
unitarizeForceCPU<float>(arg);
} else {
errorQuda("Precision = %d not supported", gauge.Precision());
}
} else {
errorQuda("Only MILC and QDP gauge orders supported\n");
}
if (num_failures) errorQuda("Unitarization failed, failures = %d", num_failures);
return;
} // unitarize_force_cpu
template <typename Float, typename Arg>
class UnitarizeForce : public Tunable {
private:
Arg &arg;
const GaugeField &meta;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.threads; }
public:
UnitarizeForce(Arg &arg, const GaugeField& meta) : arg(arg), meta(meta) {
writeAuxString("threads=%d,prec=%lu,stride=%d", meta.Volume(), meta.Precision(), meta.Stride());
}
virtual ~UnitarizeForce() { ; }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( getUnitarizeForceField<Float>), dim3(tp.grid),dim3(tp.block), 0, 0, arg);
}
void preTune() { ; }
void postTune() { hipMemset(arg.fails, 0, sizeof(int)); } // reset fails counter
long long flops() const { return 4ll*4528*meta.Volume(); }
long long bytes() const { return 4ll * arg.threads * (arg.force.Bytes() + arg.force_old.Bytes() + arg.gauge.Bytes()); }
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
}; // UnitarizeForce
template<typename Float, typename Gauge>
void unitarizeForce(Gauge newForce, const Gauge oldForce, const Gauge gauge,
const GaugeField &meta, int* fails, long long *flops) {
UnitarizeForceArg<Gauge,Gauge> arg(newForce, oldForce, gauge, meta, fails, unitarize_eps, force_filter,
max_det_error, allow_svd, svd_only, svd_rel_error, svd_abs_error);
UnitarizeForce<Float,UnitarizeForceArg<Gauge,Gauge> > unitarizeForce(arg, meta);
unitarizeForce.apply(0);
qudaDeviceSynchronize(); // need to synchronize to ensure failure write has completed
if (flops) *flops += unitarizeForce.flops();
checkCudaError();
}
void unitarizeForce(cudaGaugeField &newForce, const cudaGaugeField &oldForce, const cudaGaugeField &gauge,
int* fails, long long *flops) {
if (oldForce.Reconstruct() != QUDA_RECONSTRUCT_NO)
errorQuda("Force field should not use reconstruct %d", oldForce.Reconstruct());
if (newForce.Reconstruct() != QUDA_RECONSTRUCT_NO)
errorQuda("Force field should not use reconstruct %d", newForce.Reconstruct());
      if (gauge.Reconstruct() != QUDA_RECONSTRUCT_NO)
errorQuda("Gauge field should not use reconstruct %d", gauge.Reconstruct());
if (gauge.Precision() != oldForce.Precision() || gauge.Precision() != newForce.Precision())
errorQuda("Mixed precision not supported");
if (gauge.Order() != oldForce.Order() || gauge.Order() != newForce.Order())
errorQuda("Mixed data ordering not supported");
if (gauge.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (gauge.Precision() == QUDA_DOUBLE_PRECISION) {
typedef typename gauge_mapper<double,QUDA_RECONSTRUCT_NO>::type G;
unitarizeForce<double>(G(newForce), G(oldForce), G(gauge), gauge, fails, flops);
} else if (gauge.Precision() == QUDA_SINGLE_PRECISION) {
typedef typename gauge_mapper<float,QUDA_RECONSTRUCT_NO>::type G;
unitarizeForce<float>(G(newForce), G(oldForce), G(gauge), gauge, fails, flops);
}
} else {
errorQuda("Data order %d not supported", gauge.Order());
}
}
} // namespace fermion_force
} // namespace quda
#endif
| 69ec7e39b407a626cdf38a864911205541fe4c8b.cu | #include <cstdlib>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <cuda.h>
#include <gauge_field.h>
#include <tune_quda.h>
#include <tune_quda.h>
#include <quda_matrix.h>
#include <gauge_field_order.h>
#ifdef GPU_HISQ_FORCE
// work around for CUDA 7.0 bug on OSX
#if defined(__APPLE__) && CUDA_VERSION >= 7000 && CUDA_VERSION < 7050
#define EXPONENT_TYPE Real
#else
#define EXPONENT_TYPE int
#endif
namespace quda{
namespace { // anonymous
#include <svd_quda.h>
}
#define HISQ_UNITARIZE_PI 3.14159265358979323846
#define HISQ_UNITARIZE_PI23 HISQ_UNITARIZE_PI*2.0/3.0
static double unitarize_eps;
static double force_filter;
static double max_det_error;
static bool allow_svd;
static bool svd_only;
static double svd_rel_error;
static double svd_abs_error;
namespace fermion_force {
template <typename F, typename G>
struct UnitarizeForceArg {
int threads;
F force;
F force_old;
G gauge;
int *fails;
const double unitarize_eps;
const double force_filter;
const double max_det_error;
const int allow_svd;
const int svd_only;
const double svd_rel_error;
const double svd_abs_error;
UnitarizeForceArg(const F &force, const F &force_old, const G &gauge, const GaugeField &meta, int *fails,
double unitarize_eps, double force_filter, double max_det_error, int allow_svd,
int svd_only, double svd_rel_error, double svd_abs_error)
: threads(1), force(force), force_old(force_old), gauge(gauge), fails(fails), unitarize_eps(unitarize_eps),
force_filter(force_filter), max_det_error(max_det_error), allow_svd(allow_svd),
svd_only(svd_only), svd_rel_error(svd_rel_error), svd_abs_error(svd_abs_error)
{
for(int dir=0; dir<4; ++dir) threads *= meta.X()[dir];
}
};
void setUnitarizeForceConstants(double unitarize_eps_, double force_filter_,
double max_det_error_, bool allow_svd_, bool svd_only_,
double svd_rel_error_, double svd_abs_error_)
{
unitarize_eps = unitarize_eps_;
force_filter = force_filter_;
max_det_error = max_det_error_;
allow_svd = allow_svd_;
svd_only = svd_only_;
svd_rel_error = svd_rel_error_;
svd_abs_error = svd_abs_error_;
}
template<class Real>
class DerivativeCoefficients{
private:
Real b[6];
__device__ __host__
Real computeC00(const Real &, const Real &, const Real &);
__device__ __host__
Real computeC01(const Real &, const Real &, const Real &);
__device__ __host__
Real computeC02(const Real &, const Real &, const Real &);
__device__ __host__
Real computeC11(const Real &, const Real &, const Real &);
__device__ __host__
Real computeC12(const Real &, const Real &, const Real &);
__device__ __host__
Real computeC22(const Real &, const Real &, const Real &);
public:
__device__ __host__ void set(const Real & u, const Real & v, const Real & w);
__device__ __host__
Real getB00() const { return b[0]; }
__device__ __host__
Real getB01() const { return b[1]; }
__device__ __host__
Real getB02() const { return b[2]; }
__device__ __host__
Real getB11() const { return b[3]; }
__device__ __host__
Real getB12() const { return b[4]; }
__device__ __host__
Real getB22() const { return b[5]; }
};
template<class Real>
__device__ __host__
Real DerivativeCoefficients<Real>::computeC00(const Real & u, const Real & v, const Real & w){
Real result = -pow(w,static_cast<EXPONENT_TYPE>(3)) * pow(u,static_cast<EXPONENT_TYPE>(6))
+ 3*v*pow(w,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(4))
+ 3*pow(v,static_cast<EXPONENT_TYPE>(4))*w*pow(u,static_cast<EXPONENT_TYPE>(4))
- pow(v,static_cast<EXPONENT_TYPE>(6))*pow(u,static_cast<EXPONENT_TYPE>(3))
- 4*pow(w,static_cast<EXPONENT_TYPE>(4))*pow(u,static_cast<EXPONENT_TYPE>(3))
- 12*pow(v,static_cast<EXPONENT_TYPE>(3))*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 16*pow(v,static_cast<EXPONENT_TYPE>(2))*pow(w,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(2))
+ 3*pow(v,static_cast<EXPONENT_TYPE>(5))*w*pow(u,static_cast<EXPONENT_TYPE>(2))
- 8*v*pow(w,static_cast<EXPONENT_TYPE>(4))*u
- 3*pow(v,static_cast<EXPONENT_TYPE>(4))*pow(w,static_cast<EXPONENT_TYPE>(2))*u
+ pow(w,static_cast<EXPONENT_TYPE>(5))
+ pow(v,static_cast<EXPONENT_TYPE>(3))*pow(w,static_cast<EXPONENT_TYPE>(3));
return result;
}
template<class Real>
__device__ __host__
Real DerivativeCoefficients<Real>::computeC01(const Real & u, const Real & v, const Real & w){
Real result = - pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(7))
- pow(v,static_cast<EXPONENT_TYPE>(2))*w*pow(u,static_cast<EXPONENT_TYPE>(6))
+ pow(v,static_cast<EXPONENT_TYPE>(4))*pow(u,static_cast<EXPONENT_TYPE>(5)) // This was corrected!
+ 6*v*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(5))
- 5*pow(w,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(4)) // This was corrected!
- pow(v,static_cast<EXPONENT_TYPE>(3))*w*pow(u,static_cast<EXPONENT_TYPE>(4))
- 2*pow(v,static_cast<EXPONENT_TYPE>(5))*pow(u,static_cast<EXPONENT_TYPE>(3))
- 6*pow(v,static_cast<EXPONENT_TYPE>(2))*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 10*v*pow(w,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(2))
+ 6*pow(v,static_cast<EXPONENT_TYPE>(4))*w*pow(u,static_cast<EXPONENT_TYPE>(2))
- 3*pow(w,static_cast<EXPONENT_TYPE>(4))*u
- 6*pow(v,static_cast<EXPONENT_TYPE>(3))*pow(w,static_cast<EXPONENT_TYPE>(2))*u
+ 2*pow(v,static_cast<EXPONENT_TYPE>(2))*pow(w,static_cast<EXPONENT_TYPE>(3));
return result;
}
template<class Real>
__device__ __host__
Real DerivativeCoefficients<Real>::computeC02(const Real & u, const Real & v, const Real & w){
Real result = pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(5))
+ pow(v,static_cast<EXPONENT_TYPE>(2))*w*pow(u,static_cast<EXPONENT_TYPE>(4))
- pow(v,static_cast<EXPONENT_TYPE>(4))*pow(u,static_cast<EXPONENT_TYPE>(3))
- 4*v*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 4*pow(w,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(2))
+ 3*pow(v,static_cast<EXPONENT_TYPE>(3))*w*pow(u,static_cast<EXPONENT_TYPE>(2))
- 3*pow(v,static_cast<EXPONENT_TYPE>(2))*pow(w,static_cast<EXPONENT_TYPE>(2))*u
+ v*pow(w,static_cast<EXPONENT_TYPE>(3));
return result;
}
template<class Real>
__device__ __host__
Real DerivativeCoefficients<Real>::computeC11(const Real & u, const Real & v, const Real & w){
Real result = - w*pow(u,static_cast<EXPONENT_TYPE>(8))
- pow(v,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(7))
+ 7*v*w*pow(u,static_cast<EXPONENT_TYPE>(6))
+ 4*pow(v,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(5))
- 5*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(5))
- 16*pow(v,static_cast<EXPONENT_TYPE>(2))*w*pow(u,static_cast<EXPONENT_TYPE>(4))
- 4*pow(v,static_cast<EXPONENT_TYPE>(4))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 16*v*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(3))
- 3*pow(w,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(2))
+ 12*pow(v,static_cast<EXPONENT_TYPE>(3))*w*pow(u,static_cast<EXPONENT_TYPE>(2))
- 12*pow(v,static_cast<EXPONENT_TYPE>(2))*pow(w,static_cast<EXPONENT_TYPE>(2))*u
+ 3*v*pow(w,static_cast<EXPONENT_TYPE>(3));
return result;
}
template<class Real>
__device__ __host__
Real DerivativeCoefficients<Real>::computeC12(const Real & u, const Real & v, const Real & w){
Real result = w*pow(u,static_cast<EXPONENT_TYPE>(6))
+ pow(v,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(5)) // Fixed this!
- 5*v*w*pow(u,static_cast<EXPONENT_TYPE>(4)) // Fixed this!
- 2*pow(v,static_cast<EXPONENT_TYPE>(3))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 4*pow(w,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 6*pow(v,static_cast<EXPONENT_TYPE>(2))*w*pow(u,static_cast<EXPONENT_TYPE>(2))
- 6*v*pow(w,static_cast<EXPONENT_TYPE>(2))*u
+ pow(w,static_cast<EXPONENT_TYPE>(3));
return result;
}
template<class Real>
__device__ __host__
Real DerivativeCoefficients<Real>::computeC22(const Real & u, const Real & v, const Real & w){
Real result = - w*pow(u,static_cast<EXPONENT_TYPE>(4))
- pow(v,static_cast<EXPONENT_TYPE>(2))*pow(u,static_cast<EXPONENT_TYPE>(3))
+ 3*v*w*pow(u,static_cast<EXPONENT_TYPE>(2))
- 3*pow(w,static_cast<EXPONENT_TYPE>(2))*u;
return result;
}
template <class Real>
__device__ __host__
void DerivativeCoefficients<Real>::set(const Real & u, const Real & v, const Real & w){
const Real & denominator = 2.0*pow(w*(u*v-w),static_cast<EXPONENT_TYPE>(3));
b[0] = computeC00(u,v,w)/denominator;
b[1] = computeC01(u,v,w)/denominator;
b[2] = computeC02(u,v,w)/denominator;
b[3] = computeC11(u,v,w)/denominator;
b[4] = computeC12(u,v,w)/denominator;
b[5] = computeC22(u,v,w)/denominator;
return;
}
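// The six stored values b[0..5] are the independent entries of the symmetric
// coefficient matrix B_ij = C_ij(u,v,w) / (2*[w*(u*v-w)]^3), with the C_ij given
// by the computeCxx() polynomials above.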
template<class Float>
__device__ __host__
void accumBothDerivatives(Matrix<complex<Float>,3>* result, const Matrix<complex<Float>,3> &left,
const Matrix<complex<Float>,3> &right, const Matrix<complex<Float>,3> &outer_prod)
{
      const Float temp = (2.0*getTrace(left*outer_prod)).real();
for(int k=0; k<3; ++k){
for(int l=0; l<3; ++l){
// Need to write it this way to get it to work
// on the CPU. Not sure why.
// FIXME check this is true
result->operator()(k,l).x += temp*right(k,l).x;
result->operator()(k,l).y += temp*right(k,l).y;
}
}
return;
}
template<class Cmplx>
__device__ __host__
void accumDerivatives(Matrix<Cmplx,3>* result, const Matrix<Cmplx,3> & left, const Matrix<Cmplx,3> & right, const Matrix<Cmplx,3> & outer_prod)
{
Cmplx temp = getTrace(left*outer_prod);
for(int k=0; k<3; ++k){
for(int l=0; l<3; ++l){
result->operator()(k,l) = temp*right(k,l);
}
}
return;
}
template<class T>
__device__ __host__
T getAbsMin(const T* const array, int size){
T min = fabs(array[0]);
for (int i=1; i<size; ++i) {
T abs_val = fabs(array[i]);
if ((abs_val) < min){ min = abs_val; }
}
return min;
}
template<class Real>
__device__ __host__
inline bool checkAbsoluteError(Real a, Real b, Real epsilon)
{
if( fabs(a-b) < epsilon) return true;
return false;
}
template<class Real>
__device__ __host__
inline bool checkRelativeError(Real a, Real b, Real epsilon)
{
if( fabs((a-b)/b) < epsilon ) return true;
return false;
}
// Compute the reciprocal square root of the matrix q
// Also modify q if the eigenvalues are dangerously small.
template<class Float, typename Arg>
__device__ __host__
void reciprocalRoot(Matrix<complex<Float>,3>* res, DerivativeCoefficients<Float>* deriv_coeffs,
Float f[3], Matrix<complex<Float>,3> & q, Arg &arg) {
Matrix<complex<Float>,3> qsq, tempq;
Float c[3];
Float g[3];
if(!arg.svd_only){
qsq = q*q;
tempq = qsq*q;
c[0] = getTrace(q).x;
c[1] = getTrace(qsq).x/2.0;
c[2] = getTrace(tempq).x/3.0;
g[0] = g[1] = g[2] = c[0]/3.;
Float r,s,theta;
s = c[1]/3. - c[0]*c[0]/18;
r = c[2]/2. - (c[0]/3.)*(c[1] - c[0]*c[0]/9.);
Float cosTheta = r/sqrt(s*s*s);
if (fabs(s) < arg.unitarize_eps) {
cosTheta = 1.;
s = 0.0;
}
if(fabs(cosTheta)>1.0){ r>0 ? theta=0.0 : theta=HISQ_UNITARIZE_PI/3.0; }
else{ theta = acos(cosTheta)/3.0; }
s = 2.0*sqrt(s);
for(int i=0; i<3; ++i){
g[i] += s*cos(theta + (i-1)*HISQ_UNITARIZE_PI23);
}
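        // At this point g[0..2] hold the eigenvalues of q, obtained from the
        // closed-form (trigonometric/Cardano) solution of its characteristic cubic.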
} // !REUNIT_SVD_ONLY?
//
// Compare the product of the eigenvalues computed thus far to the
// absolute value of the determinant.
// If the determinant is very small or the relative error is greater than some predefined value
// then recompute the eigenvalues using a singular-value decomposition.
// Note that this particular calculation contains multiple branches,
// so it doesn't appear to be particularly well-suited to the GPU
// programming model. However, the analytic calculation of the
// unitarization is extremely fast, and if the SVD routine is not called
// too often, we expect pretty good performance.
//
if (arg.allow_svd) {
bool perform_svd = true;
if (!arg.svd_only) {
const Float det = getDeterminant(q).x;
if( fabs(det) >= arg.svd_abs_error) {
if( checkRelativeError(g[0]*g[1]*g[2],det,arg.svd_rel_error) ) perform_svd = false;
}
}
if(perform_svd){
Matrix<complex<Float>,3> tmp2;
// compute the eigenvalues using the singular value decomposition
computeSVD<Float>(q,tempq,tmp2,g);
// The array g contains the eigenvalues of the matrix q
// The determinant is the product of the eigenvalues, and I can use this
// to check the SVD
const Float determinant = getDeterminant(q).x;
const Float gprod = g[0]*g[1]*g[2];
// Check the svd result for errors
if (fabs(gprod - determinant) > arg.max_det_error) {
printf("Warning: Error in determinant computed by SVD : %g > %g\n", fabs(gprod-determinant), arg.max_det_error);
printLink(q);
#ifdef __CUDA_ARCH__
atomicAdd(arg.fails, 1);
#else
(*arg.fails)++;
#endif
}
} // perform_svd?
} // REUNIT_ALLOW_SVD?
Float delta = getAbsMin(g,3);
if (delta < arg.force_filter) {
for (int i=0; i<3; ++i) {
g[i] += arg.force_filter;
q(i,i).x += arg.force_filter;
}
qsq = q*q; // recalculate Q^2
}
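      // When the smallest eigenvalue falls below the force filter, the shift above
      // regularizes both g[] and q (and the recomputed Q^2) so that the reciprocal
      // square root below stays finite.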
// At this point we have finished with the c's
// use these to store sqrt(g)
for (int i=0; i<3; ++i) c[i] = sqrt(g[i]);
// done with the g's, use these to store u, v, w
g[0] = c[0]+c[1]+c[2];
g[1] = c[0]*c[1] + c[0]*c[2] + c[1]*c[2];
g[2] = c[0]*c[1]*c[2];
// set the derivative coefficients!
deriv_coeffs->set(g[0], g[1], g[2]);
const Float& denominator = g[2]*(g[0]*g[1]-g[2]);
c[0] = (g[0]*g[1]*g[1] - g[2]*(g[0]*g[0]+g[1]))/denominator;
c[1] = (-g[0]*g[0]*g[0] - g[2] + 2.*g[0]*g[1])/denominator;
c[2] = g[0]/denominator;
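      // With u=g[0], v=g[1], w=g[2] (the symmetric functions of the sqrt-eigenvalues),
      // c[0..2] are the coefficients of the Cayley-Hamilton expansion
      //   Q^{-1/2} = c[0]*I + c[1]*Q + c[2]*Q^2,
      // which is assembled into tempq below.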
tempq = c[1]*q + c[2]*qsq;
// Add a real scalar
tempq(0,0).x += c[0];
tempq(1,1).x += c[0];
tempq(2,2).x += c[0];
f[0] = c[0];
f[1] = c[1];
f[2] = c[2];
*res = tempq;
return;
}
// "v" denotes a "fattened" link variable
template<class Float, typename Arg>
__device__ __host__
void getUnitarizeForceSite(Matrix<complex<Float>,3>& result, const Matrix<complex<Float>,3> & v,
const Matrix<complex<Float>,3> & outer_prod, Arg &arg)
{
typedef Matrix<complex<Float>,3> Link;
Float f[3];
Float b[6];
Link v_dagger = conj(v); // okay!
Link q = v_dagger*v; // okay!
Link rsqrt_q;
DerivativeCoefficients<Float> deriv_coeffs;
reciprocalRoot<Float>(&rsqrt_q, &deriv_coeffs, f, q, arg); // approx 529 flops (assumes no SVD)
// Pure hack here
b[0] = deriv_coeffs.getB00();
b[1] = deriv_coeffs.getB01();
b[2] = deriv_coeffs.getB02();
b[3] = deriv_coeffs.getB11();
b[4] = deriv_coeffs.getB12();
b[5] = deriv_coeffs.getB22();
result = rsqrt_q*outer_prod;
// We are now finished with rsqrt_q
Link qv_dagger = q*v_dagger;
Link vv_dagger = v*v_dagger;
Link vqv_dagger = v*qv_dagger;
Link temp = f[1]*vv_dagger + f[2]*vqv_dagger;
temp = f[1]*v_dagger + f[2]*qv_dagger;
Link conj_outer_prod = conj(outer_prod);
temp = f[1]*v + f[2]*v*q;
result = result + outer_prod*temp*v_dagger + f[2]*q*outer_prod*vv_dagger;
result = result + v_dagger*conj_outer_prod*conj(temp) + f[2]*qv_dagger*conj_outer_prod*v_dagger;
Link qsqv_dagger = q*qv_dagger;
Link pv_dagger = b[0]*v_dagger + b[1]*qv_dagger + b[2]*qsqv_dagger;
accumBothDerivatives(&result, v, pv_dagger, outer_prod); // 41 flops
Link rv_dagger = b[1]*v_dagger + b[3]*qv_dagger + b[4]*qsqv_dagger;
Link vq = v*q;
accumBothDerivatives(&result, vq, rv_dagger, outer_prod); // 41 flops
Link sv_dagger = b[2]*v_dagger + b[4]*qv_dagger + b[5]*qsqv_dagger;
Link vqsq = vq*q;
accumBothDerivatives(&result, vqsq, sv_dagger, outer_prod); // 41 flops
return;
// 4528 flops - 17 matrix multiplies (198 flops each) + reciprocal root (approx 529 flops) + accumBothDerivatives (41 each) + miscellaneous
} // get unit force term
template<typename Float, typename Arg>
__global__ void getUnitarizeForceField(Arg arg)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= arg.threads) return;
int parity = 0;
if(idx >= arg.threads/2) {
parity = 1;
idx -= arg.threads/2;
}
// This part of the calculation is always done in double precision
Matrix<complex<double>,3> v, result, oprod;
Matrix<complex<Float>,3> v_tmp, result_tmp, oprod_tmp;
for(int dir=0; dir<4; ++dir){
arg.force_old.load((Float*)(oprod_tmp.data), idx, dir, parity);
arg.gauge.load((Float*)(v_tmp.data), idx, dir, parity);
v = v_tmp;
oprod = oprod_tmp;
getUnitarizeForceSite<double>(result, v, oprod, arg);
result_tmp = result;
arg.force.save((Float*)(result_tmp.data), idx, dir, parity);
} // 4*4528 flops per site
return;
} // getUnitarizeForceField
template <typename Float, typename Arg>
void unitarizeForceCPU(Arg &arg) {
Matrix<complex<double>,3> v, result, oprod;
Matrix<complex<Float>,3> v_tmp, result_tmp, oprod_tmp;
for (int parity=0; parity<2; parity++) {
for (int i=0; i<arg.threads/2; i++) {
for (int dir=0; dir<4; dir++) {
arg.force_old.load((Float*)(oprod_tmp.data), i, dir, parity);
arg.gauge.load((Float*)(v_tmp.data), i, dir, parity);
v = v_tmp;
oprod = oprod_tmp;
getUnitarizeForceSite<double>(result, v, oprod, arg);
result_tmp = result;
arg.force.save((Float*)(result_tmp.data), i, dir, parity);
}
}
}
}
void unitarizeForceCPU(cpuGaugeField& newForce, const cpuGaugeField& oldForce, const cpuGaugeField& gauge)
{
int num_failures = 0;
Matrix<complex<double>,3> old_force, new_force, v;
if (gauge.Order() == QUDA_MILC_GAUGE_ORDER) {
if (gauge.Precision() == QUDA_DOUBLE_PRECISION) {
typedef gauge::MILCOrder<double,18> G;
UnitarizeForceArg<G,G> arg(G(newForce), G(oldForce), G(gauge), gauge, &num_failures, unitarize_eps, force_filter,
max_det_error, allow_svd, svd_only, svd_rel_error, svd_abs_error);
unitarizeForceCPU<double>(arg);
} else if (gauge.Precision() == QUDA_SINGLE_PRECISION) {
typedef gauge::MILCOrder<float,18> G;
UnitarizeForceArg<G,G> arg(G(newForce), G(oldForce), G(gauge), gauge, &num_failures, unitarize_eps, force_filter,
max_det_error, allow_svd, svd_only, svd_rel_error, svd_abs_error);
unitarizeForceCPU<float>(arg);
} else {
errorQuda("Precision = %d not supported", gauge.Precision());
}
} else if (gauge.Order() == QUDA_QDP_GAUGE_ORDER) {
if (gauge.Precision() == QUDA_DOUBLE_PRECISION) {
typedef gauge::QDPOrder<double,18> G;
UnitarizeForceArg<G,G> arg(G(newForce), G(oldForce), G(gauge), gauge, &num_failures, unitarize_eps, force_filter,
max_det_error, allow_svd, svd_only, svd_rel_error, svd_abs_error);
unitarizeForceCPU<double>(arg);
} else if (gauge.Precision() == QUDA_SINGLE_PRECISION) {
typedef gauge::QDPOrder<float,18> G;
UnitarizeForceArg<G,G> arg(G(newForce), G(oldForce), G(gauge), gauge, &num_failures, unitarize_eps, force_filter,
max_det_error, allow_svd, svd_only, svd_rel_error, svd_abs_error);
unitarizeForceCPU<float>(arg);
} else {
errorQuda("Precision = %d not supported", gauge.Precision());
}
} else {
errorQuda("Only MILC and QDP gauge orders supported\n");
}
if (num_failures) errorQuda("Unitarization failed, failures = %d", num_failures);
return;
} // unitarize_force_cpu
template <typename Float, typename Arg>
class UnitarizeForce : public Tunable {
private:
Arg &arg;
const GaugeField &meta;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.threads; }
public:
UnitarizeForce(Arg &arg, const GaugeField& meta) : arg(arg), meta(meta) {
writeAuxString("threads=%d,prec=%lu,stride=%d", meta.Volume(), meta.Precision(), meta.Stride());
}
virtual ~UnitarizeForce() { ; }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
getUnitarizeForceField<Float><<<tp.grid,tp.block>>>(arg);
}
void preTune() { ; }
void postTune() { cudaMemset(arg.fails, 0, sizeof(int)); } // reset fails counter
long long flops() const { return 4ll*4528*meta.Volume(); }
long long bytes() const { return 4ll * arg.threads * (arg.force.Bytes() + arg.force_old.Bytes() + arg.gauge.Bytes()); }
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
}; // UnitarizeForce
template<typename Float, typename Gauge>
void unitarizeForce(Gauge newForce, const Gauge oldForce, const Gauge gauge,
const GaugeField &meta, int* fails, long long *flops) {
UnitarizeForceArg<Gauge,Gauge> arg(newForce, oldForce, gauge, meta, fails, unitarize_eps, force_filter,
max_det_error, allow_svd, svd_only, svd_rel_error, svd_abs_error);
UnitarizeForce<Float,UnitarizeForceArg<Gauge,Gauge> > unitarizeForce(arg, meta);
unitarizeForce.apply(0);
qudaDeviceSynchronize(); // need to synchronize to ensure failure write has completed
if (flops) *flops += unitarizeForce.flops();
checkCudaError();
}
void unitarizeForce(cudaGaugeField &newForce, const cudaGaugeField &oldForce, const cudaGaugeField &gauge,
int* fails, long long *flops) {
if (oldForce.Reconstruct() != QUDA_RECONSTRUCT_NO)
errorQuda("Force field should not use reconstruct %d", oldForce.Reconstruct());
if (newForce.Reconstruct() != QUDA_RECONSTRUCT_NO)
errorQuda("Force field should not use reconstruct %d", newForce.Reconstruct());
      if (gauge.Reconstruct() != QUDA_RECONSTRUCT_NO)
        errorQuda("Gauge field should not use reconstruct %d", gauge.Reconstruct());
if (gauge.Precision() != oldForce.Precision() || gauge.Precision() != newForce.Precision())
errorQuda("Mixed precision not supported");
if (gauge.Order() != oldForce.Order() || gauge.Order() != newForce.Order())
errorQuda("Mixed data ordering not supported");
if (gauge.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (gauge.Precision() == QUDA_DOUBLE_PRECISION) {
typedef typename gauge_mapper<double,QUDA_RECONSTRUCT_NO>::type G;
unitarizeForce<double>(G(newForce), G(oldForce), G(gauge), gauge, fails, flops);
} else if (gauge.Precision() == QUDA_SINGLE_PRECISION) {
typedef typename gauge_mapper<float,QUDA_RECONSTRUCT_NO>::type G;
unitarizeForce<float>(G(newForce), G(oldForce), G(gauge), gauge, fails, flops);
}
} else {
errorQuda("Data order %d not supported", gauge.Order());
}
}
} // namespace fermion_force
} // namespace quda
#endif
|
4c2e661b4c7433b50fc7a6f4fb27a999c0805678.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP Scan
// Given a list (lst) of length n
// Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ...
// +
// lst[n-1]}
#include <wb.h>
#define BLOCK_SIZE 512
#define SECTION_SIZE (BLOCK_SIZE * 2)
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void scan(float *X, float *Y, float *S, int inputSize, int const section_size) {
// Use Brent-Kung algorithm
// CAUTION! the length parameter of shared memory should be a constant,
// SECTION_SIZE should be bigger than section_size
__shared__ float XY[SECTION_SIZE];
int i = 2*blockIdx.x*blockDim.x + threadIdx.x;
if (i < inputSize) XY[threadIdx.x] = X[i];
if (i+blockDim.x < inputSize) XY[threadIdx.x+blockDim.x] = X[i+blockDim.x];
  // Reduction
for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) {
__syncthreads();
int index = (threadIdx.x+1) * 2* stride -1;
if (index < section_size) {
XY[index] += XY[index - stride];
}
}
// Distribution
for (int stride = ceil(section_size/4.0); stride > 0; stride /= 2) {
__syncthreads();
int index = (threadIdx.x+1)*stride*2 - 1;
if(index + stride < section_size) {
XY[index + stride] += XY[index];
}
}
__syncthreads();
if (i < inputSize) Y[i] = XY[threadIdx.x];
if (i+blockDim.x < inputSize) Y[i+blockDim.x] = XY[threadIdx.x+blockDim.x];
if (S) {
__syncthreads();
if (threadIdx.x == (blockDim.x-1)) {
S[blockIdx.x] = XY[section_size-1];
}
}
}
__global__ void sumUp(float *S, float *Y, int len) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < len && blockIdx.x > 0) {
Y[i] += S[blockIdx.x-1];
}
}
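// A minimal host-side reference for the three-phase device scan above: a plain
// sequential inclusive prefix sum. This is a validation sketch only -- the helper
// name hostInclusiveScan is an illustrative addition, not part of the original
// interface -- but comparing its output against hostOutput on small inputs is a
// quick way to sanity-check the kernels.
static void hostInclusiveScan(const float *in, float *out, int n) {
  float running = 0.0f;
  for (int i = 0; i < n; ++i) {
    running += in[i];   // out[i] = in[0] + in[1] + ... + in[i]
    out[i] = running;
  }
}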
int main(int argc, char **argv) {
wbArg_t args;
float *hostInput; // The input 1D list
float *hostOutput; // The output list
float *deviceInput;
float *deviceOutput;
int numElements; // number of elements in the list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *)wbImport(wbArg_getInputFile(args, 0), &numElements);
hostOutput = (float *)malloc(numElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ",
numElements);
wbTime_start(GPU, "Allocating GPU memory.");
wbCheck(hipMalloc((void **)&deviceInput, numElements * sizeof(float)));
wbCheck(hipMalloc((void **)&deviceOutput, numElements * sizeof(float)));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Clearing output memory.");
wbCheck(hipMemset(deviceOutput, 0, numElements * sizeof(float)));
wbTime_stop(GPU, "Clearing output memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
wbCheck(hipMemcpy(deviceInput, hostInput, numElements * sizeof(float),
hipMemcpyHostToDevice));
wbTime_stop(GPU, "Copying input memory to the GPU.");
// Define some vars
  int SECTION_CNT = ceil(numElements/(SECTION_SIZE*1.0));
float *auxiliary;
hipMalloc((void **) &auxiliary, SECTION_CNT * sizeof(float));
wbTime_start(Compute, "Performing CUDA computation");
// Phase 1
dim3 DimBlock(BLOCK_SIZE, 1, 1);
dim3 DimGrid(ceil(numElements/(SECTION_SIZE*1.0)), 1, 1);
hipLaunchKernelGGL(( scan), dim3(DimGrid), dim3(DimBlock), 0, 0, deviceInput, deviceOutput, auxiliary, numElements, SECTION_SIZE);
hipDeviceSynchronize();
// Phase 2
dim3 DimBlock2(ceil(SECTION_CNT/2.0), 1, 1);
dim3 DimGrid2(1, 1, 1);
hipLaunchKernelGGL(( scan), dim3(DimGrid2), dim3(DimBlock2), 0, 0, auxiliary, auxiliary, NULL, SECTION_CNT, SECTION_CNT);
hipDeviceSynchronize();
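  // Note: Phase 2 scans the per-block sums with a single block, so this scheme
  // assumes SECTION_CNT <= SECTION_SIZE, i.e. at most SECTION_SIZE*SECTION_SIZE
  // input elements; larger inputs would need the auxiliary array scanned recursively.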
// Phase 3
dim3 DimBlock3(SECTION_SIZE, 1, 1);
dim3 DimGrid3(ceil(numElements/(SECTION_SIZE*1.0)), 1, 1);
hipLaunchKernelGGL(( sumUp), dim3(DimGrid3), dim3(DimBlock3), 0, 0, auxiliary, deviceOutput, numElements);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(hipMemcpy(hostOutput, deviceOutput, numElements * sizeof(float),
hipMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
  hipFree(deviceInput);
  hipFree(deviceOutput);
  hipFree(auxiliary);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, numElements);
free(hostInput);
free(hostOutput);
return 0;
}
| 4c2e661b4c7433b50fc7a6f4fb27a999c0805678.cu | // MP Scan
// Given a list (lst) of length n
// Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ...
// +
// lst[n-1]}
#include <wb.h>
#define BLOCK_SIZE 512
#define SECTION_SIZE (BLOCK_SIZE * 2)
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void scan(float *X, float *Y, float *S, int inputSize, int const section_size) {
// Use Brent-Kung algorithm
// CAUTION! the length parameter of shared memory should be a constant,
// SECTION_SIZE should be bigger than section_size
__shared__ float XY[SECTION_SIZE];
int i = 2*blockIdx.x*blockDim.x + threadIdx.x;
if (i < inputSize) XY[threadIdx.x] = X[i];
if (i+blockDim.x < inputSize) XY[threadIdx.x+blockDim.x] = X[i+blockDim.x];
  // Reduction
for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) {
__syncthreads();
int index = (threadIdx.x+1) * 2* stride -1;
if (index < section_size) {
XY[index] += XY[index - stride];
}
}
// Distribution
for (int stride = ceil(section_size/4.0); stride > 0; stride /= 2) {
__syncthreads();
int index = (threadIdx.x+1)*stride*2 - 1;
if(index + stride < section_size) {
XY[index + stride] += XY[index];
}
}
__syncthreads();
if (i < inputSize) Y[i] = XY[threadIdx.x];
if (i+blockDim.x < inputSize) Y[i+blockDim.x] = XY[threadIdx.x+blockDim.x];
if (S) {
__syncthreads();
if (threadIdx.x == (blockDim.x-1)) {
S[blockIdx.x] = XY[section_size-1];
}
}
}
__global__ void sumUp(float *S, float *Y, int len) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < len && blockIdx.x > 0) {
Y[i] += S[blockIdx.x-1];
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostInput; // The input 1D list
float *hostOutput; // The output list
float *deviceInput;
float *deviceOutput;
int numElements; // number of elements in the list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *)wbImport(wbArg_getInputFile(args, 0), &numElements);
hostOutput = (float *)malloc(numElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ",
numElements);
wbTime_start(GPU, "Allocating GPU memory.");
wbCheck(cudaMalloc((void **)&deviceInput, numElements * sizeof(float)));
wbCheck(cudaMalloc((void **)&deviceOutput, numElements * sizeof(float)));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Clearing output memory.");
wbCheck(cudaMemset(deviceOutput, 0, numElements * sizeof(float)));
wbTime_stop(GPU, "Clearing output memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
wbCheck(cudaMemcpy(deviceInput, hostInput, numElements * sizeof(float),
cudaMemcpyHostToDevice));
wbTime_stop(GPU, "Copying input memory to the GPU.");
// Define some vars
  int SECTION_CNT = ceil(numElements/(SECTION_SIZE*1.0));
float *auxiliary;
cudaMalloc((void **) &auxiliary, SECTION_CNT * sizeof(float));
wbTime_start(Compute, "Performing CUDA computation");
// Phase 1
dim3 DimBlock(BLOCK_SIZE, 1, 1);
dim3 DimGrid(ceil(numElements/(SECTION_SIZE*1.0)), 1, 1);
scan<<<DimGrid, DimBlock>>>(deviceInput, deviceOutput, auxiliary, numElements, SECTION_SIZE);
cudaDeviceSynchronize();
// Phase 2
dim3 DimBlock2(ceil(SECTION_CNT/2.0), 1, 1);
dim3 DimGrid2(1, 1, 1);
scan<<<DimGrid2, DimBlock2>>>(auxiliary, auxiliary, NULL, SECTION_CNT, SECTION_CNT);
cudaDeviceSynchronize();
// Phase 3
dim3 DimBlock3(SECTION_SIZE, 1, 1);
dim3 DimGrid3(ceil(numElements/(SECTION_SIZE*1.0)), 1, 1);
sumUp<<<DimGrid3, DimBlock3>>>(auxiliary, deviceOutput, numElements);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(cudaMemcpy(hostOutput, deviceOutput, numElements * sizeof(float),
cudaMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
  cudaFree(deviceInput);
  cudaFree(deviceOutput);
  cudaFree(auxiliary);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, numElements);
free(hostInput);
free(hostOutput);
return 0;
}
|
c1d842c25819f7dfdd187a04ca78b229ac6bd1e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Code to do one dimensional spiking model from Mayte's note */
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <hiprand/hiprand.h>
#include "parameters.h"
#include "EventDrivenMap.hpp"
#include "hip/hip_vector_types.h"
#define CUDA_ERROR_CHECK
#define CUDA_CALL( err) __cudaCall( err, __FILE__, __LINE__ )
#define CUDA_CHECK_ERROR() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
}
inline void __curandCall( hiprandStatus_t err, const char *file, const int line)
{
#ifdef CURAND_ERROR_CHECK
if ( HIPRAND_STATUS_SUCCESS != err)
{
fprintf( stderr, "curandCall() failed at %s:%i",
file, line);
exit( -1 );
}
#endif
}
EventDrivenMap::EventDrivenMap( const ParameterList* pParameterList)
{
mNetworkSize = (*pParameterList).networkSize;
mNoThreads = (*pParameterList).noThreads;
mNoBlocks = (mNetworkSize+mNoThreads-1)/mNoThreads;
mDomainSize = (*pParameterList).domainSize;
mDx = mDomainSize/(mNetworkSize-1);
mDt = (*pParameterList).timestep;
CUDA_CALL( hipMalloc( &mpGlobalState, mNetworkSize*mNetworkSize*sizeof(float4)));
CUDA_CALL( hipMalloc( &mpRefractTime, mNetworkSize*mNetworkSize*sizeof(float)));
CUDA_CALL( hipMalloc( &mpGlobalZone, mNetworkSize*mNetworkSize*sizeof(short)));
CUDA_CALL( hipMalloc( &mpFiringVal, mNetworkSize*mNetworkSize*sizeof(firing)));
CUDA_CALL( hipMalloc( &mpFiringValTemp, mNoBlocks*sizeof(firing)));
CUDA_CALL( hipMalloc( &mpEventNo, sizeof(int)));
CUDA_CALL( hipHostMalloc( &mpHost_eventNo, sizeof(int)));
}
EventDrivenMap::~EventDrivenMap()
{
hipFree( mpGlobalState);
hipFree( mpGlobalZone);
hipFree( mpRefractTime);
hipFree( mpFiringVal);
hipFree( mpFiringValTemp);
hipFree( mpEventNo);
  hipHostFree( mpHost_eventNo);
}
void EventDrivenMap::SimulateNetwork( const float finalTime)
{
InitialiseNetwork();
while (mTime<finalTime)
{
SimulateStep();
}
}
void EventDrivenMap::InitialiseNetwork()
{
mTime = 0.0f;
  hipLaunchKernelGGL(( InitialiseNetworkKernel), dim3(mNoBlocks),dim3(mNoThreads), 0, 0, mpGlobalState,
      mpGlobalZone);
CUDA_CHECK_ERROR();
hipLaunchKernelGGL(( ResetMemoryKernel), dim3(mNoBlocks),dim3(mNoThreads), 0, 0, mpFiringVal, mNetworkSize, mDt);
CUDA_CHECK_ERROR();
}
__global__ void InitialiseNetworkKernel( float4* pGlobalState,
unsigned int* pGlobalZone)
{
unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
if (index<networkSize)
{
pGlobalState = "something";
pGlobalZone = "something";
}
}
void EventDrivenMap::SimulateStep()
{
mTime = 0.0f;
while (mTime<mDt)
{
// First, find spiking cell
FindMinimumSpikeTime();
// Update all cells
hipLaunchKernelGGL(( UpdateZone1Kernel), dim3(mNoBlocks),dim3(mNoThreads), 0, 0, (*mpFiringValTemp)[0].time,
mpGlobalState, mpGlobalZone);
CUDA_CHECK_ERROR();
hipLaunchKernelGGL(( UpdateZone2Kernel), dim3(mNoBlocks),dim3(mNoThreads), 0, 0, (*mpFiringValTemp)[0].time,
mpGlobalState, mpGlobalZone);
CUDA_CHECK_ERROR();
hipLaunchKernelGGL(( UpdateZone3Kernel), dim3(mNoBlocks),dim3(mNoThreads), 0, 0, (*mpFiringValTemp)[0].time,
mpGlobalState, mpGlobalZone);
CUDA_CHECK_ERROR();
hipLaunchKernelGGL(( UpdateZone4Kernel), dim3(mNoBlocks),dim3(mNoThreads), 0, 0, (*mpFiringValTemp)[0].time,
mpGlobalState, mpGlobalZone, mpRefractTime);
CUDA_CHECK_ERROR();
// Update time
mTime += (*mpSpikingCell).time;
// Reset neuron that fired
hipLaunchKernelGGL(( ApplyResetKernel), dim3(mNoBlocks),dim3(mNoThreads), 0, 0, mpGlobalState, (*mpSpikingCell).index);
CUDA_CHECK_ERROR();
// Reset memory
    hipLaunchKernelGGL(( ResetMemoryKernel), dim3(mNoBlocks),dim3(mNoThreads), 0, 0, mpFiringVal, mNetworkSize);
CUDA_CHECK_ERROR();
}
}
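/* Zone semantics, as inferred from the update kernels below: zone 1 appears to
   cover membrane potentials below v_left, zone 2 the interval between v_left and
   v_right, zone 3 the range above v_right, and zone 4 the refractory state (it is
   the only zone updated with mpRefractTime). A cell moves up a zone when its
   potential crosses v_left or v_right from below, and drops back a zone when it
   falls below v_left. */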
__global__ void UpdateZone1Kernel( const float eventTime,
float4* pGlobalState,
unsigned int *pGlobalZone)
{
unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[index] == 1);
if (correct_zone)
{
float4 local_state = pGlobalState[index];
local_state = UpdateZone1( eventTime, local_state);
pGlobalState[index] = local_state;
pGlobalZone[index] += (local_state.x>v_left);
}
}
__device__ float4 UpdateZone1( float eventTime,
float4 state)
{
float crossTime = eventTime;
unsigned short changeZoneFlag = 0;
float v = fun1( crossTime, state.x, state.y, state.z, state.w);
if (v > v_left)
{
changeZoneFlag = 1;
float dv = dfun1( crossTime, state.x, state.y, state.z, state.w);
    while (fabs(v)>tol)
{
crossTime -= v/dv;
v = fun1( crossTime, state.x, state.y, state.z, state.w);
dv = dfun1( crossTime, state.x, state.y, state.z, state.w);
}
}
state = UpdateStateZone1( crossTime, state);
if (changeZoneFlag)
{
state.x = v_left;
    state = UpdateStateZone2( eventTime-crossTime, state);
}
return state;
}
__device__ float4 UpdateStateZone1( float t, float4 state)
{
state.x = (state.x*expf(-t/tau)
+(beta_left*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_left*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
-I +I*exp(-t/tau));
state.y = +(beta_left*(1.0f-expf(-t/tau_h))+state.y*expf(-t/tau_h));
state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
state.w = state.w*expf(-alpha*t);
return state;
}
__global__ void UpdateZone2Kernel( const float eventTime,
float4* pGlobalState,
unsigned int *pGlobalZone)
{
unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int local_zone = pGlobalZone[index];
bool correct_zone = (local_zone == 2);
if (correct_zone)
{
float4 local_state = pGlobalState[index];
// Update state
local_state = UpdateZone2( eventTime, local_state);
pGlobalState[index] = local_state;
// Update zone
local_zone += (local_state.x>v_right);
local_zone -= (local_state.x<v_left);
pGlobalZone[index] = local_zone;
}
}
__device__ float4 UpdateZone2( float eventTime,
float4 state)
{
float crossTime = eventTime;
unsigned short changeZoneFlag = 0;
float v = fun2( crossTime, state.x, state.y, state.z, state.w, 0.0f);
if (v > V_right)
{
changeZoneFlag = 1;
v = fun2( crossTime, state.x, state.y, state.z, state.w, V_right);
float dv = dfun2( crossTime, state.x, state.y, state.z, state.w);
    while (fabs(v)>tol)
{
crossTime -= v/dv;
v = fun2( crossTime, state.x, state.y, state.z, state.w, V_right);
dv = dfun2( crossTime, state.x, state.y, state.z, state.w);
}
}
if (v < V_left)
{
changeZoneFlag = -1;
v = fun2( crossTime, state.x, state.y, state.z, state.w, V_left);
float dv = dfun2( crossTime, state.x, state.y, state.z, state.w);
    while (fabs(v)>tol)
{
crossTime -= v/dv;
v = fun2( crossTime, state.x, state.y, state.z, state.w, V_left);
dv = dfun2( crossTime, state.x, state.y, state.z, state.w);
}
}
state = UpdateStateZone2( crossTime, state);
if (changeZoneFlag==1)
{
state.x = V_right;
    state = UpdateStateZone3( eventTime-crossTime, state);
}
if (changeZoneFlag==-1)
{
state.x = V_left;
    state = UpdateStateZone1( eventTime-crossTime, state);
}
return state;
}
__device__ float4 UpdateStateZone2( float t, float4 state)
{
state.x =
(zone==2)*(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2
.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h
+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamm
a_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h
*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau));
state.y =
(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(
tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+s
qrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alp
ha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alp
ha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)));
state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
state.w = state.w*expf(-alpha*t);
return state;
}
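// Zone-3 kernel: for every neuron currently flagged as zone 3, advance the
// analytic solution up to the event time and, if the membrane potential has
// dropped below V_right, demote the neuron to zone 2.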
__global__ void UpdateZone3Kernel( const float eventTime,
float4* pGlobalState,
unsigned int *pGlobalZone)
{
unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[index] == 3);
if (correct_zone)
{
float4 local_state = pGlobalState[index];
// Update state
local_state = UpdateZone3( eventTime, local_state);
pGlobalState[index] = local_state;
// Update zone
    pGlobalZone[index] -= (local_state.x<V_right);
}
}
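// Advance a zone-3 neuron over an interval of length eventTime. The trajectory
// is first evaluated at the end of the interval; if it has crossed V_right, the
// crossing time is located with a Newton iteration on fun3/dfun3, the state is
// integrated analytically up to the crossing, and the remainder of the interval
// is handled with the zone-2 solution.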
__device__ float4 UpdateZone3( float eventTime,
float4 state)
{
float crossTime = eventTime;
unsigned short changeZoneFlag = 0;
float v = fun3( crossTime, state.x, state.y, state.z, state.w, 0.0f);
if (v < V_right)
{
changeZoneFlag = 1;
v = fun3( crossTime, state.x, state.y, state.z, state.w, V_right);
float dv = dfun3( crossTime, state.x, state.y, state.z, state.w);
    while (fabsf(v)>tol)
{
crossTime -= v/dv;
v = fun3( crossTime, state.x, state.y, state.z, state.w, V_right);
dv = dfun3( crossTime, state.x, state.y, state.z, state.w);
}
}
state = UpdateStateZone3( crossTime, state);
if (changeZoneFlag)
{
state.x = V_right;
      state = UpdateStateZone2( eventTime-crossTime, state);
}
return state;
}
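// Closed-form zone-3 state update over an interval t: the membrane potential x
// and gating variable y relax exponentially with time constants tau and tau_h,
// while the synaptic pair (z, w) decays with rate alpha.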
__device__ float4 UpdateStateZone3( float t, float4 state)
{
state.x =
(state.x*expf(-t/tau)
+(beta_right*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_right*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
-I +I*exp(-t/tau));
state.y =
    (beta_right*(1.0f-expf(-t/tau_h))+state.y*expf(-t/tau_h));
state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
state.w = state.w*expf(-alpha*t);
return state;
}
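// Zone-4 (refractory) kernel: the state is evolved for min(refractory time,
// event time); if the refractory period expires within the event, the remaining
// time is integrated with the zone-2 dynamics and the neuron is moved to zone 2.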
__global__ void UpdateZone4Kernel( const float eventTime,
                                   float4* pGlobalState,
                                   unsigned int* pGlobalZone,
                                   float* pRefractTime)
{
unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[index] == 4);
if (correct_zone)
{
// Update state
    float4 local_state = pGlobalState[index];
float local_refract_time = pRefractTime[index];
float sim_time = min( local_refract_time, eventTime);
local_state = UpdateStateZone4( sim_time, local_state);
local_refract_time -= eventTime;
local_refract_time *= (-1);
if (local_refract_time>0.0f)
{
local_state = UpdateStateZone2( local_refract_time, local_state);
}
// Update zone
    local_refract_time *= (-1);
    if (local_refract_time <= 0.0f)
    {
      local_refract_time = 0.0f;
      pGlobalZone[index] = 2;
    }
    pGlobalState[index] = local_state;
    pRefractTime[index] = local_refract_time;
}
}
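// Refractory state update: the membrane potential is held at the reset value
// V_r while the gating variable y and the synaptic pair (z, w) continue to
// evolve analytically over the interval t.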
__device__ float4 UpdateStateZone4( float t, float4 state)
{
state.x = V_r;
state.y =
(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(
tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+s
qrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alp
ha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alp
ha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)));
state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
state.w = state.w*expf(-alpha*t);
return state;
}
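// Clear the per-neuron firing records before the next event sweep: the candidate
// firing time is pushed to the end of the step and the firing index is reset.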
__global__ void ResetMemoryKernel( firing* pFiringVal, const unsigned int networkSize, const float stepSize)
{
unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
if (index<networkSize)
{
    pFiringVal[index].time = stepSize;
pFiringVal[index].index = 0;
}
}
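// Illustrative host-side launch sequence (a sketch only, not part of this file;
// the device pointers d_firing, d_state, d_zone, d_refract and the block size
// are assumed names). The zone kernels do not bounds-check the thread index, so
// the grid is assumed to cover exactly networkSize threads (networkSize a
// multiple of blockSize):
//
//   const unsigned int blockSize = 128;
//   const unsigned int gridSize  = networkSize / blockSize;
//   ResetMemoryKernel<<<gridSize, blockSize>>>( d_firing, networkSize, stepSize);
//   UpdateZone3Kernel<<<gridSize, blockSize>>>( eventTime, d_state, d_zone);
//   UpdateZone4Kernel<<<gridSize, blockSize>>>( eventTime, d_state, d_zone, d_refract);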
//----------------------------------------------------------------------------
__global__ void updateZone4Kernel( float4* pGlobalState,
const unsigned int *pGlobalZone,
const float eventTime)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[k] == 4);
if (correct_zone)
{
float4 local_state = pGlobalState[k];
local_state = updateZone4( local_state, eventTime);
pGlobalState[k] = local_state;
}
}
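// Closed-form zone-1 update: the same analytic solution as UpdateStateZone3 but
// with the gating drive taken from beta_left.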
__inline__ __device__ float4 updateZone1( float4 state, float t)
{
float4 temp_state = state;
temp_state.x = state.x*expf(-t/tau)
+(beta_left*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_left*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
-I +I*exp(-t/tau);
temp_state.y = beta_left*(1.0f-expf(-t/tau_h))+state.y*expf(-t/tau_h);
temp_state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
temp_state.w = state.w*expf(-alpha*t);
return temp_state;
}
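// Branchless state update: the closed-form solutions for every zone are evaluated
// and combined with (zone==k) selector masks, so all threads in a warp follow the
// same instruction path regardless of the zone they are in.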
__inline__ __device__ float4 updateState( float4 state, float t, unsigned short zone)
{
float4 temp_state = state;
temp_state.x =
(zone==2)*(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2
.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h
+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamm
a_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h
*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau))
+(zone==1)*(state.x*expf(-t/tau)
+(beta_left*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_left*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
-I +I*exp(-t/tau))
+(zone==3)*(state.x*expf(-t/tau)
+(beta_right*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_right*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
-I +I*exp(-t/tau))
+(zone==4)*V_r;
temp_state.y =
((zone==2)|(zone==4))*(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1
.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_
h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau
_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre
*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)))
+((zone==1)|(zone==3))*(beta_left*(1.0f-expf(-t/tau_h))+state.y*expf(-t/tau_h));
temp_state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
temp_state.w = state.w*expf(-alpha*t);
return temp_state;
}
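// updateZone2: closed-form state update over an elapsed time t while the trajectory
// stays in zone 2. In these helpers state.x appears to hold the membrane potential V,
// state.y the slow (h-type) variable with time constant tau_h, and state.z/state.w the
// synaptic alpha-function pair driven by gs and alpha. The expressions below are
// machine-generated analytic solutions (apparently exported from a symbolic algebra
// tool); note they call the double-precision exp/sqrt/pow even though the state is a
// float4, so they are evaluated in double and narrowed on assignment.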
__inline__ __device__ float4 updateZone2( float4 state, float t)
{
float4 temp_state = state;
temp_state.x =
1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*t
au_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_cent
re*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh
*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0
/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau);
temp_state.y =
1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(t
au*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sq
rt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alph
a*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alph
a*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0));
temp_state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
temp_state.w = state.w*expf(-alpha*t);
return temp_state;
}
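// updateZone3: closed-form update for zone 3. The voltage relaxes with time constant
// tau toward a level set by the beta_right*gh, synaptic (gs, alpha) and constant-current
// (I) terms, the slow variable relaxes toward beta_left with time constant tau_h, and
// the synaptic pair (z, w) continues its alpha-function decay. Unlike updateZone2 this
// branch is written with the single-precision expf/powf intrinsics.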
__inline__ __device__ float4 updateZone3( float4 state, float t)
{
float4 temp_state = state;
temp_state.x = state.x*expf(-t/tau)
+(beta_right*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_right*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
- I + I*expf(-t/tau);
temp_state.y = beta_left*(1.0f-expf(-t/tau_h))+state.y*expf(-t/tau_h);
temp_state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
temp_state.w = state.w*expf(-alpha*t);
return temp_state;
}
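// updateZone4: the voltage is held at the constant V_r (presumably the reset potential)
// for the whole interval, while the slow variable follows the same closed form as in
// updateZone2 and the synaptic pair continues its alpha-function decay.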
__inline__ __device__ float4 updateZone4( float4 state, float t)
{
float4 temp_state = state;
temp_state.x = V_r;
temp_state.y =
1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(t
au*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sq
rt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alph
a*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alph
a*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0));
temp_state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
temp_state.w = state.w*expf(-alpha*t);
return temp_state;
}
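// fun1: boundary-crossing residual. It evaluates the closed-form membrane potential at
// time t for an initial state (v0, n0, u0, y0) -- the same functional form as the
// voltage updates above, with beta_left as the gh drive -- and subtracts V_left, so a
// zero of fun1 in t is the time at which V reaches the V_left boundary. Presumably it
// is handed to a root finder when the state must be advanced only up to a zone change.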
__device__ float fun1( float t, float v0, float n0, float u0, float y0)
{
return v0*expf(-t/tau)
+(beta_left*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_left*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*n0*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*n0*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+powf(alpha,2)*t*tau*y0))/powf(alpha*tau-1,2)
-(gs*expf(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/powf(alpha*tau-1,2)
- I + I*expf(-t/tau) - V_left;
}
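// fun1/fun2 return V(t) minus a boundary value, which suggests they are meant to be
// driven by a root finder that locates zone-crossing times. The helper below is only an
// illustrative sketch of such a driver (findCrossingBisect, the 40-iteration budget and
// the bracketing assumption are not part of the original code): it bisects a sign change
// of fun1 on [0, t_max].
__device__ float findCrossingBisect(float t_max, float v0, float n0, float u0, float y0)
{
    float lo = 0.0f;
    float hi = t_max;
    float f_lo = fun1(lo, v0, n0, u0, y0);
    // Caller is assumed to have verified that fun1 changes sign on [0, t_max].
    for (int it = 0; it < 40; ++it)
    {
        float mid = 0.5f*(lo + hi);
        float f_mid = fun1(mid, v0, n0, u0, y0);
        if ((f_lo <= 0.0f) == (f_mid <= 0.0f))
        {
            lo = mid;     // same sign as the left end: the root lies to the right
            f_lo = f_mid;
        }
        else
        {
            hi = mid;     // sign change between lo and mid: shrink from the right
        }
    }
    return 0.5f*(lo + hi);
}
// fun2: the analogous residual for the zone-2 closed-form voltage, evaluated from the
// initial state (v0, n0, u0, y0) and presumably compared against the caller-supplied
// threshold `thresh`.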
__device__ float fun2( float t, float v0, float n0, float u0, float y0, float thresh)
{
return
1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(-v0+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0
))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0
))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*t
au_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(n0+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+g
amma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau)
- thresh;
}
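// fun3: closed-form trajectory in zone 3 at time t (initial state v0, n0, u0, y0)
// minus the crossing level `thresh`; its roots are located by Newton's method in
// eventTimeZone3 below.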
__device__ float fun3( float t, float v0, float n0, float u0, float y0, float thresh)
{
return v0*expf(-t/tau)
+(beta_right*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_right*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*n0*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*n0*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+powf(alpha,2)*t*tau*y0))/powf(alpha*tau-1,2)
-(gs*expf(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/powf(alpha*tau-1,2)
-I+I*expf(-t/tau) - thresh;
}
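// dfun1-dfun3: time derivatives of the corresponding trajectory functions, used
// as the Newton-Raphson slope in the eventTimeZone routines.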
__device__ float dfun1( float t, float v0, float n0, float u0, float y0)
{
return
(beta_right*gh*exp(-t/tau))/(tau-tau_h)-(v0*exp(-t/tau))/tau-(I*exp(-t/tau))/tau-(beta_right*gh*exp(-t/tau_h))/(tau-tau_h)+(gh*n0*exp(-t/tau_h))/(tau-tau_h)-(gs*exp(-alpha*t)*(-tau*y0*alpha*alpha+y0*alpha))/powf(alpha*tau-1.0f,2.0f)+(gs*exp(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/(tau*powf(alpha*tau-1.0f,2.0f))-(alpha*gs*exp(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+alpha*alpha*t*tau*y0))/powf(alpha*tau-1.0f,2.0f)-(gh*n0*tau_h*exp(-t/tau))/(tau*(tau-tau_h));
}
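// dfun2: derivative of the machine-generated zone-2 expression fun2.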
__device__ float dfun2( float t, float v0, float n0, float u0, float y0)
{
return
1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(I*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-1.0/2.0)+(I*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau+(I*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau+(gs*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau+(I*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/gamma_centre-(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+gs*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)-alpha*gs*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))
/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)-(alpha*gs*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0))/tau+(I*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-alpha*gs*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0))/tau-(gs*tau_h*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau*tau_h)-(I*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_
h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau_h)-(beta_centre*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau)+(beta_centre*tau*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau_h*(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0))+(beta_centre*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0))+(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau*tau_h)+(alpha*gs*tau_h*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(alpha*gs*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(alpha*gs*tau_h*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*
tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(alpha*gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(gs*tau*tau_h*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(alpha*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau-(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h-(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(-v0+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0
+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_
h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau
_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)
*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-1.0/2.0)+(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau_h+(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau_h+(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+I*gamma_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-gamma_centre*gs*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau_h+alpha*gamma_centre*gs*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(beta_centre*tau*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.
0))*(1.0/2.0))/(tau_h*(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(alpha*gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h+(tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau-(tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h-(tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(tau*tau_h*-2.0+tau*tau+ta
u_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)-(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(n0+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1
.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau);
}
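// dfun3 is textually identical to dfun1, presumably because zones 1 and 3 share
// the same dynamics and differ only in the crossing level tested in fun1/fun3.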
__device__ float dfun3( float t, float v0, float n0, float u0, float y0)
{
return
(beta_right*gh*exp(-t/tau))/(tau-tau_h)-(v0*exp(-t/tau))/tau-(I*exp(-t/tau))/tau-(beta_right*gh*exp(-t/tau_h))/(tau-tau_h)+(gh*n0*exp(-t/tau_h))/(tau-tau_h)-(gs*exp(-alpha*t)*(-tau*y0*alpha*alpha+y0*alpha))/powf(alpha*tau-1.0f,2.0f)+(gs*exp(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/(tau*powf(alpha*tau-1.0f,2.0f))-(alpha*gs*exp(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+alpha*alpha*t*tau*y0))/powf(alpha*tau-1.0f,2.0f)-(gh*n0*tau_h*exp(-t/tau))/(tau*(tau-tau_h));
}
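// Newton-Raphson search, started from t = 0, for the time at which the zone-1
// trajectory reaches its boundary. There is no iteration cap, so convergence to
// within tolerance `tol` is assumed.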
__device__ float eventTimeZone1( float v0, float n0, float u0, float y0)
{
float f, df, estimatedTime = 0.0f;
f = fun1( estimatedTime, v0, n0, u0, y0);
df = dfun1( estimatedTime, v0, n0, u0, y0);
while (fabs(f)>tol) {
estimatedTime -= f/df;
f = fun1( estimatedTime, v0, n0, u0, y0);
df = dfun1( estimatedTime, v0, n0, u0, y0);
}
return estimatedTime;
}
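// Zone 2 can be left through either boundary: V_left (cross code 2) or V_right
// (cross code 3). Both crossing times are computed and the earlier one is kept.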
__device__ void eventTimeZone2( float v0, float n0, float u0, float y0,
float *t, unsigned short *cross)
{
float f, df;
float estimatedTimeLeft = 0.0f;
float estimatedTimeRight = 0.0f;
f = fun2( estimatedTimeLeft, v0, n0, u0, y0, V_left);
df = dfun2( estimatedTimeLeft, v0, n0, u0, y0);
while (fabs(f)>tol) {
estimatedTimeLeft -= f/df;
f = fun2( estimatedTimeLeft, v0, n0, u0, y0, V_left);
df = dfun2( estimatedTimeLeft, v0, n0, u0, y0);
}
f = fun2( estimatedTimeRight, v0, n0, u0, y0, V_right);
df = dfun2( estimatedTimeRight, v0, n0, u0, y0);
while (fabs(f)>tol) {
estimatedTimeRight -= f/df;
f = fun2( estimatedTimeRight, v0, n0, u0, y0, V_right);
df = dfun2( estimatedTimeRight, v0, n0, u0, y0);
}
*cross = 2;
if (estimatedTimeRight<estimatedTimeLeft)
{
estimatedTimeLeft = estimatedTimeRight;
*cross = 3;
}
*t = estimatedTimeLeft;
}
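// Zone 3 is left either back through V_right (cross code 4) or through the
// firing threshold V_th (cross code 5), whichever is reached first.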
__device__ void eventTimeZone3( float v0, float n0, float u0, float y0,
float *t, unsigned short *cross)
{
float f, df;
float estimatedTimeLeft = 0.0f;
float estimatedTimeRight = 0.0f;
f = fun3( estimatedTimeLeft, v0, n0, u0, y0, V_right);
df = dfun3( estimatedTimeLeft, v0, n0, u0, y0);
while (fabs(f)>tol) {
estimatedTimeLeft -= f/df;
f = fun3( estimatedTimeLeft, v0, n0, u0, y0, V_right);
df = dfun3( estimatedTimeLeft, v0, n0, u0, y0);
}
f = fun3( estimatedTimeRight, v0, n0, u0, y0, V_th);
df = dfun3( estimatedTimeRight, v0, n0, u0, y0);
while (fabs(f)>tol) {
estimatedTimeRight -= f/df;
f = fun3( estimatedTimeRight, v0, n0, u0, y0, V_th);
df = dfun3( estimatedTimeRight, v0, n0, u0, y0);
}
*cross = 4;
if (estimatedTimeRight<estimatedTimeLeft)
{
estimatedTimeLeft = estimatedTimeRight;
*cross = 5;
}
*t = estimatedTimeLeft;
}
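// One thread per neuron. Each of the four zone kernels below writes a candidate
// event (time, neuron index, crossing code) only for neurons currently in its
// zone; the candidates are presumably reduced to a global minimum afterwards.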
__global__ void eventTimeZone1Kernel( const float4* pGlobal_state,
const unsigned short* pGlobalZone,
struct firing* pVal)
{
unsigned int k = threadIdx.x + blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[k] == 1);
    // Fill in a large sentinel time for every neuron so that cells handled by
    // the other zone kernels cannot spuriously win the minimum-time search.
    pVal[k].time = 100000.0f;
if (correct_zone)
{
float4 local_state = pGlobal_state[k];
pVal[k].time = eventTimeZone1(local_state.x,local_state.y,local_state.z,local_state.w);
pVal[k].index = k;
pVal[k].cross = 1;
}
}
__global__ void eventTimeZone2Kernel( const float4* pGlobal_state,
const unsigned short* pGlobalZone,
struct firing* pVal)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[k] == 2);
float4 local_state = pGlobal_state[k];
float local_time = 1000000.0f;
unsigned short cross;
if (correct_zone)
{
eventTimeZone2(local_state.x,local_state.y,local_state.z,local_state.w,&local_time,&cross);
pVal[k].time = local_time;
pVal[k].index = k;
pVal[k].cross = cross;
}
}
__global__ void eventTimeZone3Kernel( const float4* pGlobal_state,
const unsigned short* pGlobalZone,
struct firing* pVal)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[k] == 3);
float4 local_state = pGlobal_state[k];
float local_time = 1000000.0f;
unsigned short cross;
if (correct_zone)
{
eventTimeZone3(local_state.x,local_state.y,local_state.z,local_state.w,&local_time,&cross);
pVal[k].time = local_time;
pVal[k].index = k;
pVal[k].cross = cross;
}
}
__global__ void eventTimeZone4Kernel( const float *pRefractTime,
const unsigned short* pGlobalZone,
struct firing* pVal)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[k] == 4);
if (correct_zone)
{
pVal[k].time = pRefractTime[k];
pVal[k].index = k;
pVal[k].cross = 6;
}
}
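// Advance every neuron's state by the elapsed event time and decrement its
// refractory clock, clamping it at zero.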
__global__ void updateStateKernel( float4* pGlobalState,
float* pRefractTimes,
const unsigned short* pGlobalZone,
const float eventTime)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
float4 local_state = pGlobalState[k];
float refract_time = pRefractTimes[k];
local_state = updateState( local_state, eventTime, pGlobalZone[k]);
refract_time -= eventTime;
pGlobalState[k] = local_state;
pRefractTimes[k] = refract_time*(refract_time>0.0f);
}
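// Per-zone variants of the state update: each advances only the neurons whose
// zone matches the kernel.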
__global__ void updateZone1Kernel( float4* pGlobalState,
                                   const unsigned short *pGlobalZone, // zones are stored as 16-bit values
const float eventTime)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[k] == 1);
if (correct_zone)
{
float4 local_state = pGlobalState[k];
local_state = updateZone1( local_state, eventTime);
pGlobalState[k] = local_state;
}
}
__global__ void updateZone2Kernel( float4* pGlobalState,
                                   const unsigned short *pGlobalZone,
const float eventTime)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
    bool correct_zone = (pGlobalZone[k] == 2);
if (correct_zone)
{
float4 local_state = pGlobalState[k];
local_state = updateZone2( local_state, eventTime);
pGlobalState[k] = local_state;
}
}
__global__ void updateZone3Kernel( float4* pGlobalState,
const unsigned int *pGlobalZone,
const float eventTime)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[k] == 3);
if (correct_zone)
{
float4 local_state = pGlobalState[k];
local_state = updateZone3( local_state, eventTime);
pGlobalState[k] = local_state;
}
}
__global__ void updateZone4Kernel( float4* pGlobalState,
const unsigned int *pGlobalZone,
const float eventTime)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[k] == 4);
if (correct_zone)
{
float4 local_state = pGlobalState[k];
local_state = updateZone4( local_state, eventTime);
pGlobalState[k] = local_state;
}
}
__global__ void ApplyResetKernel( float4* pGlobalState,
                                  const unsigned int index,
                                  float* pRefractTime)
{
  // Deliver synaptic input to the spatial neighbourhood of the cell that fired
  int k = threadIdx.x+blockDim.x*blockIdx.x+index-spatial_extent;
  if ((k>=0)&&(k<N))
  {
    pGlobalState[k].w += alpha*W*dx;
  }
  // Reset the cell that fired and start its refractory period
  // (its zone is updated on the host in updateZones)
  if (threadIdx.x==0)
  {
    pGlobalState[index].x = V_r;
    pRefractTime[index] = tau_r;
  }
}
__global__ void InitialiseKernel( float4* pGlobalState,
unsigned short* pGlobalZone)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
float4 local_state = (float4){0.0f,0.0f,0.0f,0.0f};
pGlobalState[k] = local_state;
pGlobalZone[k] = 2;
}
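/* Host-side bookkeeping after each event: the 'cross' code reported by the
 * event-time kernels determines the new zone of the cell that generated the
 * event, and a genuine spike (cross==5) additionally resets that cell and
 * injects synaptic input into its spatial neighbourhood via ApplyResetKernel.
 */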
void updateZones( const struct firing* val,
unsigned short* pGlobalZone,
float4* pGlobalState,
float* pRefractTime)
{
unsigned int cross = (*val).cross;
unsigned int index = (*val).index;
unsigned short new_zone;
switch (cross)
{
case 1 :
new_zone = 2;
break;
case 2 :
new_zone = 1;
break;
case 3 :
new_zone = 3;
break;
case 4 :
new_zone = 2;
break;
case 5 :
new_zone = 4;
break;
case 6 :
new_zone = 2;
break;
}
// Update zone of neuron that fired
CUDA_CALL( hipMemcpy( pGlobalZone+index, &new_zone, sizeof(short),
hipMemcpyHostToDevice));
// If cell fired, need to reset voltage and send out synaptic input
if (cross==5)
{
hipLaunchKernelGGL(( ApplyResetKernel), dim3((2*spatial_extent+noThreads-1)/noThreads),dim3(noThreads), 0, 0,
pGlobalState, index, pRefractTime);
}
}
int main( int argc , char *argv[])
{
// Allocate memory
float4* p_global_state;
float* p_refract_time;
struct firing* p_firing_val;
struct firing* p_firing_val_temp;
struct firing* p_firing_pinned;
unsigned short* p_global_zone;
// Allocate memory
CUDA_CALL( hipMalloc( &p_global_state, N*sizeof(float4)));
CUDA_CALL( hipMalloc( &p_refract_time, N*sizeof(float)));
CUDA_CALL( hipMalloc( &p_global_zone, N*sizeof(short)));
CUDA_CALL( hipMalloc( &p_firing_val, N*sizeof(firing)));
CUDA_CALL( hipMalloc( &p_firing_val_temp, noBlocks*sizeof(firing)));
// Pinned memory
CUDA_CALL( hipHostMalloc( (void**) &p_firing_pinned, sizeof(firing)));
hipLaunchKernelGGL(( InitialiseKernel), dim3(noBlocks),dim3(noThreads), 0, 0, p_global_state, p_global_zone);
float final_time = 100.0f;
float current_time = 0.0f;
while (current_time<final_time)
{
hipLaunchKernelGGL(( eventTimeZone1Kernel), dim3(noBlocks),dim3(noThreads), 0, 0,
p_global_state,
p_global_zone,
p_firing_val);
CUDA_CHECK_ERROR();
hipLaunchKernelGGL(( eventTimeZone2Kernel), dim3(noBlocks),dim3(noThreads), 0, 0,
p_global_state,
p_global_zone,
p_firing_val);
CUDA_CHECK_ERROR();
hipLaunchKernelGGL(( eventTimeZone3Kernel), dim3(noBlocks),dim3(noThreads), 0, 0,
p_global_state,
p_global_zone,
p_firing_val);
CUDA_CHECK_ERROR();
hipLaunchKernelGGL(( eventTimeZone4Kernel), dim3(noBlocks),dim3(noThreads), 0, 0,
p_refract_time,
p_global_zone,
p_firing_val);
CUDA_CHECK_ERROR();
// Find minimum spike time
hipLaunchKernelGGL(( deviceReduceMinKernel), dim3(noBlocks),dim3(noThreads), 0, 0,
p_firing_val, N, p_firing_val_temp);
CUDA_CHECK_ERROR();
hipLaunchKernelGGL(( deviceReduceMinKernel), dim3(1),dim3(noThreads), 0, 0,
p_firing_val_temp, noBlocks, p_firing_val_temp);
CUDA_CHECK_ERROR();
// Update - assume transfer to page-locked memory
    CUDA_CALL( hipMemcpy( p_firing_pinned, p_firing_val_temp, sizeof(firing),
hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( updateStateKernel), dim3(noBlocks),dim3(noThreads), 0, 0,
p_global_state, p_refract_time, p_global_zone, (*p_firing_pinned).time);
CUDA_CHECK_ERROR();
// Update zones
updateZones( p_firing_pinned, p_global_zone, p_global_state, p_refract_time);
// Update time
current_time += (*p_firing_pinned).time;
}
hipFree( p_global_state);
hipFree( p_global_zone);
hipFree( p_refract_time);
hipFree( p_firing_val);
hipFree( p_firing_val_temp);
hipHostFree( p_firing_pinned);
}
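/* A minimal sketch of how this translation unit might be built and run under
 * ROCm (assumptions: the file name is illustrative, and N, noThreads,
 * noBlocks, spatial_extent and the model constants are supplied by a header
 * included earlier in this file):
 *
 *   hipcc -O2 -o spiking_model spiking_model.hip.cpp
 *   ./spiking_model
 */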
__device__ float UpdateVoltageZone3( const float4 state, const float t)
{
return (state.x*expf(-t/tau)
+(beta_right*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_right*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
-I +I*exp(-t/tau));
}
//-------------------------------------------------------------------------------------------
__device__ float FindSpikeTime( const float4 state)
{
float spikeTime = 0.0f;
float f, df;
f = fun3( spikeTime, state.x, state.y, state.z, state.w, V_th);
df = dfun3( spikeTime, state.x, state.y, state.z, state.w);
while (fabs(f)>tol) {
spikeTime -= f/df;
f = fun3( spikeTime, state.x, state.y, state.z, state.w, V_th);
df = dfun3( spikeTime, state.x, state.y, state.z, state.w);
}
return spikeTime;
}
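/* FindSpikeTimeKernel scans every cell that sits in zone 3 at the end of a
 * fixed step of length stepTime; any cell whose voltage would exceed V_th has
 * its exact spike time recovered with the Newton solve above and is appended
 * to pFiringVal through an atomically incremented counter.
 */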
__global__ void FindSpikeTimeKernel( const float4* pGlobalState,
const short* pGlobalZone,
const float stepTime,
EventDrivenMap::firing* pFiringVal,
unsigned int* pEventNo)
{
unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[index] == 3);
if (correct_zone)
{
float4 local_state = pGlobalState[index];
    float local_v = fun3( stepTime, local_state.x, local_state.y, local_state.z, local_state.w, 0.0f);
float spikeTime = stepTime;
unsigned int storage_id;
if (local_v>V_th)
{
spikeTime = FindSpikeTime( local_state);
storage_id = atomicAdd( pEventNo, 1);
// Store values
pFiringVal[storage_id].time = spikeTime;
pFiringVal[storage_id].index = index;
}
}
}
void EventDrivenMap::FindMinimumSpikeTime()
{
  // Zero the event counter before the kernel atomically accumulates candidates
  CUDA_CALL( hipMemset( mpEventNo, 0, sizeof(int)));
  hipLaunchKernelGGL(( FindSpikeTimeKernel), dim3(mNoBlocks),dim3(mNoThreads), 0, 0,  mpGlobalState, mpGlobalZone,
      mTime+mDt, mpFiringVal, mpEventNo);
  CUDA_CHECK_ERROR();
// Find minimum spike time
CUDA_CALL( hipMemcpy( mpHost_eventNo, mpEventNo, sizeof(int), hipMemcpyDeviceToHost));
  if (*mpHost_eventNo>0)
{
hipLaunchKernelGGL(( deviceReduceMinKernel), dim3(mNoBlocks),dim3(mNoThreads), 0, 0,
        mpFiringVal, *mpHost_eventNo, mpFiringValTemp);
CUDA_CHECK_ERROR();
hipLaunchKernelGGL(( deviceReduceMinKernel), dim3(1),dim3(mNoThreads), 0, 0,
mpFiringValTemp, mNoBlocks, mpFiringValTemp);
CUDA_CHECK_ERROR();
}
}
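/* The minimum firing event is found with a standard two-level reduction: a
 * warp-level min using shuffle instructions, a block-level min through shared
 * memory, and a second kernel launch to combine the per-block results.
 */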
__inline__ __device__ EventDrivenMap::firing warpReduceMin( EventDrivenMap::firing val)
{
float dummyTime;
unsigned int dummyIndex;
for (int offset = warpSize/2; offset>0; offset/=2) {
dummyTime = __shfl_down( val.time, offset);
dummyIndex = __shfl_down( val.index, offset);
if (dummyTime<val.time)
{
val.time = dummyTime;
val.index = dummyIndex;
}
}
return val;
}
__inline__ __device__ struct EventDrivenMap::firing blockReduceMin( EventDrivenMap::firing val)
{
__shared__ EventDrivenMap::firing shared[32];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceMin( val);
if (lane==0) {
shared[wid] = val;
}
__syncthreads();
  val = (threadIdx.x<blockDim.x/warpSize) ? shared[lane] : (EventDrivenMap::firing){1000000.0f,0};
if (wid==0) {
val = warpReduceMin( val);
}
return val;
}
__global__ void deviceReduceMinKernel( const EventDrivenMap::firing* in,
const unsigned int npts,
EventDrivenMap::firing* out)
{
float time = 1000000.0f;
  struct EventDrivenMap::firing dummy;
  struct EventDrivenMap::firing val;
  // Start from the sentinel so that blocks which read no elements stay inert
  val.time = time;
  val.index = 0;
//reduce multiple elements per thread
for (int i=blockIdx.x*blockDim.x+threadIdx.x;i<npts;i+=blockDim.x*gridDim.x)
{
dummy = in[i];
if (dummy.time < time)
{
val = dummy;
time = dummy.time;
}
}
val = blockReduceMin( val);
if (threadIdx.x==0)
{
out[blockIdx.x] = val;
}
}
| c1d842c25819f7dfdd187a04ca78b229ac6bd1e6.cu | /* Code to do one dimensional spiking model from Mayte's note */
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <curand.h>
#include "parameters.h"
#include "EventDrivenMap.hpp"
#include "vector_types.h"
#define CUDA_ERROR_CHECK
#define CUDA_CALL( err) __cudaCall( err, __FILE__, __LINE__ )
#define CUDA_CHECK_ERROR() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
}
inline void __curandCall( curandStatus_t err, const char *file, const int line)
{
#ifdef CURAND_ERROR_CHECK
if ( CURAND_STATUS_SUCCESS != err)
{
fprintf( stderr, "curandCall() failed at %s:%i",
file, line);
exit( -1 );
}
#endif
}
EventDrivenMap::EventDrivenMap( const ParameterList* pParameterList)
{
mNetworkSize = (*pParameterList).networkSize;
mNoThreads = (*pParameterList).noThreads;
mNoBlocks = (mNetworkSize+mNoThreads-1)/mNoThreads;
mDomainSize = (*pParameterList).domainSize;
mDx = mDomainSize/(mNetworkSize-1);
mDt = (*pParameterList).timestep;
  CUDA_CALL( cudaMalloc( &mpGlobalState, mNetworkSize*sizeof(float4)));
  CUDA_CALL( cudaMalloc( &mpRefractTime, mNetworkSize*sizeof(float)));
  CUDA_CALL( cudaMalloc( &mpGlobalZone, mNetworkSize*sizeof(short)));
  CUDA_CALL( cudaMalloc( &mpFiringVal, mNetworkSize*sizeof(firing)));
CUDA_CALL( cudaMalloc( &mpFiringValTemp, mNoBlocks*sizeof(firing)));
CUDA_CALL( cudaMalloc( &mpEventNo, sizeof(int)));
CUDA_CALL( cudaMallocHost( &mpHost_eventNo, sizeof(int)));
}
EventDrivenMap::~EventDrivenMap()
{
cudaFree( mpGlobalState);
cudaFree( mpGlobalZone);
cudaFree( mpRefractTime);
cudaFree( mpFiringVal);
cudaFree( mpFiringValTemp);
cudaFree( mpEventNo);
  cudaFreeHost( mpHost_eventNo);
}
void EventDrivenMap::SimulateNetwork( const float finalTime)
{
InitialiseNetwork();
while (mTime<finalTime)
{
SimulateStep();
}
}
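/* The public entry point simply repeats the event-driven update until the
 * requested final time is reached; SimulateStep advances the whole network by
 * one window of length mDt, processing spiking events one at a time.
 */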
void EventDrivenMap::InitialiseNetwork()
{
mTime = 0.0f;
  InitialiseNetworkKernel<<<mNoBlocks,mNoThreads>>>( mpGlobalState,
      mpGlobalZone);
CUDA_CHECK_ERROR();
ResetMemoryKernel<<<mNoBlocks,mNoThreads>>>( mpFiringVal, mNetworkSize, mDt);
CUDA_CHECK_ERROR();
}
__global__ void InitialiseNetworkKernel( float4* pGlobalState,
                                         unsigned short* pGlobalZone)
{
  unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
  if (index<networkSize)
  {
    // The original body held placeholders; as an assumption, start every cell
    // at rest with zero synaptic drive in zone 2 (the zone pointer is taken to
    // be unsigned short, matching the allocation in the constructor)
    pGlobalState[index] = (float4){0.0f,0.0f,0.0f,0.0f};
    pGlobalZone[index] = 2;
  }
}
void EventDrivenMap::SimulateStep()
{
  float elapsedTime = 0.0f;
  while (elapsedTime<mDt)
  {
    // First, find spiking cell
    FindMinimumSpikeTime();
    // Bring the winning event to the host (mpSpikingCell is assumed to point
    // at page-locked host memory holding a single firing struct)
    CUDA_CALL( cudaMemcpy( mpSpikingCell, mpFiringValTemp, sizeof(firing),
          cudaMemcpyDeviceToHost));
    // Update all cells
    UpdateZone1Kernel<<<mNoBlocks,mNoThreads>>>( (*mpSpikingCell).time,
        mpGlobalState, mpGlobalZone);
    CUDA_CHECK_ERROR();
    UpdateZone2Kernel<<<mNoBlocks,mNoThreads>>>( (*mpSpikingCell).time,
        mpGlobalState, mpGlobalZone);
    CUDA_CHECK_ERROR();
    UpdateZone3Kernel<<<mNoBlocks,mNoThreads>>>( (*mpSpikingCell).time,
        mpGlobalState, mpGlobalZone);
    CUDA_CHECK_ERROR();
    UpdateZone4Kernel<<<mNoBlocks,mNoThreads>>>( (*mpSpikingCell).time,
        mpGlobalState, mpGlobalZone, mpRefractTime);
    CUDA_CHECK_ERROR();
    // Update time
    elapsedTime += (*mpSpikingCell).time;
    mTime += (*mpSpikingCell).time;
    // Reset neuron that fired
    ApplyResetKernel<<<mNoBlocks,mNoThreads>>>( mpGlobalState, (*mpSpikingCell).index);
    CUDA_CHECK_ERROR();
    // Reset memory
    ResetMemoryKernel<<<mNoBlocks,mNoThreads>>>( mpFiringVal, mNetworkSize);
    CUDA_CHECK_ERROR();
  }
}
__global__ void UpdateZone1Kernel( const float eventTime,
float4* pGlobalState,
unsigned int *pGlobalZone)
{
unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[index] == 1);
if (correct_zone)
{
float4 local_state = pGlobalState[index];
local_state = UpdateZone1( eventTime, local_state);
pGlobalState[index] = local_state;
    pGlobalZone[index] += (local_state.x>V_left);
}
}
__device__ float4 UpdateZone1( float eventTime,
float4 state)
{
float crossTime = eventTime;
unsigned short changeZoneFlag = 0;
float v = fun1( crossTime, state.x, state.y, state.z, state.w);
  if (v > V_left)
{
changeZoneFlag = 1;
float dv = dfun1( crossTime, state.x, state.y, state.z, state.w);
    while (fabs(v)>tol)
{
crossTime -= v/dv;
v = fun1( crossTime, state.x, state.y, state.z, state.w);
dv = dfun1( crossTime, state.x, state.y, state.z, state.w);
}
}
state = UpdateStateZone1( crossTime, state);
if (changeZoneFlag)
{
    state.x = V_left;
    state = UpdateStateZone2( eventTime-crossTime, state);
}
return state;
}
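/* Each UpdateZoneN helper follows the same pattern: evaluate the closed-form
 * voltage at the requested event time, and if a zone boundary would have been
 * crossed, Newton-solve for the crossing time, integrate exactly up to it,
 * clamp the voltage onto the boundary, and integrate the remainder of the
 * interval with the neighbouring zone's solution.
 */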
__device__ float4 UpdateStateZone1( float t, float4 state)
{
state.x = (state.x*expf(-t/tau)
+(beta_left*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_left*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
-I +I*exp(-t/tau));
state.y = +(beta_left*(1.0f-expf(-t/tau_h))+state.y*expf(-t/tau_h));
state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
state.w = state.w*expf(-alpha*t);
return state;
}
__global__ void UpdateZone2Kernel( const float eventTime,
float4* pGlobalState,
unsigned int *pGlobalZone)
{
unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int local_zone = pGlobalZone[index];
bool correct_zone = (local_zone == 2);
if (correct_zone)
{
float4 local_state = pGlobalState[index];
// Update state
local_state = UpdateZone2( eventTime, local_state);
pGlobalState[index] = local_state;
// Update zone
    local_zone += (local_state.x>V_right);
    local_zone -= (local_state.x<V_left);
pGlobalZone[index] = local_zone;
}
}
__device__ float4 UpdateZone2( float eventTime,
float4 state)
{
float crossTime = eventTime;
unsigned short changeZoneFlag = 0;
float v = fun2( crossTime, state.x, state.y, state.z, state.w, 0.0f);
if (v > V_right)
{
changeZoneFlag = 1;
v = fun2( crossTime, state.x, state.y, state.z, state.w, V_right);
float dv = dfun2( crossTime, state.x, state.y, state.z, state.w);
    while (fabs(v)>tol)
{
crossTime -= v/dv;
v = fun2( crossTime, state.x, state.y, state.z, state.w, V_right);
dv = dfun2( crossTime, state.x, state.y, state.z, state.w);
}
}
  else if (v < V_left)
{
changeZoneFlag = -1;
v = fun2( crossTime, state.x, state.y, state.z, state.w, V_left);
float dv = dfun2( crossTime, state.x, state.y, state.z, state.w);
    while (fabs(v)>tol)
{
crossTime -= v/dv;
v = fun2( crossTime, state.x, state.y, state.z, state.w, V_left);
dv = dfun2( crossTime, state.x, state.y, state.z, state.w);
}
}
state = UpdateStateZone2( crossTime, state);
if (changeZoneFlag==1)
{
state.x = V_right;
    state = UpdateStateZone3( eventTime-crossTime, state);
}
if (changeZoneFlag==-1)
{
state.x = V_left;
    state = UpdateStateZone1( eventTime-crossTime, state);
}
return state;
}
__device__ float4 UpdateStateZone2( float t, float4 state)
{
state.x =
(zone==2)*(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2
.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h
+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamm
a_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h
*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau));
state.y =
(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(
tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+s
qrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alp
ha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alp
ha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)));
state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
state.w = state.w*expf(-alpha*t);
return state;
}
__global__ void UpdateZone3Kernel( const float eventTime,
float4* pGlobalState,
unsigned int *pGlobalZone)
{
unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[index] == 3);
if (correct_zone)
{
float4 local_state = pGlobalState[index];
// Update state
local_state = UpdateZone3( eventTime, local_state);
pGlobalState[index] = local_state;
// Update zone
    pGlobalZone[index] -= (local_state.x<V_right);
}
}
__device__ float4 UpdateZone3( float eventTime,
float4 state)
{
float crossTime = eventTime;
unsigned short changeZoneFlag = 0;
float v = fun3( crossTime, state.x, state.y, state.z, state.w, 0.0f);
if (v < V_right)
{
changeZoneFlag = 1;
v = fun3( crossTime, state.x, state.y, state.z, state.w, V_right);
float dv = dfun3( crossTime, state.x, state.y, state.z, state.w);
    while (fabs(v)>tol)
{
crossTime -= v/dv;
v = fun3( crossTime, state.x, state.y, state.z, state.w, V_right);
dv = dfun3( crossTime, state.x, state.y, state.z, state.w);
}
}
state = UpdateStateZone3( crossTime, state);
if (changeZoneFlag)
{
state.x = V_right;
    state = UpdateStateZone2( eventTime-crossTime, state);
}
return state;
}
__device__ float4 UpdateStateZone3( float t, float4 state)
{
state.x =
(state.x*expf(-t/tau)
+(beta_right*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_right*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
-I +I*exp(-t/tau));
state.y =
    (beta_right*(1.0f-expf(-t/tau_h))+state.y*expf(-t/tau_h));
state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
state.w = state.w*expf(-alpha*t);
return state;
}
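/* Zone 4 is the refractory zone: the voltage stays clamped at V_r while the
 * gating and synaptic variables keep evolving; once the refractory clock has
 * run out, the remaining fraction of the step is integrated with the zone-2
 * solution and the cell is handed back to zone 2.
 */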
__global__ void UpdateZone4Kernel( const float eventTime,
                                   float4* pGlobalState,
                                   unsigned int* pGlobalZone,
                                   float* pRefractTime)
{
unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[index] == 4);
if (correct_zone)
{
// Update state
    float4 local_state = pGlobalState[index];
float local_refract_time = pRefractTime[index];
float sim_time = min( local_refract_time, eventTime);
local_state = UpdateStateZone4( sim_time, local_state);
local_refract_time -= eventTime;
local_refract_time *= (-1);
if (local_refract_time>0.0f)
{
local_state = UpdateStateZone2( local_refract_time, local_state);
}
// Update zone
local_refract_time *= (-1);
    // Refractory period has finished during this event: rejoin zone 2
    if (local_refract_time <= 0.0f)
{
local_refract_time = 0.0f;
pGlobalZone[index] = 2;
}
pRefractTime[index] = local_refract_time;
}
}
__device__ float4 UpdateStateZone4( float t, float4 state)
{
state.x = V_r;
state.y =
(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(
tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+s
qrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alp
ha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alp
ha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)));
state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
state.w = state.w*expf(-alpha*t);
return state;
}
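//----------------------------------------------------------------------------
// Resets the per-neuron firing record: each entry's firing time is set back to
// the integration step and its index field cleared before the next pass over
// the network.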
__global__ void ResetMemoryKernel( firing pFiringVal, const unsigned int networkSize, const float stepSize)
{
unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
if (index<networkSize)
{
        pFiringVal[index].time = stepSize; // use the step size passed to the kernel; mDt is not a parameter of this kernel
pFiringVal[index].index = 0;
}
}
//----------------------------------------------------------------------------
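// Applies the zone-4 closed-form update (updateZone4 above) to every neuron
// whose flag in pGlobalZone matches the value tested below, advancing its
// state by eventTime. There is no explicit bounds check on k, so the launch
// configuration is assumed to cover exactly the number of state entries.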
__global__ void updateZone4Kernel( float4* pGlobalState,
const unsigned int *pGlobalZone,
const float eventTime)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[k] == 1);
if (correct_zone)
{
float4 local_state = pGlobalState[k];
local_state = updateZone4( local_state, eventTime);
pGlobalState[k] = local_state;
}
}
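// A minimal launch sketch for the kernel above (illustrative only: block,
// d_state and d_zone are assumed host-side names, not defined in this file;
// because the kernel has no index guard, networkSize is assumed to be a
// multiple of the block size, or the buffers padded accordingly):
//
//   const unsigned int block = 128;
//   const unsigned int grid  = (networkSize + block - 1) / block;
//   updateZone4Kernel<<<grid, block>>>(d_state, d_zone, eventTime);
//
//----------------------------------------------------------------------------
// Closed-form update over an interval t for a neuron that stays in zone 1:
// state.x decays with time constant tau and accumulates the gh gating drive
// (set by beta_left and state.y), the gs synaptic drive (state.z, state.w) and
// the constant current I; state.y relaxes towards beta_left with time constant
// tau_h; state.z and state.w decay with rate alpha.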
__inline__ __device__ float4 updateZone1( float4 state, float t)
{
float4 temp_state = state;
temp_state.x = state.x*expf(-t/tau)
+(beta_left*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_left*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
    -I + I*expf(-t/tau);
temp_state.y = beta_left*(1.0f-expf(-t/tau_h))+state.y*expf(-t/tau_h);
temp_state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
temp_state.w = state.w*expf(-alpha*t);
return temp_state;
}
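//----------------------------------------------------------------------------
// Branch-free state update covering all four zones: each (zone==k) factor is
// 0 or 1, so exactly one closed-form solution below contributes per component.
// For state.x, zones 1 and 3 use the simple exponential form (driven by
// beta_left and beta_right respectively), zone 2 uses the long symbolic
// solution of the coupled x/y dynamics, and zone 4 resets x to V_r. For
// state.y, zones 2 and 4 use the symbolic solution and zones 1 and 3 the
// simple relaxation towards beta_left. state.z and state.w decay with rate
// alpha in every zone.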
__inline__ __device__ float4 updateState( float4 state, float t, unsigned short zone)
{
float4 temp_state = state;
temp_state.x =
(zone==2)*(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2
.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h
+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamm
a_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h
*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau))
+(zone==1)*(state.x*expf(-t/tau)
+(beta_left*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_left*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
    -I + I*expf(-t/tau))
+(zone==3)*(state.x*expf(-t/tau)
+(beta_right*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_right*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
    -I + I*expf(-t/tau))
+(zone==4)*V_r;
temp_state.y =
((zone==2)|(zone==4))*(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1
.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_
h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau
_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre
*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)))
+((zone==1)|(zone==3))*(beta_left*(1.0f-expf(-t/tau_h))+state.y*expf(-t/tau_h));
temp_state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
temp_state.w = state.w*expf(-alpha*t);
return temp_state;
}
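//----------------------------------------------------------------------------
// Closed-form update over an interval t for a neuron that stays in zone 2 (the
// centre zone): state.x and state.y are advanced with the long symbolic
// solution of the coupled dynamics (beta_centre/gamma_centre drive plus the gs
// synaptic and constant-current terms), matching the zone-2 branch of
// updateState above.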
__inline__ __device__ float4 updateZone2( float4 state, float t)
{
float4 temp_state = state;
temp_state.x =
1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*t
au_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_cent
re*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh
*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0
/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau);
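// Second state component (y): closed-form expression for the same interval,
// evidently exported from a symbolic solver and left unsimplified.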
temp_state.y =
1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(t
au*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sq
rt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alph
a*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alph
a*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0));
temp_state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
temp_state.w = state.w*expf(-alpha*t);
return temp_state;
}
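// Closed-form state advance for zone 3 over an interval of length t:
// x relaxes with time constant tau under the beta_right/gh drive, the synaptic
// terms (gs, alpha) and the constant current I; y relaxes towards beta_left with
// time constant tau_h; z and w decay as (z + alpha*w*t)*exp(-alpha*t) and
// w*exp(-alpha*t) (an alpha-function-like synapse).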
__inline__ __device__ float4 updateZone3( float4 state, float t)
{
float4 temp_state = state;
temp_state.x = state.x*expf(-t/tau)
+(beta_right*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_right*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
-I + I*expf(-t/tau);
temp_state.y = beta_left*(1.0f-expf(-t/tau_h))+state.y*expf(-t/tau_h);
temp_state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
temp_state.w = state.w*expf(-alpha*t);
return temp_state;
}
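// Closed-form state advance for zone 4 over an interval of length t:
// x is held at V_r (presumably a reset value); y follows the long
// machine-generated centre-zone solution (beta_centre, gamma_centre);
// z and w decay exactly as in the other zones.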
__inline__ __device__ float4 updateZone4( float4 state, float t)
{
float4 temp_state = state;
temp_state.x = V_r;
temp_state.y =
1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(state.y+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(t
au*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-gamma_centre*tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-state.x+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sq
rt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alph
a*gs*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*state.w*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alph
a*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*state.z*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*state.z*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0));
temp_state.z = (state.z+alpha*state.w*t)*expf(-alpha*t);
temp_state.w = state.w*expf(-alpha*t);
return temp_state;
}
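// Left-boundary root function: the closed-form voltage at time t (the same
// form as the zone-3 x update, but driven by beta_left) minus V_left, so a
// zero of fun1 is a time at which the trajectory reaches V_left.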
__device__ float fun1( float t, float v0, float n0, float u0, float y0)
{
return v0*expf(-t/tau)
+(beta_left*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_left*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*n0*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*n0*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+powf(alpha,2)*t*tau*y0))/powf(alpha*tau-1,2)
-(gs*expf(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/powf(alpha*tau-1,2)
-I+I*expf(-t/tau)-V_left;
}
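// Centre-zone root function: the machine-generated closed-form voltage at
// time t (same expression family as the updateZone* components) minus the
// caller-supplied threshold 'thresh'.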
__device__ float fun2( float t, float v0, float n0, float u0, float y0, float thresh)
{
return
1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(-v0+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0
))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0
))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*t
au_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(n0+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+g
amma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau)
- thresh;
}
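// Zone-3 root function: the same trajectory as updateZone3's x component
// (beta_right drive) evaluated at time t, minus 'thresh'.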
__device__ float fun3( float t, float v0, float n0, float u0, float y0, float thresh)
{
return v0*expf(-t/tau)
+(beta_right*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_right*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*n0*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*n0*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+powf(alpha,2)*t*tau*y0))/powf(alpha*tau-1,2)
-(gs*expf(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/powf(alpha*tau-1,2)
-I+I*expf(-t/tau) - thresh;
}
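// Analytic time derivative of the beta_right trajectory used in fun3 /
// updateZone3, presumably intended as the derivative term of a Newton-type
// root finder. Note: despite the name, it uses beta_right (as in fun3),
// not beta_left (as in fun1).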
__device__ float dfun1( float t, float v0, float n0, float u0, float y0)
{
return
(beta_right*gh*exp(-t/tau))/(tau-tau_h)-(v0*exp(-t/tau))/tau-(I*exp(-t/tau))/tau-(beta_right*gh*exp(-t/tau_h))/(tau-tau_h)+(gh*n0*exp(-t/tau_h))/(tau-tau_h)-(gs*exp(-alpha*t)*(-tau*y0*alpha*alpha+y0*alpha))/powf(alpha*tau-1.0f,2.0f)+(gs*exp(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/(tau*powf(alpha*tau-1.0f,2.0f))-(alpha*gs*exp(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+alpha*alpha*t*tau*y0))/powf(alpha*tau-1.0f,2.0f)-(gh*n0*tau_h*exp(-t/tau))/(tau*(tau-tau_h));
}
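// Illustrative sketch (an assumption, not part of the original file, and kept
// compiled out): fun3 and dfun1 form a value/derivative pair, so a
// threshold-crossing time can be refined with a plain Newton iteration.
#if 0
__device__ float newtonCrossingTimeZone3(float t0, float v0, float n0,
                                         float u0, float y0, float thresh)
{
    float t = t0;
    for (int it = 0; it < 32; ++it) {
        float f  = fun3(t, v0, n0, u0, y0, thresh); // residual v(t) - thresh
        float df = dfun1(t, v0, n0, u0, y0);        // analytic dv/dt
        if (fabsf(df) < 1.0e-12f) {
            break;                                  // derivative too small to divide by
        }
        float step = f / df;
        t -= step;
        if (fabsf(step) < 1.0e-7f) {
            break;                                  // converged
        }
    }
    return t;
}
#endif

// Apparently the analytic time derivative of the centre-zone expression used
// in fun2, again in machine-generated closed form.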
__device__ float dfun2( float t, float v0, float n0, float u0, float y0)
{
return
1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(I*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-1.0/2.0)+(I*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau+(I*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau+(gs*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau+(I*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/gamma_centre-(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+gs*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)-alpha*gs*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))
/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)-(alpha*gs*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0))/tau+(I*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-alpha*gs*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0))/tau-(gs*tau_h*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau*tau_h)-(I*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_
h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau_h)-(beta_centre*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau)+(beta_centre*tau*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau_h*(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0))+(beta_centre*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0))+(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau*tau_h)+(alpha*gs*tau_h*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(alpha*gs*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(alpha*gs*tau_h*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*
tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(alpha*gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(gs*tau*tau_h*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(alpha*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau-(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h-(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(-v0+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0
+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_
h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau
_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)
*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-1.0/2.0)+(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau_h+(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau_h+(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+I*gamma_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-gamma_centre*gs*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau_h+alpha*gamma_centre*gs*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(beta_centre*tau*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.
0))*(1.0/2.0))/(tau_h*(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(alpha*gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h+(tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau-(tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h-(tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(tau*tau_h*-2.0+tau*tau+ta
u_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)-(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(n0+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1
.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau);
}
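// dfun3 supplies the derivative f'(t) used by the Newton iterations below (eventTimeZone3 and
// FindSpikeTime); it is the time derivative of the closed-form zone-3 voltage solution.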
__device__ float dfun3( float t, float v0, float n0, float u0, float y0)
{
return
(beta_right*gh*exp(-t/tau))/(tau-tau_h)-(v0*exp(-t/tau))/tau-(I*exp(-t/tau))/tau-(beta_right*gh*exp(-t/tau_h))/(tau-tau_h)+(gh*n0*exp(-t/tau_h))/(tau-tau_h)-(gs*exp(-alpha*t)*(-tau*y0*alpha*alpha+y0*alpha))/powf(alpha*tau-1.0f,2.0f)+(gs*exp(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/(tau*powf(alpha*tau-1.0f,2.0f))-(alpha*gs*exp(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+alpha*alpha*t*tau*y0))/powf(alpha*tau-1.0f,2.0f)-(gh*n0*tau_h*exp(-t/tau))/(tau*(tau-tau_h));
}
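// The eventTimeZone* helpers locate the next zone-boundary crossing by solving fun_i(t) = 0 with
// an undamped Newton iteration, t <- t - f(t)/f'(t), started from t = 0 and stopped once |f| < tol.
// Zones 2 and 3 solve for both of their boundaries and keep the earlier crossing, recording which
// boundary was hit in *cross. Note that the loops assume convergence: there is no cap on the
// number of Newton steps.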
__device__ float eventTimeZone1( float v0, float n0, float u0, float y0)
{
float f, df, estimatedTime = 0.0f;
f = fun1( estimatedTime, v0, n0, u0, y0);
df = dfun1( estimatedTime, v0, n0, u0, y0);
while (fabs(f)>tol) {
estimatedTime -= f/df;
f = fun1( estimatedTime, v0, n0, u0, y0);
df = dfun1( estimatedTime, v0, n0, u0, y0);
}
return estimatedTime;
}
__device__ void eventTimeZone2( float v0, float n0, float u0, float y0,
float *t, unsigned short *cross)
{
float f, df;
float estimatedTimeLeft = 0.0f;
float estimatedTimeRight = 0.0f;
f = fun2( estimatedTimeLeft, v0, n0, u0, y0, V_left);
df = dfun2( estimatedTimeLeft, v0, n0, u0, y0);
while (fabs(f)>tol) {
estimatedTimeLeft -= f/df;
f = fun2( estimatedTimeLeft, v0, n0, u0, y0, V_left);
df = dfun2( estimatedTimeLeft, v0, n0, u0, y0);
}
f = fun2( estimatedTimeRight, v0, n0, u0, y0, V_right);
df = dfun2( estimatedTimeRight, v0, n0, u0, y0);
while (fabs(f)>tol) {
estimatedTimeRight -= f/df;
f = fun2( estimatedTimeRight, v0, n0, u0, y0, V_right);
df = dfun2( estimatedTimeRight, v0, n0, u0, y0);
}
*cross = 2;
if (estimatedTimeRight<estimatedTimeLeft)
{
estimatedTimeLeft = estimatedTimeRight;
*cross = 3;
}
*t = estimatedTimeLeft;
}
__device__ void eventTimeZone3( float v0, float n0, float u0, float y0,
float *t, unsigned short *cross)
{
float f, df;
float estimatedTimeLeft = 0.0f;
float estimatedTimeRight = 0.0f;
f = fun3( estimatedTimeLeft, v0, n0, u0, y0, V_right);
df = dfun3( estimatedTimeLeft, v0, n0, u0, y0);
while (fabs(f)>tol) {
estimatedTimeLeft -= f/df;
f = fun3( estimatedTimeLeft, v0, n0, u0, y0, V_right);
df = dfun3( estimatedTimeLeft, v0, n0, u0, y0);
}
f = fun3( estimatedTimeRight, v0, n0, u0, y0, V_th);
df = dfun3( estimatedTimeRight, v0, n0, u0, y0);
while (fabs(f)>tol) {
estimatedTimeRight -= f/df;
f = fun3( estimatedTimeRight, v0, n0, u0, y0, V_th);
df = dfun3( estimatedTimeRight, v0, n0, u0, y0);
}
*cross = 4;
if (estimatedTimeRight<estimatedTimeLeft)
{
estimatedTimeLeft = estimatedTimeRight;
*cross = 5;
}
*t = estimatedTimeLeft;
}
__global__ void eventTimeZone1Kernel( const float4* pGlobal_state,
const unsigned short* pGlobalZone,
struct firing* pVal)
{
unsigned int k = threadIdx.x + blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[k] == 1);
  pVal[k].time = 100000.0f;
if (correct_zone)
{
float4 local_state = pGlobal_state[k];
pVal[k].time = eventTimeZone1(local_state.x,local_state.y,local_state.z,local_state.w);
pVal[k].index = k;
pVal[k].cross = 1;
}
}
__global__ void eventTimeZone2Kernel( const float4* pGlobal_state,
const unsigned short* pGlobalZone,
struct firing* pVal)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[k] == 2);
float4 local_state = pGlobal_state[k];
float local_time = 1000000.0f;
unsigned short cross;
if (correct_zone)
{
eventTimeZone2(local_state.x,local_state.y,local_state.z,local_state.w,&local_time,&cross);
pVal[k].time = local_time;
pVal[k].index = k;
pVal[k].cross = cross;
}
}
__global__ void eventTimeZone3Kernel( const float4* pGlobal_state,
const unsigned short* pGlobalZone,
struct firing* pVal)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[k] == 3);
float4 local_state = pGlobal_state[k];
float local_time = 1000000.0f;
unsigned short cross;
if (correct_zone)
{
eventTimeZone3(local_state.x,local_state.y,local_state.z,local_state.w,&local_time,&cross);
pVal[k].time = local_time;
pVal[k].index = k;
pVal[k].cross = cross;
}
}
__global__ void eventTimeZone4Kernel( const float *pRefractTime,
const unsigned short* pGlobalZone,
struct firing* pVal)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[k] == 4);
if (correct_zone)
{
pVal[k].time = pRefractTime[k];
pVal[k].index = k;
pVal[k].cross = 6;
}
}
__global__ void updateStateKernel( float4* pGlobalState,
float* pRefractTimes,
const unsigned short* pGlobalZone,
const float eventTime)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
float4 local_state = pGlobalState[k];
float refract_time = pRefractTimes[k];
local_state = updateState( local_state, eventTime, pGlobalZone[k]);
refract_time -= eventTime;
pGlobalState[k] = local_state;
pRefractTimes[k] = refract_time*(refract_time>0.0f);
}
__global__ void updateZone1Kernel( float4* pGlobalState,
const unsigned int *pGlobalZone,
const float eventTime)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[k] == 1);
if (correct_zone)
{
float4 local_state = pGlobalState[k];
local_state = updateZone1( local_state, eventTime);
pGlobalState[k] = local_state;
}
}
__global__ void updateZone2Kernel( float4* pGlobalState,
const unsigned int *pGlobalZone,
const float eventTime)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[k] == 2);
if (correct_zone)
{
float4 local_state = pGlobalState[k];
local_state = updateZone2( local_state, eventTime);
pGlobalState[k] = local_state;
}
}
__global__ void updateZone3Kernel( float4* pGlobalState,
const unsigned int *pGlobalZone,
const float eventTime)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[k] == 3);
if (correct_zone)
{
float4 local_state = pGlobalState[k];
local_state = updateZone3( local_state, eventTime);
pGlobalState[k] = local_state;
}
}
__global__ void updateZone4Kernel( float4* pGlobalState,
const unsigned int *pGlobalZone,
const float eventTime)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[k] == 4);
if (correct_zone)
{
float4 local_state = pGlobalState[k];
local_state = updateZone4( local_state, eventTime);
pGlobalState[k] = local_state;
}
}
__global__ void ApplyResetKernel( float4* pGlobalState,
                                  unsigned short* pGlobalZone,
                                  const unsigned int index,
                                  float* pRefractTime)
{
  int k = (int)(threadIdx.x+blockDim.x*blockIdx.x) + (int)index - spatial_extent;
if ((k>=0)&&(k<N))
{
pGlobalState[k].w += alpha*W*dx;
}
if (threadIdx.x==0)
{
pGlobalState[index].x = V_r;
pGlobalZone [index] = 2;
pRefractTime[index] = tau_r;
}
}
__global__ void InitialiseKernel( float4* pGlobalState,
unsigned short* pGlobalZone)
{
unsigned int k = threadIdx.x+blockDim.x*blockIdx.x;
float4 local_state = (float4){0.0f,0.0f,0.0f,0.0f};
pGlobalState[k] = local_state;
pGlobalZone[k] = 2;
}
void updateZones( const struct firing* val,
unsigned short* pGlobalZone,
float4* pGlobalState,
float* pRefractTime)
{
unsigned int cross = (*val).cross;
unsigned int index = (*val).index;
unsigned short new_zone;
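  // Map the crossing code reported by the event-time kernels onto the new zone
  // (codes as set above: 1 = leave zone 1, 2/3 = leave zone 2 through its left/right boundary,
  //  4/5 = leave zone 3 through V_right/V_th, 6 = end of refractory period).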
switch (cross)
{
case 1 :
new_zone = 2;
break;
case 2 :
new_zone = 1;
break;
case 3 :
new_zone = 3;
break;
case 4 :
new_zone = 2;
break;
case 5 :
new_zone = 4;
break;
case 6 :
new_zone = 2;
break;
}
// Update zone of neuron that fired
CUDA_CALL( cudaMemcpy( pGlobalZone+index, &new_zone, sizeof(short),
cudaMemcpyHostToDevice));
// If cell fired, need to reset voltage and send out synaptic input
if (cross==5)
{
    ApplyResetKernel<<<(2*spatial_extent+noThreads-1)/noThreads,noThreads>>>
      ( pGlobalState, pGlobalZone, index, pRefractTime);
}
}
int main( int argc , char *argv[])
{
// Allocate memory
float4* p_global_state;
float* p_refract_time;
struct firing* p_firing_val;
struct firing* p_firing_val_temp;
struct firing* p_firing_pinned;
unsigned short* p_global_zone;
// Allocate memory
CUDA_CALL( cudaMalloc( &p_global_state, N*sizeof(float4)));
CUDA_CALL( cudaMalloc( &p_refract_time, N*sizeof(float)));
CUDA_CALL( cudaMalloc( &p_global_zone, N*sizeof(short)));
CUDA_CALL( cudaMalloc( &p_firing_val, N*sizeof(firing)));
CUDA_CALL( cudaMalloc( &p_firing_val_temp, noBlocks*sizeof(firing)));
// Pinned memory
CUDA_CALL( cudaMallocHost( (void**) &p_firing_pinned, sizeof(firing)));
InitialiseKernel<<<noBlocks,noThreads>>>( p_global_state, p_global_zone);
float final_time = 100.0f;
float current_time = 0.0f;
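  // Event-driven loop: each pass computes, per neuron, the time to its next zone-boundary
  // crossing, reduces these to the global minimum, advances every neuron by exactly that
  // event time, and then applies the corresponding zone change (and reset if a spike occurred).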
while (current_time<final_time)
{
eventTimeZone1Kernel<<<noBlocks,noThreads>>>
(p_global_state,
p_global_zone,
p_firing_val);
CUDA_CHECK_ERROR();
eventTimeZone2Kernel<<<noBlocks,noThreads>>>
(p_global_state,
p_global_zone,
p_firing_val);
CUDA_CHECK_ERROR();
eventTimeZone3Kernel<<<noBlocks,noThreads>>>
(p_global_state,
p_global_zone,
p_firing_val);
CUDA_CHECK_ERROR();
eventTimeZone4Kernel<<<noBlocks,noThreads>>>
( p_refract_time,
p_global_zone,
p_firing_val);
CUDA_CHECK_ERROR();
// Find minimum spike time
deviceReduceMinKernel<<<noBlocks,noThreads>>>
( p_firing_val, N, p_firing_val_temp);
CUDA_CHECK_ERROR();
deviceReduceMinKernel<<<1,noThreads>>>
( p_firing_val_temp, noBlocks, p_firing_val_temp);
CUDA_CHECK_ERROR();
    // Update - copy the reduced minimum event to page-locked host memory
    CUDA_CALL( cudaMemcpy( p_firing_pinned, p_firing_val_temp, sizeof(firing),
          cudaMemcpyDeviceToHost));
updateStateKernel<<<noBlocks,noThreads>>>
( p_global_state, p_refract_time, p_global_zone, (*p_firing_pinned).time);
CUDA_CHECK_ERROR();
// Update zones
updateZones( p_firing_pinned, p_global_zone, p_global_state, p_refract_time);
// Update time
current_time += (*p_firing_pinned).time;
}
cudaFree( p_global_state);
cudaFree( p_global_zone);
cudaFree( p_refract_time);
cudaFree( p_firing_val);
cudaFree( p_firing_val_temp);
cudaFreeHost( p_firing_pinned);
}
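// Closed-form zone-3 membrane voltage at time t for the state packed in a float4
// (x = v, y = n, z = u, w = y, matching the (v0, n0, u0, y0) argument order used above).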
__device__ float UpdateVoltageZone3( const float4 state, const float t)
{
return (state.x*expf(-t/tau)
+(beta_right*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h)
-(beta_right*gh*tau*expf(-t/tau))/(tau-tau_h)
-(gh*state.y*tau_h*expf(-t/tau_h))/(tau-tau_h)
+(gh*state.y*tau_h*expf(-t/tau))/(tau-tau_h)
+(gs*expf(-alpha*t)*(alpha*tau*state.z-state.z-alpha*t*state.w+alpha*tau*state.w+alpha*alpha*t*tau*state.w))/powf(alpha*tau-1.0f,2)
-(gs*expf(-t/tau)*(alpha*tau*state.z-state.z+alpha*tau*state.w))/powf(alpha*tau-1.0f,2)
    -I +I*expf(-t/tau));
}
//-------------------------------------------------------------------------------------------
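// FindSpikeTime: Newton iteration (same scheme as eventTimeZone3) for the time at which the
// zone-3 voltage reaches V_th, starting from t = 0.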
__device__ float FindSpikeTime( const float4 state)
{
float spikeTime = 0.0f;
float f, df;
f = fun3( spikeTime, state.x, state.y, state.z, state.w, V_th);
df = dfun3( spikeTime, state.x, state.y, state.z, state.w);
while (fabs(f)>tol) {
spikeTime -= f/df;
f = fun3( spikeTime, state.x, state.y, state.z, state.w, V_th);
df = dfun3( spikeTime, state.x, state.y, state.z, state.w);
}
return spikeTime;
}
__global__ void FindSpikeTimeKernel( const float4* pGlobalState,
const short* pGlobalZone,
const float stepTime,
EventDrivenMap::firing* pFiringVal,
unsigned int* pEventNo)
{
unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
bool correct_zone = (pGlobalZone[index] == 3);
if (correct_zone)
{
float4 local_state = pGlobalState[index];
    float local_v = fun3( stepTime, local_state.x, local_state.y, local_state.z, local_state.w, 0.0f);
float spikeTime = stepTime;
unsigned int storage_id;
if (local_v>V_th)
{
spikeTime = FindSpikeTime( local_state);
storage_id = atomicAdd( pEventNo, 1);
// Store values
pFiringVal[storage_id].time = spikeTime;
pFiringVal[storage_id].index = index;
}
}
}
void EventDrivenMap::FindMinimumSpikeTime()
{
FindSpikeTimeKernel<<<mNoBlocks,mNoThreads>>>( mpGlobalState, mpGlobalZone,
mTime+mDt, mpFiringVal, mpEventNo);
CUDA_CHECK_ERROR();
// Find minimum spike time
CUDA_CALL( cudaMemcpy( mpHost_eventNo, mpEventNo, sizeof(int), cudaMemcpyDeviceToHost));
  if (*mpHost_eventNo>0)
{
deviceReduceMinKernel<<<mNoBlocks,mNoThreads>>>
      ( mpFiringVal, *mpHost_eventNo, mpFiringValTemp);
CUDA_CHECK_ERROR();
deviceReduceMinKernel<<<1,mNoThreads>>>
( mpFiringValTemp, mNoBlocks, mpFiringValTemp);
CUDA_CHECK_ERROR();
}
}
__inline__ __device__ EventDrivenMap::firing warpReduceMin( EventDrivenMap::firing val)
{
float dummyTime;
unsigned int dummyIndex;
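  // Warp-level arg-min: each lane repeatedly compares its candidate against the one held
  // 'offset' lanes above (fetched with __shfl_down) and keeps the earlier firing time together
  // with its index; after log2(warpSize) steps lane 0 holds the warp minimum.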
for (int offset = warpSize/2; offset>0; offset/=2) {
dummyTime = __shfl_down( val.time, offset);
dummyIndex = __shfl_down( val.index, offset);
if (dummyTime<val.time)
{
val.time = dummyTime;
val.index = dummyIndex;
}
}
return val;
}
__inline__ __device__ struct EventDrivenMap::firing blockReduceMin( EventDrivenMap::firing val)
{
__shared__ EventDrivenMap::firing shared[32];
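  // One shared slot per warp (at most 32 warps per block): lane 0 of each warp deposits its
  // warp minimum, and the first warp then reduces those partials to the block minimum.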
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceMin( val);
if (lane==0) {
shared[wid] = val;
}
__syncthreads();
val = (threadIdx.x<blockDim.x/warpSize) ? shared[lane] : (EventDrivenMap::firing){100.0f,0};
if (wid==0) {
val = warpReduceMin( val);
}
return val;
}
__global__ void deviceReduceMinKernel( const EventDrivenMap::firing* in,
const unsigned int npts,
EventDrivenMap::firing* out)
{
  float time = 1000000.0f;
  struct EventDrivenMap::firing dummy;
  struct EventDrivenMap::firing val;
  val.time = time;
  val.index = 0;
//reduce multiple elements per thread
for (int i=blockIdx.x*blockDim.x+threadIdx.x;i<npts;i+=blockDim.x*gridDim.x)
{
dummy = in[i];
if (dummy.time < time)
{
val = dummy;
time = dummy.time;
}
}
val = blockReduceMin( val);
if (threadIdx.x==0)
{
out[blockIdx.x] = val;
}
}
|
efbf214d664be947888cad0a821b979df70c1d6e.hip | // !!! This is a file automatically generated by hipify!!!
// Added by Karel Adamek
#ifndef MSD_BLN_PW_KERNEL_H_
#define MSD_BLN_PW_KERNEL_H_
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "headers/params.h"
/*
//----------------------------------------------------------------------------------------
//------------- Device functions
__device__ __inline__ void Initiate(float *M, float *S, float *j, float element){
*M = element;
*S = 0;
*j = 1.0f;
}
__device__ __inline__ void Add_one(float *M, float *S, float *j, float element){
float ftemp;
*j = (*j) + 1.0f;
*M = (*M) + element;
ftemp = ( (*j)*element - (*M) );
*S = (*S) + 1.0f / ( (*j)*( (*j) - 1.0f ) )*ftemp*ftemp;
}
__device__ __inline__ void Merge(float *A_M, float *A_S, float *A_j, float B_M, float B_S, float B_j){
float ftemp;
ftemp = ( B_j / (*A_j)*(*A_M) - B_M );
(*A_S) = (*A_S) + B_S + ( (*A_j) / ( B_j*( (*A_j) + B_j ) ) )*ftemp*ftemp;
(*A_M) = (*A_M) + B_M;
(*A_j) = (*A_j) + B_j;
}
__device__ __inline__ void Reduce_SM(float *M, float *S, float *j, float *s_input){
float jv;
(*M)=s_input[threadIdx.x];
(*S)=s_input[blockDim.x + threadIdx.x];
(*j)=s_input[2*blockDim.x + threadIdx.x];
for (int i = ( blockDim.x >> 1 ); i > HALF_WARP; i = i >> 1) {
if (threadIdx.x < i) {
jv = s_input[2*blockDim.x + i + threadIdx.x];
if( ((int) jv)!=0){
if( (*j)==0 ){
(*S) = s_input[blockDim.x + i + threadIdx.x];
(*M) = s_input[i + threadIdx.x];
(*j) = jv;
}
else {
Merge(M, S, j, s_input[i + threadIdx.x], s_input[blockDim.x + i + threadIdx.x], jv);
}
}
s_input[threadIdx.x] = (*M);
s_input[blockDim.x + threadIdx.x] = (*S);
s_input[2*blockDim.x + threadIdx.x] = (*j);
}
__syncthreads();
}
}
__device__ __inline__ void Reduce_SM_regular(float *M, float *S, float *j, float *s_input){
(*M)=s_input[threadIdx.x];
(*S)=s_input[blockDim.x + threadIdx.x];
(*j)=s_input[2*blockDim.x + threadIdx.x];
for (int i = ( blockDim.x >> 1 ); i > HALF_WARP; i = i >> 1) {
if (threadIdx.x < i) {
Merge(M, S, j, s_input[i + threadIdx.x], s_input[blockDim.x + i + threadIdx.x], s_input[2*blockDim.x + i + threadIdx.x]);
s_input[threadIdx.x] = (*M);
s_input[blockDim.x + threadIdx.x] = (*S);
s_input[2*blockDim.x + threadIdx.x] = (*j);
}
__syncthreads();
}
}
__device__ __inline__ void Reduce_WARP(float *M, float *S, float *j){
float jv;
for (int q = HALF_WARP; q > 0; q = q >> 1) {
jv = __shfl_down((*j), q);
if(jv!=0){
if( (*j)==0 ) {
(*S) = __shfl_down((*S), q);
(*M) = __shfl_down((*M), q);
(*j) = jv;
}
else {
Merge(M, S, j, __shfl_down((*M), q), __shfl_down((*S), q), jv);
}
}
}
}
__device__ __inline__ void Reduce_WARP_regular(float *M, float *S, float *j){
for (int q = HALF_WARP; q > 0; q = q >> 1) {
Merge(M, S, j, __shfl_down((*M), q), __shfl_down((*S), q), __shfl_down((*j), q));
}
}
//------------- Device functions
//----------------------------------------------------------------------------------------
*/
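// The Initiate/Add_one/Merge/Reduce_* helpers used below (a commented-out copy of their
// implementation is kept above) maintain running statistics in the form M = sum of samples,
// S = sum of squared deviations from the running mean, j = number of samples, so that the mean
// is M/j and the variance S/j; Merge combines two such partial results (pairwise, Chan-style update).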
__global__ void MSD_BLN_pw_no_rejection(float const* __restrict__ d_input, float *d_output, int y_steps, int nTimesamples, int offset) {
__shared__ float s_input[3*MSD_PW_NTHREADS];
float M, S, j, ftemp;
int spos = blockIdx.x*MSD_PW_NTHREADS + threadIdx.x;
int gpos = blockIdx.y*y_steps*nTimesamples + spos;
M=0; S=0; j=0;
if( spos<(nTimesamples-offset) ){
ftemp=__ldg(&d_input[gpos]);
Initiate( &M, &S, &j, ftemp);
gpos = gpos + nTimesamples;
for (int yf = 1; yf < y_steps; yf++) {
ftemp=__ldg(&d_input[gpos]);
Add_one( &M, &S, &j, ftemp);
gpos = gpos + nTimesamples;
}
}
s_input[threadIdx.x] = M;
s_input[blockDim.x + threadIdx.x] = S;
s_input[2*blockDim.x + threadIdx.x] = j;
__syncthreads();
Reduce_SM( &M, &S, &j, s_input );
Reduce_WARP( &M, &S, &j);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
gpos = blockIdx.y*gridDim.x + blockIdx.x;
d_output[3*gpos] = M;
d_output[3*gpos + 1] = S;
d_output[3*gpos + 2] = j;
}
}
__global__ void MSD_BLN_pw_rejection_normal(float const* __restrict__ d_input, float *d_output, float *d_MSD, int y_steps, int nTimesamples, int offset, float bln_sigma_constant) {
__shared__ float s_input[3*PD_NTHREADS];
float M, S, j, ftemp, signal_mean, signal_sd;
signal_mean = d_MSD[0];
signal_sd = d_MSD[1];
int spos = blockIdx.x*PD_NTHREADS + threadIdx.x;
int gpos = blockIdx.y*y_steps*nTimesamples + spos;
M=0; S=0; j=0;
if( spos<(nTimesamples-offset) ){
for (int yf = 0; yf < y_steps; yf++) {
ftemp=__ldg(&d_input[gpos]);
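			// Point-wise sigma clipping: only samples within mean +/- bln_sigma_constant*sd of the
			// previously estimated statistics (d_MSD) contribute to the new partial sums.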
if( (ftemp > (signal_mean - bln_sigma_constant*signal_sd)) && (ftemp < (signal_mean + bln_sigma_constant*signal_sd)) ){
if(j==0){
Initiate( &M, &S, &j, ftemp);
}
else{
Add_one( &M, &S, &j, ftemp);
}
}
gpos = gpos + nTimesamples;
}
}
s_input[threadIdx.x] = M;
s_input[blockDim.x + threadIdx.x] = S;
s_input[2*blockDim.x + threadIdx.x] = j;
__syncthreads();
Reduce_SM( &M, &S, &j, s_input );
Reduce_WARP( &M, &S, &j);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
gpos = blockIdx.y*gridDim.x + blockIdx.x;
d_output[3*gpos] = M;
d_output[3*gpos + 1] = S;
d_output[3*gpos + 2] = j;
}
}
__global__ void MSD_GPU_LA_ALL_no_rejection(float const* __restrict__ d_input, float *d_output, float *d_output_taps, int y_steps, int nTaps, int nTimesamples, int offset) {
__shared__ float s_input[3*MSD_PW_NTHREADS];
__shared__ float s_base[3*MSD_PW_NTHREADS];
// MSD variables
float M, S, j;
float M_b, S_b, j_b;
// FIR variables
int d, gpos, spos, local_id;
ushort EpT, limit;
float2 ftemp1, ftemp2, ftemp3;
float Bw[2];
EpT = 2*MSD_PW_NTHREADS-nTaps+4;
limit = blockDim.x - (nTaps>>2) - 1;
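	// Each thread produces two filtered samples, so a block loads 2*MSD_PW_NTHREADS values but only
	// EpT of them yield complete nTaps-wide boxcar sums; the remaining (nTaps-4) samples form the
	// halo at the end of the block, and 'limit' marks (approximately) the last thread whose sums are complete.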
// First y coordinate is separated
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + spos;
	Bw[0]=0; Bw[1]=0; M=0; S=0; M_b=0; S_b=0; j=0; j_b=0;
if( (spos+4)<(nTimesamples-offset) ){
// loading data for FIR filter. Each thread calculates two samples
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
// Calculate FIR of 4 taps
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
Initiate( &M_b, &S_b, &j_b, ftemp1.x );
Add_one( &M_b, &S_b, &j_b, ftemp1.y );
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
__syncthreads();
	// Calculating FIR up to nTaps
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
	// Note: threads whose local_id falls outside the valid range still accumulate sums, but their
	// results are removed from the final reduction later. The same holds for the base values, which
	// would otherwise be counted twice: once here and once in the neighbouring thread block.
	// This is a consequence of the halo needed by the FIR filter.
Initiate( &M, &S, &j, Bw[0] );
Add_one( &M, &S, &j, Bw[1] );
// Rest of the iteration in y direction
for (int yf = 1; yf < y_steps; yf++) {
__syncthreads();
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + yf*nTimesamples + spos;
Bw[0]=0; Bw[1]=0;
if( (spos+4)<(nTimesamples-offset) ){
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
Add_one( &M_b, &S_b, &j_b, ftemp1.x );
Add_one( &M_b, &S_b, &j_b, ftemp1.y );
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
Add_one( &M, &S, &j, Bw[0] );
Add_one( &M, &S, &j, Bw[1] );
}
__syncthreads();
s_input[threadIdx.x] = 0;
s_input[blockDim.x + threadIdx.x] = 0;
s_input[2*blockDim.x + threadIdx.x] = 0;
s_base[threadIdx.x] = 0;
s_base[blockDim.x + threadIdx.x] = 0;
s_base[2*blockDim.x + threadIdx.x] = 0;
__syncthreads();
spos=blockIdx.x*(EpT) + 2*threadIdx.x;
if( local_id<=limit ) {
		// Note: the samples omitted in the last (trailing) thread blocks are due to the -nTaps term below.
		// The missing data should already be accounted for through local_id, so this code discards some
		// time samples even though it does not need to. Removing the term, however, produces a different
		// number of accumulated samples in j and j_b, which is odd.
if( spos<(nTimesamples-offset-nTaps) ) { // -nTaps might not be necessary
s_input[local_id] = M;
s_input[blockDim.x + local_id] = S;
s_input[2*blockDim.x + local_id] = j;
s_base[local_id] = M_b;
s_base[blockDim.x + local_id] = S_b;
s_base[2*blockDim.x + local_id] = j_b;
}
}
__syncthreads();
//------------------------------------------------------------------------------------
//---------> StrDev of processed input
Reduce_SM( &M, &S, &j, s_input );
Reduce_WARP( &M, &S, &j);
//------------------------------------------------------------------------------------
//---------> StrDev of unprocessed input
Reduce_SM( &M_b, &S_b, &j_b, s_base );
Reduce_WARP( &M_b, &S_b, &j_b);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
gpos = blockIdx.y*gridDim.x + blockIdx.x;
d_output_taps[3*gpos] = M;
d_output_taps[3*gpos + 1] = S;
d_output_taps[3*gpos + 2] = j;
d_output[3*gpos] = M_b;
d_output[3*gpos + 1] = S_b;
d_output[3*gpos + 2] = j_b;
}
}
__global__ void MSD_GPU_LA_ALL_Nth_no_rejection(float const* __restrict__ d_input, float const* __restrict__ d_bv_in, float *d_output, float *d_output_taps, int y_steps, int nTaps, int nTimesamples, int offset) {
__shared__ float s_input[3*MSD_PW_NTHREADS];
__shared__ float s_base[3*MSD_PW_NTHREADS];
// MSD variables
float M, S, j;
float M_b, S_b, j_b;
// FIR variables
int d, gpos, spos, local_id;
ushort EpT, limit;
float2 ftemp1, ftemp2, ftemp3;
float Bw[2];
EpT = 2*MSD_PW_NTHREADS-nTaps+4;
limit = blockDim.x - (nTaps>>2) - 1;
// First y coordinate is separated
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + spos;
Bw[0]=0; Bw[1]=0; M=0; S=0; M_b=0; S_b=0; j=0; j_b=0;
if( (spos+4)<(nTimesamples-offset) ){
// loading data for FIR filter. Each thread calculates two samples
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
// Calculate FIR of 4 taps
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
Initiate( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos]) );
Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos+1]) );
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
__syncthreads();
	// Calculating FIR up to nTaps
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
	// Note: threads whose local_id falls outside the valid range (and thus hold an incomplete FIR result) compute running sums as well, but these are removed from the final result later.
	// The same holds for the base values, which would otherwise be counted twice: once here and once in the neighbouring threadblock.
	// This is due to the halo needed for the FIR filter.
Initiate( &M, &S, &j, __ldg(&d_bv_in[gpos]) + Bw[0] );
Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos+1]) + Bw[1] );
// Rest of the iteration in y direction
for (int yf = 1; yf < y_steps; yf++) {
__syncthreads();
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + yf*nTimesamples + spos;
Bw[0]=0; Bw[1]=0;
if( (spos+4)<(nTimesamples-offset) ){
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos]) );
Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos+1]) );
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos]) + Bw[0] );
Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos+1]) + Bw[1] );
}
__syncthreads();
s_input[threadIdx.x] = 0;
s_input[blockDim.x + threadIdx.x] = 0;
s_input[2*blockDim.x + threadIdx.x] = 0;
s_base[threadIdx.x] = 0;
s_base[blockDim.x + threadIdx.x] = 0;
s_base[2*blockDim.x + threadIdx.x] = 0;
__syncthreads();
spos=blockIdx.x*(EpT) + 2*threadIdx.x;
if( local_id<=limit ) {
		// Note: the omitted number of samples in the last trailing threadblocks is due to the -nTaps term here.
		// Missing data should be contained in local_id, thus this code misses some time samples even if it does not need to.
		// When removed it produces a different number of added time samples in j and j_b, which is weird.
if( spos<(nTimesamples-offset-nTaps) ) { // -nTaps might not be necessary
s_input[local_id] = M;
s_input[blockDim.x + local_id] = S;
s_input[2*blockDim.x + local_id] = j;
s_base[local_id] = M_b;
s_base[blockDim.x + local_id] = S_b;
s_base[2*blockDim.x + local_id] = j_b;
}
}
__syncthreads();
//------------------------------------------------------------------------------------
//---------> StrDev of processed input
Reduce_SM( &M, &S, &j, s_input );
Reduce_WARP( &M, &S, &j);
//------------------------------------------------------------------------------------
//---------> StrDev of unprocessed input
Reduce_SM( &M_b, &S_b, &j_b, s_base );
Reduce_WARP( &M_b, &S_b, &j_b);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
gpos = blockIdx.y*gridDim.x + blockIdx.x;
d_output_taps[3*gpos] = M;
d_output_taps[3*gpos + 1] = S;
d_output_taps[3*gpos + 2] = j;
d_output[3*gpos] = M_b;
d_output[3*gpos + 1] = S_b;
d_output[3*gpos + 2] = j_b;
}
}
__global__ void MSD_GPU_LA_ALL_pw_rejection(float const* __restrict__ d_input, float *d_output, float *d_output_taps, float *d_MSD_T, float *d_MSD_T_base, float bln_sigma_constant, int y_steps, int nTaps, int nTimesamples, int offset) {
__shared__ float s_input[3*MSD_PW_NTHREADS];
__shared__ float s_base[3*MSD_PW_NTHREADS];
// MSD variables
float M, S, j;
float M_b, S_b, j_b;
// FIR variables
int d, gpos, spos, local_id;
ushort EpT, limit;
float2 ftemp1, ftemp2, ftemp3;
float Bw[2];
float signal_mean = d_MSD_T_base[0];
float signal_sd = d_MSD_T_base[1];
float signal_mean_taps = d_MSD_T[0];
float signal_sd_taps = d_MSD_T[1];
EpT = 2*MSD_PW_NTHREADS-nTaps+4;
limit = blockDim.x - (nTaps>>2) - 1;
// First y coordinate is separated
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + spos;
Bw[0]=0; Bw[1]=0; j=0; j_b=0;
M=0; S=0; j=0;
M_b=0; S_b=0; j_b=0;
if( (spos+4)<(nTimesamples-offset) ){
// loading data for FIR filter. Each thread calculates two samples
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
// Initialization of MSD variables for non-processed StrDev
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
if( (ftemp1.x > (signal_mean - bln_sigma_constant*signal_sd)) && (ftemp1.x < (signal_mean + bln_sigma_constant*signal_sd)) ){
if(j_b==0) Initiate( &M_b, &S_b, &j_b, ftemp1.x );
else Add_one( &M_b, &S_b, &j_b, ftemp1.x );
}
if( (ftemp1.y > (signal_mean - bln_sigma_constant*signal_sd)) && (ftemp1.y < (signal_mean + bln_sigma_constant*signal_sd)) ){
if(j_b==0) Initiate( &M_b, &S_b, &j_b, ftemp1.y );
else Add_one( &M_b, &S_b, &j_b, ftemp1.y );
}
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
__syncthreads();
	// Calculating FIR up to nTaps
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
	// Note: threads whose local_id falls outside the valid range (and thus hold an incomplete FIR result) compute running sums as well, but these are removed from the final result later.
	// The same holds for the base values, which would otherwise be counted twice: once here and once in the neighbouring threadblock.
	// This is due to the halo needed for the FIR filter.
if( (Bw[0] > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && (Bw[0] < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, Bw[0] );
else Add_one( &M, &S, &j, Bw[0] );
}
if( (Bw[1] > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && (Bw[1] < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, Bw[1] );
else Add_one( &M, &S, &j, Bw[1] );
}
// Rest of the iteration in y direction
for (int yf = 1; yf < y_steps; yf++) {
__syncthreads();
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + yf*nTimesamples + spos;
Bw[0]=0; Bw[1]=0;
if( (spos+4)<(nTimesamples-offset) ){
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
if( (ftemp1.x > (signal_mean - bln_sigma_constant*signal_sd)) && (ftemp1.x < (signal_mean + bln_sigma_constant*signal_sd)) ){
if(j_b==0) Initiate( &M_b, &S_b, &j_b, ftemp1.x );
else Add_one( &M_b, &S_b, &j_b, ftemp1.x );
}
if( (ftemp1.y > (signal_mean - bln_sigma_constant*signal_sd)) && (ftemp1.y < (signal_mean + bln_sigma_constant*signal_sd)) ){
if(j_b==0) Initiate( &M_b, &S_b, &j_b, ftemp1.y );
else Add_one( &M_b, &S_b, &j_b, ftemp1.y );
}
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
__syncthreads();
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
if( (Bw[0] > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && (Bw[0] < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, Bw[0] );
else Add_one( &M, &S, &j, Bw[0] );
}
if( (Bw[1] > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && (Bw[1] < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, Bw[1] );
else Add_one( &M, &S, &j, Bw[1] );
}
}
__syncthreads();
s_input[threadIdx.x] = 0;
s_input[blockDim.x + threadIdx.x] = 0;
s_input[2*blockDim.x + threadIdx.x] = 0;
s_base[threadIdx.x] = 0;
s_base[blockDim.x + threadIdx.x] = 0;
s_base[2*blockDim.x + threadIdx.x] = 0;
__syncthreads();
spos=blockIdx.x*(EpT) + 2*threadIdx.x;
if( local_id<=limit ) {
		// Note: the omitted number of samples in the last trailing threadblocks is due to the -nTaps term here.
		// Missing data should be contained in local_id, thus this code misses some time samples even if it does not need to.
		// When removed it produces a different number of added time samples in j and j_b, which is weird.
if( spos<(nTimesamples-offset-nTaps) ) { // -nTaps might not be necessary
s_input[local_id] = M;
s_input[blockDim.x + local_id] = S;
s_input[2*blockDim.x + local_id] = j;
s_base[local_id] = M_b;
s_base[blockDim.x + local_id] = S_b;
s_base[2*blockDim.x + local_id] = j_b;
}
}
__syncthreads();
//------------------------------------------------------------------------------------
//---------> StrDev of processed input
Reduce_SM( &M, &S, &j, s_input );
Reduce_WARP( &M, &S, &j);
//------------------------------------------------------------------------------------
//---------> StrDev of unprocessed input
Reduce_SM( &M_b, &S_b, &j_b, s_base );
Reduce_WARP( &M_b, &S_b, &j_b);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
gpos = blockIdx.y*gridDim.x + blockIdx.x;
d_output_taps[3*gpos] = M;
d_output_taps[3*gpos + 1] = S;
d_output_taps[3*gpos + 2] = j;
d_output[3*gpos] = M_b;
d_output[3*gpos + 1] = S_b;
d_output[3*gpos + 2] = j_b;
}
}
__global__ void MSD_GPU_LA_ALL_Nth_pw_rejection(float const* __restrict__ d_input, float const* __restrict__ d_bv_in, float *d_output, float *d_output_taps, float *d_MSD_T, float *d_MSD_T_base, float bln_sigma_constant, int y_steps, int nTaps, int nTimesamples, int offset) {
__shared__ float s_input[3*MSD_PW_NTHREADS];
__shared__ float s_base[3*MSD_PW_NTHREADS];
// MSD variables
float M, S, j;
float M_b, S_b, j_b;
// FIR variables
int d, gpos, spos, local_id;
ushort EpT, limit;
float2 ftemp1, ftemp2, ftemp3;
float Bw[2];
//float signal_mean = d_MSD_T_base[0];
//float signal_sd = d_MSD_T_base[1];
float signal_mean_taps = d_MSD_T[0];
float signal_sd_taps = d_MSD_T[1];
EpT = 2*MSD_PW_NTHREADS-nTaps+4;
limit = blockDim.x - (nTaps>>2) - 1;
// First y coordinate is separated
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + spos;
Bw[0]=0; Bw[1]=0; j=0; j_b=0;
M=0; S=0; j=0;
M_b=0; S_b=0; j_b=0;
if( (spos+4)<(nTimesamples-offset) ){
// loading data for FIR filter. Each thread calculates two samples
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
// Calculate FIR of 4 taps
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
//if( (__ldg(&d_bv_in[gpos]) > (signal_mean - bln_sigma_constant*signal_sd)) && (__ldg(&d_bv_in[gpos]) < (signal_mean + bln_sigma_constant*signal_sd)) ){
// if(j==0) Initiate( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos]) );
// else Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos]) );
//}
//if( (__ldg(&d_bv_in[gpos+1]) > (signal_mean - bln_sigma_constant*signal_sd)) && (__ldg(&d_bv_in[gpos+1]) < (signal_mean + bln_sigma_constant*signal_sd)) ){
// if(j==0) Initiate( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos+1]) );
// else Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos+1]) );
//}
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
__syncthreads();
	// Calculating FIR up to nTaps
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
	// Note: threads whose local_id falls outside the valid range (and thus hold an incomplete FIR result) compute running sums as well, but these are removed from the final result later.
	// The same holds for the base values, which would otherwise be counted twice: once here and once in the neighbouring threadblock.
	// This is due to the halo needed for the FIR filter.
if( ( (__ldg(&d_bv_in[gpos]) + Bw[0]) > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && ( (__ldg(&d_bv_in[gpos]) + Bw[0]) < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, __ldg(&d_bv_in[gpos]) + Bw[0] );
else Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos]) + Bw[0] );
}
if( ( (__ldg(&d_bv_in[gpos+1]) + Bw[1]) > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && ( (__ldg(&d_bv_in[gpos+1]) + Bw[1]) < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, __ldg(&d_bv_in[gpos+1]) + Bw[1] );
else Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos+1]) + Bw[1] );
}
// Rest of the iteration in y direction
for (int yf = 1; yf < y_steps; yf++) {
__syncthreads();
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + yf*nTimesamples + spos;
Bw[0]=0; Bw[1]=0;
if( (spos+4)<(nTimesamples-offset) ){
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
//if( (__ldg(&d_bv_in[gpos]) > (signal_mean - bln_sigma_constant*signal_sd)) && (__ldg(&d_bv_in[gpos]) < (signal_mean + bln_sigma_constant*signal_sd)) ){
// if(j==0) Initiate( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos]) );
// else Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos]) );
//}
//if( (__ldg(&d_bv_in[gpos+1]) > (signal_mean - bln_sigma_constant*signal_sd)) && (__ldg(&d_bv_in[gpos+1]) < (signal_mean + bln_sigma_constant*signal_sd)) ){
// if(j==0) Initiate( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos+1]) );
// else Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos+1]) );
//}
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
__syncthreads();
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
if( ( (__ldg(&d_bv_in[gpos]) + Bw[0]) > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && ( (__ldg(&d_bv_in[gpos]) + Bw[0]) < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, __ldg(&d_bv_in[gpos]) + Bw[0] );
else Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos]) + Bw[0] );
}
if( ( (__ldg(&d_bv_in[gpos+1]) + Bw[1]) > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && ( (__ldg(&d_bv_in[gpos+1]) + Bw[1]) < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, __ldg(&d_bv_in[gpos+1]) + Bw[1] );
else Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos+1]) + Bw[1] );
}
}
__syncthreads();
s_input[threadIdx.x] = 0;
s_input[blockDim.x + threadIdx.x] = 0;
s_input[2*blockDim.x + threadIdx.x] = 0;
s_base[threadIdx.x] = 0;
s_base[blockDim.x + threadIdx.x] = 0;
s_base[2*blockDim.x + threadIdx.x] = 0;
__syncthreads();
spos=blockIdx.x*(EpT) + 2*threadIdx.x;
if( local_id<=limit ) {
		// Note: the omitted number of samples in the last trailing threadblocks is due to the -nTaps term here.
		// Missing data should be contained in local_id, thus this code misses some time samples even if it does not need to.
		// When removed it produces a different number of added time samples in j and j_b, which is weird.
if( spos<(nTimesamples-offset-nTaps) ) { // -nTaps might not be necessary
s_input[local_id] = M;
s_input[blockDim.x + local_id] = S;
s_input[2*blockDim.x + local_id] = j;
s_base[local_id] = M_b;
s_base[blockDim.x + local_id] = S_b;
s_base[2*blockDim.x + local_id] = j_b;
}
}
__syncthreads();
//------------------------------------------------------------------------------------
//---------> StrDev of processed input
Reduce_SM( &M, &S, &j, s_input );
Reduce_WARP( &M, &S, &j);
//------------------------------------------------------------------------------------
//---------> StrDev of unprocessed input
Reduce_SM( &M_b, &S_b, &j_b, s_base );
Reduce_WARP( &M_b, &S_b, &j_b);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
gpos = blockIdx.y*gridDim.x + blockIdx.x;
d_output_taps[3*gpos] = M;
d_output_taps[3*gpos + 1] = S;
d_output_taps[3*gpos + 2] = j;
d_output[3*gpos] = M_b;
d_output[3*gpos + 1] = S_b;
d_output[3*gpos + 2] = j_b;
}
}
__global__ void MSD_GPU_final_create_LA_nonregular(float *d_input, float *d_output, float *d_MSD_base, int nTaps, int size) {
__shared__ float s_input[3*WARP*WARP];
float M, S, j;
Sum_partials_nonregular( &M, &S, &j, d_input, s_input, size);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
d_output[0] = d_MSD_base[0];
d_output[1] = d_MSD_base[1];
d_output[2] = (sqrt(S / j) - d_MSD_base[1])/( (float) (nTaps-1));
}
}
__global__ void MSD_GPU_final_create_LA_Nth_nonregular(float *d_input, float *d_output, float *d_MSD_base, float *d_MSD_DIT, int nTaps, int size, int DIT_value) {
__shared__ float s_input[3*WARP*WARP];
float M, S, j;
Sum_partials_nonregular( &M, &S, &j, d_input, s_input, size);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
d_output[0] = d_MSD_base[0];
d_output[1] = d_MSD_base[1];
d_output[2] = (sqrt(S / j) - d_MSD_base[1])/( (float) (nTaps));
d_output[3] = d_MSD_DIT[0]*DIT_value;//*DIT_value
}
}
#endif
| efbf214d664be947888cad0a821b979df70c1d6e.cu | // Added by Karel Adamek
#ifndef MSD_BLN_PW_KERNEL_H_
#define MSD_BLN_PW_KERNEL_H_
#include <cuda.h>
#include <cuda_runtime.h>
#include "headers/params.h"
/*
//----------------------------------------------------------------------------------------
//------------- Device functions
__device__ __inline__ void Initiate(float *M, float *S, float *j, float element){
*M = element;
*S = 0;
*j = 1.0f;
}
__device__ __inline__ void Add_one(float *M, float *S, float *j, float element){
float ftemp;
*j = (*j) + 1.0f;
*M = (*M) + element;
ftemp = ( (*j)*element - (*M) );
*S = (*S) + 1.0f / ( (*j)*( (*j) - 1.0f ) )*ftemp*ftemp;
}
__device__ __inline__ void Merge(float *A_M, float *A_S, float *A_j, float B_M, float B_S, float B_j){
float ftemp;
ftemp = ( B_j / (*A_j)*(*A_M) - B_M );
(*A_S) = (*A_S) + B_S + ( (*A_j) / ( B_j*( (*A_j) + B_j ) ) )*ftemp*ftemp;
(*A_M) = (*A_M) + B_M;
(*A_j) = (*A_j) + B_j;
}
__device__ __inline__ void Reduce_SM(float *M, float *S, float *j, float *s_input){
float jv;
(*M)=s_input[threadIdx.x];
(*S)=s_input[blockDim.x + threadIdx.x];
(*j)=s_input[2*blockDim.x + threadIdx.x];
for (int i = ( blockDim.x >> 1 ); i > HALF_WARP; i = i >> 1) {
if (threadIdx.x < i) {
jv = s_input[2*blockDim.x + i + threadIdx.x];
if( ((int) jv)!=0){
if( (*j)==0 ){
(*S) = s_input[blockDim.x + i + threadIdx.x];
(*M) = s_input[i + threadIdx.x];
(*j) = jv;
}
else {
Merge(M, S, j, s_input[i + threadIdx.x], s_input[blockDim.x + i + threadIdx.x], jv);
}
}
s_input[threadIdx.x] = (*M);
s_input[blockDim.x + threadIdx.x] = (*S);
s_input[2*blockDim.x + threadIdx.x] = (*j);
}
__syncthreads();
}
}
__device__ __inline__ void Reduce_SM_regular(float *M, float *S, float *j, float *s_input){
(*M)=s_input[threadIdx.x];
(*S)=s_input[blockDim.x + threadIdx.x];
(*j)=s_input[2*blockDim.x + threadIdx.x];
for (int i = ( blockDim.x >> 1 ); i > HALF_WARP; i = i >> 1) {
if (threadIdx.x < i) {
Merge(M, S, j, s_input[i + threadIdx.x], s_input[blockDim.x + i + threadIdx.x], s_input[2*blockDim.x + i + threadIdx.x]);
s_input[threadIdx.x] = (*M);
s_input[blockDim.x + threadIdx.x] = (*S);
s_input[2*blockDim.x + threadIdx.x] = (*j);
}
__syncthreads();
}
}
__device__ __inline__ void Reduce_WARP(float *M, float *S, float *j){
float jv;
for (int q = HALF_WARP; q > 0; q = q >> 1) {
jv = __shfl_down((*j), q);
if(jv!=0){
if( (*j)==0 ) {
(*S) = __shfl_down((*S), q);
(*M) = __shfl_down((*M), q);
(*j) = jv;
}
else {
Merge(M, S, j, __shfl_down((*M), q), __shfl_down((*S), q), jv);
}
}
}
}
__device__ __inline__ void Reduce_WARP_regular(float *M, float *S, float *j){
for (int q = HALF_WARP; q > 0; q = q >> 1) {
Merge(M, S, j, __shfl_down((*M), q), __shfl_down((*S), q), __shfl_down((*j), q));
}
}
//------------- Device functions
//----------------------------------------------------------------------------------------
*/
__global__ void MSD_BLN_pw_no_rejection(float const* __restrict__ d_input, float *d_output, int y_steps, int nTimesamples, int offset) {
__shared__ float s_input[3*MSD_PW_NTHREADS];
float M, S, j, ftemp;
int spos = blockIdx.x*MSD_PW_NTHREADS + threadIdx.x;
int gpos = blockIdx.y*y_steps*nTimesamples + spos;
M=0; S=0; j=0;
if( spos<(nTimesamples-offset) ){
ftemp=__ldg(&d_input[gpos]);
Initiate( &M, &S, &j, ftemp);
gpos = gpos + nTimesamples;
for (int yf = 1; yf < y_steps; yf++) {
ftemp=__ldg(&d_input[gpos]);
Add_one( &M, &S, &j, ftemp);
gpos = gpos + nTimesamples;
}
}
s_input[threadIdx.x] = M;
s_input[blockDim.x + threadIdx.x] = S;
s_input[2*blockDim.x + threadIdx.x] = j;
__syncthreads();
Reduce_SM( &M, &S, &j, s_input );
Reduce_WARP( &M, &S, &j);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
gpos = blockIdx.y*gridDim.x + blockIdx.x;
d_output[3*gpos] = M;
d_output[3*gpos + 1] = S;
d_output[3*gpos + 2] = j;
}
}
__global__ void MSD_BLN_pw_rejection_normal(float const* __restrict__ d_input, float *d_output, float *d_MSD, int y_steps, int nTimesamples, int offset, float bln_sigma_constant) {
__shared__ float s_input[3*PD_NTHREADS];
float M, S, j, ftemp, signal_mean, signal_sd;
signal_mean = d_MSD[0];
signal_sd = d_MSD[1];
int spos = blockIdx.x*PD_NTHREADS + threadIdx.x;
int gpos = blockIdx.y*y_steps*nTimesamples + spos;
M=0; S=0; j=0;
if( spos<(nTimesamples-offset) ){
for (int yf = 0; yf < y_steps; yf++) {
ftemp=__ldg(&d_input[gpos]);
if( (ftemp > (signal_mean - bln_sigma_constant*signal_sd)) && (ftemp < (signal_mean + bln_sigma_constant*signal_sd)) ){
if(j==0){
Initiate( &M, &S, &j, ftemp);
}
else{
Add_one( &M, &S, &j, ftemp);
}
}
gpos = gpos + nTimesamples;
}
}
s_input[threadIdx.x] = M;
s_input[blockDim.x + threadIdx.x] = S;
s_input[2*blockDim.x + threadIdx.x] = j;
__syncthreads();
Reduce_SM( &M, &S, &j, s_input );
Reduce_WARP( &M, &S, &j);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
gpos = blockIdx.y*gridDim.x + blockIdx.x;
d_output[3*gpos] = M;
d_output[3*gpos + 1] = S;
d_output[3*gpos + 2] = j;
}
}
__global__ void MSD_GPU_LA_ALL_no_rejection(float const* __restrict__ d_input, float *d_output, float *d_output_taps, int y_steps, int nTaps, int nTimesamples, int offset) {
__shared__ float s_input[3*MSD_PW_NTHREADS];
__shared__ float s_base[3*MSD_PW_NTHREADS];
// MSD variables
float M, S, j;
float M_b, S_b, j_b;
// FIR variables
int d, gpos, spos, local_id;
ushort EpT, limit;
float2 ftemp1, ftemp2, ftemp3;
float Bw[2];
EpT = 2*MSD_PW_NTHREADS-nTaps+4;
limit = blockDim.x - (nTaps>>2) - 1;
// First y coordinate is separated
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + spos;
Bw[0]=0; Bw[1]=0; j=0; j_b=0;
if( (spos+4)<(nTimesamples-offset) ){
// loading data for FIR filter. Each thread calculates two samples
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
// Calculate FIR of 4 taps
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
Initiate( &M_b, &S_b, &j_b, ftemp1.x );
Add_one( &M_b, &S_b, &j_b, ftemp1.y );
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
__syncthreads();
	// Calculating FIR up to nTaps
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
	// Note: threads whose local_id falls outside the valid range (and thus hold an incomplete FIR result) compute running sums as well, but these are removed from the final result later.
	// The same holds for the base values, which would otherwise be counted twice: once here and once in the neighbouring threadblock.
	// This is due to the halo needed for the FIR filter.
Initiate( &M, &S, &j, Bw[0] );
Add_one( &M, &S, &j, Bw[1] );
// Rest of the iteration in y direction
for (int yf = 1; yf < y_steps; yf++) {
__syncthreads();
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + yf*nTimesamples + spos;
Bw[0]=0; Bw[1]=0;
if( (spos+4)<(nTimesamples-offset) ){
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
Add_one( &M_b, &S_b, &j_b, ftemp1.x );
Add_one( &M_b, &S_b, &j_b, ftemp1.y );
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
Add_one( &M, &S, &j, Bw[0] );
Add_one( &M, &S, &j, Bw[1] );
}
__syncthreads();
s_input[threadIdx.x] = 0;
s_input[blockDim.x + threadIdx.x] = 0;
s_input[2*blockDim.x + threadIdx.x] = 0;
s_base[threadIdx.x] = 0;
s_base[blockDim.x + threadIdx.x] = 0;
s_base[2*blockDim.x + threadIdx.x] = 0;
__syncthreads();
spos=blockIdx.x*(EpT) + 2*threadIdx.x;
if( local_id<=limit ) {
		// Note: the omitted number of samples in the last trailing threadblocks is due to the -nTaps term here.
		// Missing data should be contained in local_id, thus this code misses some time samples even if it does not need to.
		// When removed it produces a different number of added time samples in j and j_b, which is weird.
if( spos<(nTimesamples-offset-nTaps) ) { // -nTaps might not be necessary
s_input[local_id] = M;
s_input[blockDim.x + local_id] = S;
s_input[2*blockDim.x + local_id] = j;
s_base[local_id] = M_b;
s_base[blockDim.x + local_id] = S_b;
s_base[2*blockDim.x + local_id] = j_b;
}
}
__syncthreads();
//------------------------------------------------------------------------------------
//---------> StrDev of processed input
Reduce_SM( &M, &S, &j, s_input );
Reduce_WARP( &M, &S, &j);
//------------------------------------------------------------------------------------
//---------> StrDev of unprocessed input
Reduce_SM( &M_b, &S_b, &j_b, s_base );
Reduce_WARP( &M_b, &S_b, &j_b);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
gpos = blockIdx.y*gridDim.x + blockIdx.x;
d_output_taps[3*gpos] = M;
d_output_taps[3*gpos + 1] = S;
d_output_taps[3*gpos + 2] = j;
d_output[3*gpos] = M_b;
d_output[3*gpos + 1] = S_b;
d_output[3*gpos + 2] = j_b;
}
}
__global__ void MSD_GPU_LA_ALL_Nth_no_rejection(float const* __restrict__ d_input, float const* __restrict__ d_bv_in, float *d_output, float *d_output_taps, int y_steps, int nTaps, int nTimesamples, int offset) {
__shared__ float s_input[3*MSD_PW_NTHREADS];
__shared__ float s_base[3*MSD_PW_NTHREADS];
// MSD variables
float M, S, j;
float M_b, S_b, j_b;
// FIR variables
int d, gpos, spos, local_id;
ushort EpT, limit;
float2 ftemp1, ftemp2, ftemp3;
float Bw[2];
EpT = 2*MSD_PW_NTHREADS-nTaps+4;
limit = blockDim.x - (nTaps>>2) - 1;
// First y coordinate is separated
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + spos;
Bw[0]=0; Bw[1]=0; M=0; S=0; M_b=0; S_b=0; j=0; j_b=0;
if( (spos+4)<(nTimesamples-offset) ){
// loading data for FIR filter. Each thread calculates two samples
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
// Calculate FIR of 4 taps
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
Initiate( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos]) );
Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos+1]) );
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
__syncthreads();
	// Calculating FIR up to nTaps
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
	// Note: threads whose local_id falls outside the valid range (and thus hold an incomplete FIR result) compute running sums as well, but these are removed from the final result later.
	// The same holds for the base values, which would otherwise be counted twice: once here and once in the neighbouring threadblock.
	// This is due to the halo needed for the FIR filter.
Initiate( &M, &S, &j, __ldg(&d_bv_in[gpos]) + Bw[0] );
Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos+1]) + Bw[1] );
// Rest of the iteration in y direction
for (int yf = 1; yf < y_steps; yf++) {
__syncthreads();
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + yf*nTimesamples + spos;
Bw[0]=0; Bw[1]=0;
if( (spos+4)<(nTimesamples-offset) ){
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos]) );
Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos+1]) );
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos]) + Bw[0] );
Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos+1]) + Bw[1] );
}
__syncthreads();
s_input[threadIdx.x] = 0;
s_input[blockDim.x + threadIdx.x] = 0;
s_input[2*blockDim.x + threadIdx.x] = 0;
s_base[threadIdx.x] = 0;
s_base[blockDim.x + threadIdx.x] = 0;
s_base[2*blockDim.x + threadIdx.x] = 0;
__syncthreads();
spos=blockIdx.x*(EpT) + 2*threadIdx.x;
if( local_id<=limit ) {
		// Note: the omitted number of samples in the last trailing threadblocks is due to the -nTaps term here.
		// Missing data should be contained in local_id, thus this code misses some time samples even if it does not need to.
		// When removed it produces a different number of added time samples in j and j_b, which is weird.
if( spos<(nTimesamples-offset-nTaps) ) { // -nTaps might not be necessary
s_input[local_id] = M;
s_input[blockDim.x + local_id] = S;
s_input[2*blockDim.x + local_id] = j;
s_base[local_id] = M_b;
s_base[blockDim.x + local_id] = S_b;
s_base[2*blockDim.x + local_id] = j_b;
}
}
__syncthreads();
//------------------------------------------------------------------------------------
//---------> StrDev of processed input
Reduce_SM( &M, &S, &j, s_input );
Reduce_WARP( &M, &S, &j);
//------------------------------------------------------------------------------------
//---------> StrDev of unprocessed input
Reduce_SM( &M_b, &S_b, &j_b, s_base );
Reduce_WARP( &M_b, &S_b, &j_b);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
gpos = blockIdx.y*gridDim.x + blockIdx.x;
d_output_taps[3*gpos] = M;
d_output_taps[3*gpos + 1] = S;
d_output_taps[3*gpos + 2] = j;
d_output[3*gpos] = M_b;
d_output[3*gpos + 1] = S_b;
d_output[3*gpos + 2] = j_b;
}
}
__global__ void MSD_GPU_LA_ALL_pw_rejection(float const* __restrict__ d_input, float *d_output, float *d_output_taps, float *d_MSD_T, float *d_MSD_T_base, float bln_sigma_constant, int y_steps, int nTaps, int nTimesamples, int offset) {
__shared__ float s_input[3*MSD_PW_NTHREADS];
__shared__ float s_base[3*MSD_PW_NTHREADS];
// MSD variables
float M, S, j;
float M_b, S_b, j_b;
// FIR variables
int d, gpos, spos, local_id;
ushort EpT, limit;
float2 ftemp1, ftemp2, ftemp3;
float Bw[2];
float signal_mean = d_MSD_T_base[0];
float signal_sd = d_MSD_T_base[1];
float signal_mean_taps = d_MSD_T[0];
float signal_sd_taps = d_MSD_T[1];
EpT = 2*MSD_PW_NTHREADS-nTaps+4;
limit = blockDim.x - (nTaps>>2) - 1;
// First y coordinate is separated
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + spos;
Bw[0]=0; Bw[1]=0; j=0; j_b=0;
M=0; S=0; j=0;
M_b=0; S_b=0; j_b=0;
if( (spos+4)<(nTimesamples-offset) ){
// loading data for FIR filter. Each thread calculates two samples
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
// Initialization of MSD variables for non-processed StrDev
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
if( (ftemp1.x > (signal_mean - bln_sigma_constant*signal_sd)) && (ftemp1.x < (signal_mean + bln_sigma_constant*signal_sd)) ){
if(j_b==0) Initiate( &M_b, &S_b, &j_b, ftemp1.x );
else Add_one( &M_b, &S_b, &j_b, ftemp1.x );
}
if( (ftemp1.y > (signal_mean - bln_sigma_constant*signal_sd)) && (ftemp1.y < (signal_mean + bln_sigma_constant*signal_sd)) ){
if(j_b==0) Initiate( &M_b, &S_b, &j_b, ftemp1.y );
else Add_one( &M_b, &S_b, &j_b, ftemp1.y );
}
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
__syncthreads();
	// Calculating FIR up to nTaps
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
	// Note: threads whose local_id falls outside the valid range (and thus hold an incomplete FIR result) compute running sums as well, but these are removed from the final result later.
	// The same holds for the base values, which would otherwise be counted twice: once here and once in the neighbouring threadblock.
	// This is due to the halo needed for the FIR filter.
if( (Bw[0] > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && (Bw[0] < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, Bw[0] );
else Add_one( &M, &S, &j, Bw[0] );
}
if( (Bw[1] > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && (Bw[1] < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, Bw[1] );
else Add_one( &M, &S, &j, Bw[1] );
}
// Rest of the iteration in y direction
for (int yf = 1; yf < y_steps; yf++) {
__syncthreads();
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + yf*nTimesamples + spos;
Bw[0]=0; Bw[1]=0;
if( (spos+4)<(nTimesamples-offset) ){
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
if( (ftemp1.x > (signal_mean - bln_sigma_constant*signal_sd)) && (ftemp1.x < (signal_mean + bln_sigma_constant*signal_sd)) ){
if(j_b==0) Initiate( &M_b, &S_b, &j_b, ftemp1.x );
else Add_one( &M_b, &S_b, &j_b, ftemp1.x );
}
if( (ftemp1.y > (signal_mean - bln_sigma_constant*signal_sd)) && (ftemp1.y < (signal_mean + bln_sigma_constant*signal_sd)) ){
if(j_b==0) Initiate( &M_b, &S_b, &j_b, ftemp1.y );
else Add_one( &M_b, &S_b, &j_b, ftemp1.y );
}
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
__syncthreads();
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
if( (Bw[0] > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && (Bw[0] < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, Bw[0] );
else Add_one( &M, &S, &j, Bw[0] );
}
if( (Bw[1] > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && (Bw[1] < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, Bw[1] );
else Add_one( &M, &S, &j, Bw[1] );
}
}
__syncthreads();
s_input[threadIdx.x] = 0;
s_input[blockDim.x + threadIdx.x] = 0;
s_input[2*blockDim.x + threadIdx.x] = 0;
s_base[threadIdx.x] = 0;
s_base[blockDim.x + threadIdx.x] = 0;
s_base[2*blockDim.x + threadIdx.x] = 0;
__syncthreads();
spos=blockIdx.x*(EpT) + 2*threadIdx.x;
if( local_id<=limit ) {
		// Note: the omitted number of samples in the last trailing threadblocks is due to the -nTaps term here.
		// Missing data should be contained in local_id, thus this code misses some time samples even if it does not need to.
		// When removed it produces a different number of added time samples in j and j_b, which is weird.
if( spos<(nTimesamples-offset-nTaps) ) { // -nTaps might not be necessary
s_input[local_id] = M;
s_input[blockDim.x + local_id] = S;
s_input[2*blockDim.x + local_id] = j;
s_base[local_id] = M_b;
s_base[blockDim.x + local_id] = S_b;
s_base[2*blockDim.x + local_id] = j_b;
}
}
__syncthreads();
//------------------------------------------------------------------------------------
//---------> StrDev of processed input
Reduce_SM( &M, &S, &j, s_input );
Reduce_WARP( &M, &S, &j);
//------------------------------------------------------------------------------------
//---------> StrDev of unprocessed input
Reduce_SM( &M_b, &S_b, &j_b, s_base );
Reduce_WARP( &M_b, &S_b, &j_b);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
gpos = blockIdx.y*gridDim.x + blockIdx.x;
d_output_taps[3*gpos] = M;
d_output_taps[3*gpos + 1] = S;
d_output_taps[3*gpos + 2] = j;
d_output[3*gpos] = M_b;
d_output[3*gpos + 1] = S_b;
d_output[3*gpos + 2] = j_b;
}
}
__global__ void MSD_GPU_LA_ALL_Nth_pw_rejection(float const* __restrict__ d_input, float const* __restrict__ d_bv_in, float *d_output, float *d_output_taps, float *d_MSD_T, float *d_MSD_T_base, float bln_sigma_constant, int y_steps, int nTaps, int nTimesamples, int offset) {
__shared__ float s_input[3*MSD_PW_NTHREADS];
__shared__ float s_base[3*MSD_PW_NTHREADS];
// MSD variables
float M, S, j;
float M_b, S_b, j_b;
// FIR variables
int d, gpos, spos, local_id;
ushort EpT, limit;
float2 ftemp1, ftemp2, ftemp3;
float Bw[2];
//float signal_mean = d_MSD_T_base[0];
//float signal_sd = d_MSD_T_base[1];
float signal_mean_taps = d_MSD_T[0];
float signal_sd_taps = d_MSD_T[1];
EpT = 2*MSD_PW_NTHREADS-nTaps+4;
limit = blockDim.x - (nTaps>>2) - 1;
// First y coordinate is separated
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + spos;
Bw[0]=0; Bw[1]=0; j=0; j_b=0;
M=0; S=0; j=0;
M_b=0; S_b=0; j_b=0;
if( (spos+4)<(nTimesamples-offset) ){
// loading data for FIR filter. Each thread calculates two samples
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
// Calculate FIR of 4 taps
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
//if( (__ldg(&d_bv_in[gpos]) > (signal_mean - bln_sigma_constant*signal_sd)) && (__ldg(&d_bv_in[gpos]) < (signal_mean + bln_sigma_constant*signal_sd)) ){
// if(j==0) Initiate( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos]) );
// else Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos]) );
//}
//if( (__ldg(&d_bv_in[gpos+1]) > (signal_mean - bln_sigma_constant*signal_sd)) && (__ldg(&d_bv_in[gpos+1]) < (signal_mean + bln_sigma_constant*signal_sd)) ){
// if(j==0) Initiate( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos+1]) );
// else Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos+1]) );
//}
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
__syncthreads();
	// Calculating FIR up to nTaps
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
	// Note: threads whose local_id falls outside the valid range (and thus hold an incomplete FIR result) compute running sums as well, but these are removed from the final result later.
	// The same holds for the base values, which would otherwise be counted twice: once here and once in the neighbouring threadblock.
	// This is due to the halo needed for the FIR filter.
if( ( (__ldg(&d_bv_in[gpos]) + Bw[0]) > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && ( (__ldg(&d_bv_in[gpos]) + Bw[0]) < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, __ldg(&d_bv_in[gpos]) + Bw[0] );
else Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos]) + Bw[0] );
}
if( ( (__ldg(&d_bv_in[gpos+1]) + Bw[1]) > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && ( (__ldg(&d_bv_in[gpos+1]) + Bw[1]) < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, __ldg(&d_bv_in[gpos+1]) + Bw[1] );
else Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos+1]) + Bw[1] );
}
// Rest of the iteration in y direction
for (int yf = 1; yf < y_steps; yf++) {
__syncthreads();
//-------------------> FIR
spos = blockIdx.x*(EpT) + 2*threadIdx.x;
gpos = blockIdx.y*y_steps*nTimesamples + yf*nTimesamples + spos;
Bw[0]=0; Bw[1]=0;
if( (spos+4)<(nTimesamples-offset) ){
ftemp1.x= __ldg(&d_input[gpos]);
ftemp1.y= __ldg(&d_input[gpos+1]);
ftemp2.x= __ldg(&d_input[gpos+2]);
ftemp2.y= __ldg(&d_input[gpos+3]);
ftemp3.x= __ldg(&d_input[gpos+4]);
Bw[0]=ftemp1.x + ftemp1.y + ftemp2.x + ftemp2.y;
Bw[1]=ftemp1.y + ftemp2.x + ftemp2.y + ftemp3.x;
//if( (__ldg(&d_bv_in[gpos]) > (signal_mean - bln_sigma_constant*signal_sd)) && (__ldg(&d_bv_in[gpos]) < (signal_mean + bln_sigma_constant*signal_sd)) ){
// if(j==0) Initiate( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos]) );
// else Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos]) );
//}
//if( (__ldg(&d_bv_in[gpos+1]) > (signal_mean - bln_sigma_constant*signal_sd)) && (__ldg(&d_bv_in[gpos+1]) < (signal_mean + bln_sigma_constant*signal_sd)) ){
// if(j==0) Initiate( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos+1]) );
// else Add_one( &M_b, &S_b, &j_b, __ldg(&d_bv_in[gpos+1]) );
//}
}
s_input[2*threadIdx.x] = Bw[0];
s_input[2*threadIdx.x+1] = Bw[1];
__syncthreads();
for(d=4; d<nTaps; d=d+4){
local_id = threadIdx.x+(d>>1);
if( local_id<=limit ){
Bw[0] = Bw[0] + s_input[2*local_id]; Bw[1] = Bw[1] + s_input[2*local_id+1];
}
}
if( ( (__ldg(&d_bv_in[gpos]) + Bw[0]) > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && ( (__ldg(&d_bv_in[gpos]) + Bw[0]) < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, __ldg(&d_bv_in[gpos]) + Bw[0] );
else Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos]) + Bw[0] );
}
if( ( (__ldg(&d_bv_in[gpos+1]) + Bw[1]) > (signal_mean_taps - bln_sigma_constant*signal_sd_taps)) && ( (__ldg(&d_bv_in[gpos+1]) + Bw[1]) < (signal_mean_taps + bln_sigma_constant*signal_sd_taps)) ){
if(j==0) Initiate( &M, &S, &j, __ldg(&d_bv_in[gpos+1]) + Bw[1] );
else Add_one( &M, &S, &j, __ldg(&d_bv_in[gpos+1]) + Bw[1] );
}
}
__syncthreads();
s_input[threadIdx.x] = 0;
s_input[blockDim.x + threadIdx.x] = 0;
s_input[2*blockDim.x + threadIdx.x] = 0;
s_base[threadIdx.x] = 0;
s_base[blockDim.x + threadIdx.x] = 0;
s_base[2*blockDim.x + threadIdx.x] = 0;
__syncthreads();
spos=blockIdx.x*(EpT) + 2*threadIdx.x;
if( local_id<=limit ) {
		// Note: the omitted number of samples in the last trailing threadblocks is due to the -nTaps term here.
		// Missing data should be contained in local_id, thus this code misses some time samples even if it does not need to.
		// When removed it produces a different number of added time samples in j and j_b, which is weird.
if( spos<(nTimesamples-offset-nTaps) ) { // -nTaps might not be necessary
s_input[local_id] = M;
s_input[blockDim.x + local_id] = S;
s_input[2*blockDim.x + local_id] = j;
s_base[local_id] = M_b;
s_base[blockDim.x + local_id] = S_b;
s_base[2*blockDim.x + local_id] = j_b;
}
}
__syncthreads();
//------------------------------------------------------------------------------------
//---------> StrDev of processed input
Reduce_SM( &M, &S, &j, s_input );
Reduce_WARP( &M, &S, &j);
//------------------------------------------------------------------------------------
//---------> StrDev of unprocessed input
Reduce_SM( &M_b, &S_b, &j_b, s_base );
Reduce_WARP( &M_b, &S_b, &j_b);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
gpos = blockIdx.y*gridDim.x + blockIdx.x;
d_output_taps[3*gpos] = M;
d_output_taps[3*gpos + 1] = S;
d_output_taps[3*gpos + 2] = j;
d_output[3*gpos] = M_b;
d_output[3*gpos + 1] = S_b;
d_output[3*gpos + 2] = j_b;
}
}
__global__ void MSD_GPU_final_create_LA_nonregular(float *d_input, float *d_output, float *d_MSD_base, int nTaps, int size) {
__shared__ float s_input[3*WARP*WARP];
float M, S, j;
Sum_partials_nonregular( &M, &S, &j, d_input, s_input, size);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
d_output[0] = d_MSD_base[0];
d_output[1] = d_MSD_base[1];
d_output[2] = (sqrt(S / j) - d_MSD_base[1])/( (float) (nTaps-1));
}
}
__global__ void MSD_GPU_final_create_LA_Nth_nonregular(float *d_input, float *d_output, float *d_MSD_base, float *d_MSD_DIT, int nTaps, int size, int DIT_value) {
__shared__ float s_input[3*WARP*WARP];
float M, S, j;
Sum_partials_nonregular( &M, &S, &j, d_input, s_input, size);
//----------------------------------------------
//---- Writing data
if (threadIdx.x == 0) {
d_output[0] = d_MSD_base[0];
d_output[1] = d_MSD_base[1];
d_output[2] = (sqrt(S / j) - d_MSD_base[1])/( (float) (nTaps));
d_output[3] = d_MSD_DIT[0]*DIT_value;//*DIT_value
}
}
#endif
|
2a22fa860c57aba8bb8327e359a0985286ef91c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "volrenraycastcuda.cuda.h"
#include <stdio.h>
#include "cutil_math.h"
__constant__ int volWidth, volHeight, volDepth;
__constant__ int nLights;
__constant__ CudaLight lights[10];
static const int maxBlockSize2D = 16;
static texture<float, hipTextureType3D, hipReadModeElementType> volTex;
static texture<float4, hipTextureType2D, hipReadModeElementType> tfFullTex;
static texture<float4, hipTextureType2D, hipReadModeElementType> tfBackTex;
static texture<float4, hipTextureType2D, hipReadModeElementType> entryTex;
static texture<float4, hipTextureType2D, hipReadModeElementType> exitTex;
#define cc(ans) { gpuAssert((ans), __FILE__, __LINE__); }
static void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
static dim3 getDimBlock2D(int w, int h)
{
dim3 dimBlock;
if (w < maxBlockSize2D)
dimBlock.x = w;
else
dimBlock.x = maxBlockSize2D;
if (h < maxBlockSize2D)
dimBlock.y = h;
else
dimBlock.y = maxBlockSize2D;
return dimBlock;
}
static dim3 getDimGrid2D(int w, int h)
{
dim3 dimGrid;
if (w < maxBlockSize2D)
dimGrid.x = 1;
else
dimGrid.x = int(ceil(float(w) / maxBlockSize2D));
if (h < maxBlockSize2D)
dimGrid.y = 1;
else
dimGrid.y = int(ceil(float(h) / maxBlockSize2D));
return dimGrid;
}
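// Computes the volume gradient at the normalised position 'spot' by central differences, sampling the 3D texture with one-texel offsets along each axis.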
__device__ static float3 makeGradient(float3 spot)
{
float3 gradient;
gradient.x = 0.5 * (tex3D(volTex, spot.x * volWidth + 1.f, spot.y * volHeight, spot.z * volDepth)
- tex3D(volTex, spot.x * volWidth - 1.f, spot.y * volHeight, spot.z * volDepth));
gradient.y = 0.5 * (tex3D(volTex, spot.x * volWidth, spot.y * volHeight + 1.f, spot.z * volDepth)
- tex3D(volTex, spot.x * volWidth, spot.y * volHeight - 1.f, spot.z * volDepth));
gradient.z = 0.5 * (tex3D(volTex, spot.x * volWidth, spot.y * volHeight, spot.z * volDepth + 1.f)
- tex3D(volTex, spot.x * volWidth, spot.y * volHeight, spot.z * volDepth - 1.f));
return gradient;
}
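// Accumulates a Phong-style shading factor (ambient + diffuse + specular) over all directional lights for the given gradient and view direction; returns (1,1,1,1) when no lights are configured.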
__device__ static float4 getLightFactor(float3 grad, float3 view)
{
if (nLights == 0)
return make_float4(1.f, 1.f, 1.f, 1.f);
float3 V = normalize(-view);
float3 N = normalize(-grad);
float4 acc = make_float4(0.f, 0.f, 0.f, 0.f);
for (int i = 0; i < nLights; ++i)
{
float3 kd = lights[i].diffuse;
float3 ka = lights[i].ambient;
float3 ks = lights[i].specular;
float shininess = lights[i].shininess;
float3 L = normalize(make_float3(0.f, 0.f, 0.f) - lights[i].direction);
float3 R = normalize(make_float3(0.f, 0.f, 0.f) - reflect(L, N));
float3 diffuse = kd * max(dot(L, N), 0.f);
float3 specular = ks * pow(max(dot(R, V), 0.f), shininess);
float3 cf = ka + diffuse + specular;
float af = 1.f;
acc += make_float4(cf.x, cf.y, cf.z, af);
}
return make_float4(acc.x, acc.y, acc.z, 1.f);
}
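// Ray-marching kernel: for each output pixel, steps from the entry to the exit point, classifies the (previous, current) scalar pair with the 2D transfer function and composites front-to-back with early ray termination.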
__global__ static void castray(int tfWidth, int tfHeight, float stepSize,
float scalarMin, float scalarMax,
int texWidth, int texHeight, float* outPtr)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= texWidth || y >= texHeight)
return;
float3 entry = make_float3(tex2D(entryTex, x + 0.5f, y + 0.5f));
float3 exit = make_float3(tex2D(exitTex, x + 0.5f, y + 0.5f));
float3 dir = normalize(exit - entry);
float maxLength = length(exit - entry);
float2 scalar = make_float2(0.f, 0.f);
scalar.y = tex3D(volTex, entry.x * volWidth, entry.y * volHeight, entry.z * volDepth);
scalar.y = clamp(float((scalar.y - scalarMin) / (scalarMax - scalarMin)), 0.f, 1.f);
float3 spotPrev = entry;
float3 spotCurr;
float4 lfPrev = getLightFactor(makeGradient(entry), dir);
float4 lfCurr;
float4 acc = make_float4(0.f, 0.f, 0.f, 0.f);
for (int step = 1; step * stepSize < maxLength; ++step)
{
spotCurr = entry + dir * (step * stepSize);
scalar.x = tex3D(volTex, spotCurr.x * volWidth, spotCurr.y * volHeight, spotCurr.z * volDepth);
scalar.x = clamp(float((scalar.x - scalarMin) / (scalarMax - scalarMin)), 0.f, 1.f);
float4 colorFull = tex2D(tfFullTex, scalar.x * tfWidth, scalar.y * tfHeight);
float4 colorBack = tex2D(tfBackTex, scalar.x * tfWidth, scalar.y * tfHeight);
float4 colorFront = colorFull - colorBack;
lfCurr = getLightFactor(makeGradient(spotCurr), dir);
acc += (colorBack * lfCurr + colorFront * lfPrev) * (1.0 - acc.w);
if (acc.w > 0.999f)
break;
scalar.y = scalar.x;
spotPrev = spotCurr;
lfPrev = lfCurr;
}
outPtr[3 * (texWidth * y + x) + 0] = acc.x;
outPtr[3 * (texWidth * y + x) + 1] = acc.y;
outPtr[3 * (texWidth * y + x) + 2] = acc.z;
}
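// Host-side entry point: uploads the volume dimensions to constant memory, binds the volume, transfer-function, entry and exit textures, launches the ray-casting kernel and unbinds the textures afterwards.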
void cudacast(int ivolWidth, int ivolHeight, int ivolDepth, hipArray* volArr,
int tfWidth, int tfHeight, float stepSize, hipTextureFilterMode filter, hipArray* tfFullArr, hipArray *tfBackArr,
float scalarMin, float scalarMax,
int texWidth, int texHeight, hipArray *entryArr, hipArray *exitArr, float *outPtr)
{
hipMemcpyToSymbol(volWidth, &ivolWidth, sizeof(int));
hipMemcpyToSymbol(volHeight, &ivolHeight, sizeof(int));
hipMemcpyToSymbol(volDepth, &ivolDepth, sizeof(int));
hipBindTextureToArray(volTex, volArr);
volTex.filterMode = hipFilterModeLinear;
hipBindTextureToArray(tfFullTex, tfFullArr);
tfFullTex.filterMode = filter;
hipBindTextureToArray(tfBackTex, tfBackArr);
tfBackTex.filterMode = filter;
hipBindTextureToArray(entryTex, entryArr);
entryTex.filterMode = hipFilterModeLinear;
hipBindTextureToArray(exitTex, exitArr);
exitTex.filterMode = hipFilterModeLinear;
dim3 dimBlock = getDimBlock2D(texWidth, texHeight);
dim3 dimGrid = getDimGrid2D(texWidth, texHeight);
hipLaunchKernelGGL(( castray), dim3(dimGrid), dim3(dimBlock), 0, 0, tfWidth, tfHeight, stepSize,
scalarMin, scalarMax,
texWidth, texHeight, outPtr);
hipUnbindTexture(exitTex);
hipUnbindTexture(entryTex);
hipUnbindTexture(tfFullTex);
hipUnbindTexture(tfBackTex);
hipUnbindTexture(volTex);
}
void cudaSetLights(int inLights, CudaLight ilights[])
{
cc(hipMemcpyToSymbol(nLights, &inLights, sizeof(int)));
cc(hipMemcpyToSymbol(lights, &ilights[0], 10 * sizeof(CudaLight)));
}
| 2a22fa860c57aba8bb8327e359a0985286ef91c8.cu | #include "volrenraycastcuda.cuda.h"
#include <stdio.h>
#include "cutil_math.h"
__constant__ int volWidth, volHeight, volDepth;
__constant__ int nLights;
__constant__ CudaLight lights[10];
static const int maxBlockSize2D = 16;
static texture<float, cudaTextureType3D, cudaReadModeElementType> volTex;
static texture<float4, cudaTextureType2D, cudaReadModeElementType> tfFullTex;
static texture<float4, cudaTextureType2D, cudaReadModeElementType> tfBackTex;
static texture<float4, cudaTextureType2D, cudaReadModeElementType> entryTex;
static texture<float4, cudaTextureType2D, cudaReadModeElementType> exitTex;
#define cc(ans) { gpuAssert((ans), __FILE__, __LINE__); }
static void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
static dim3 getDimBlock2D(int w, int h)
{
dim3 dimBlock;
if (w < maxBlockSize2D)
dimBlock.x = w;
else
dimBlock.x = maxBlockSize2D;
if (h < maxBlockSize2D)
dimBlock.y = h;
else
dimBlock.y = maxBlockSize2D;
return dimBlock;
}
static dim3 getDimGrid2D(int w, int h)
{
dim3 dimGrid;
if (w < maxBlockSize2D)
dimGrid.x = 1;
else
dimGrid.x = int(ceil(float(w) / maxBlockSize2D));
if (h < maxBlockSize2D)
dimGrid.y = 1;
else
dimGrid.y = int(ceil(float(h) / maxBlockSize2D));
return dimGrid;
}
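// Computes the volume gradient at the normalised position 'spot' by central differences, sampling the 3D texture with one-texel offsets along each axis.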
__device__ static float3 makeGradient(float3 spot)
{
float3 gradient;
gradient.x = 0.5 * (tex3D(volTex, spot.x * volWidth + 1.f, spot.y * volHeight, spot.z * volDepth)
- tex3D(volTex, spot.x * volWidth - 1.f, spot.y * volHeight, spot.z * volDepth));
gradient.y = 0.5 * (tex3D(volTex, spot.x * volWidth, spot.y * volHeight + 1.f, spot.z * volDepth)
- tex3D(volTex, spot.x * volWidth, spot.y * volHeight - 1.f, spot.z * volDepth));
gradient.z = 0.5 * (tex3D(volTex, spot.x * volWidth, spot.y * volHeight, spot.z * volDepth + 1.f)
- tex3D(volTex, spot.x * volWidth, spot.y * volHeight, spot.z * volDepth - 1.f));
return gradient;
}
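// Accumulates a Phong-style shading factor (ambient + diffuse + specular) over all directional lights for the given gradient and view direction; returns (1,1,1,1) when no lights are configured.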
__device__ static float4 getLightFactor(float3 grad, float3 view)
{
if (nLights == 0)
return make_float4(1.f, 1.f, 1.f, 1.f);
float3 V = normalize(-view);
float3 N = normalize(-grad);
float4 acc = make_float4(0.f, 0.f, 0.f, 0.f);
for (int i = 0; i < nLights; ++i)
{
float3 kd = lights[i].diffuse;
float3 ka = lights[i].ambient;
float3 ks = lights[i].specular;
float shininess = lights[i].shininess;
float3 L = normalize(make_float3(0.f, 0.f, 0.f) - lights[i].direction);
float3 R = normalize(make_float3(0.f, 0.f, 0.f) - reflect(L, N));
float3 diffuse = kd * max(dot(L, N), 0.f);
float3 specular = ks * pow(max(dot(R, V), 0.f), shininess);
float3 cf = ka + diffuse + specular;
float af = 1.f;
acc += make_float4(cf.x, cf.y, cf.z, af);
}
return make_float4(acc.x, acc.y, acc.z, 1.f);
}
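// Ray-marching kernel: for each output pixel, steps from the entry to the exit point, classifies the (previous, current) scalar pair with the 2D transfer function and composites front-to-back with early ray termination.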
__global__ static void castray(int tfWidth, int tfHeight, float stepSize,
float scalarMin, float scalarMax,
int texWidth, int texHeight, float* outPtr)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= texWidth || y >= texHeight)
return;
float3 entry = make_float3(tex2D(entryTex, x + 0.5f, y + 0.5f));
float3 exit = make_float3(tex2D(exitTex, x + 0.5f, y + 0.5f));
float3 dir = normalize(exit - entry);
float maxLength = length(exit - entry);
float2 scalar = make_float2(0.f, 0.f);
scalar.y = tex3D(volTex, entry.x * volWidth, entry.y * volHeight, entry.z * volDepth);
scalar.y = clamp(float((scalar.y - scalarMin) / (scalarMax - scalarMin)), 0.f, 1.f);
float3 spotPrev = entry;
float3 spotCurr;
float4 lfPrev = getLightFactor(makeGradient(entry), dir);
float4 lfCurr;
float4 acc = make_float4(0.f, 0.f, 0.f, 0.f);
for (int step = 1; step * stepSize < maxLength; ++step)
{
spotCurr = entry + dir * (step * stepSize);
scalar.x = tex3D(volTex, spotCurr.x * volWidth, spotCurr.y * volHeight, spotCurr.z * volDepth);
scalar.x = clamp(float((scalar.x - scalarMin) / (scalarMax - scalarMin)), 0.f, 1.f);
float4 colorFull = tex2D(tfFullTex, scalar.x * tfWidth, scalar.y * tfHeight);
float4 colorBack = tex2D(tfBackTex, scalar.x * tfWidth, scalar.y * tfHeight);
float4 colorFront = colorFull - colorBack;
lfCurr = getLightFactor(makeGradient(spotCurr), dir);
acc += (colorBack * lfCurr + colorFront * lfPrev) * (1.0 - acc.w);
if (acc.w > 0.999f)
break;
scalar.y = scalar.x;
spotPrev = spotCurr;
lfPrev = lfCurr;
}
outPtr[3 * (texWidth * y + x) + 0] = acc.x;
outPtr[3 * (texWidth * y + x) + 1] = acc.y;
outPtr[3 * (texWidth * y + x) + 2] = acc.z;
}
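// Host-side entry point: uploads the volume dimensions to constant memory, binds the volume, transfer-function, entry and exit textures, launches the ray-casting kernel and unbinds the textures afterwards.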
void cudacast(int ivolWidth, int ivolHeight, int ivolDepth, cudaArray* volArr,
int tfWidth, int tfHeight, float stepSize, cudaTextureFilterMode filter, cudaArray* tfFullArr, cudaArray *tfBackArr,
float scalarMin, float scalarMax,
int texWidth, int texHeight, cudaArray *entryArr, cudaArray *exitArr, float *outPtr)
{
cudaMemcpyToSymbol(volWidth, &ivolWidth, sizeof(int));
cudaMemcpyToSymbol(volHeight, &ivolHeight, sizeof(int));
cudaMemcpyToSymbol(volDepth, &ivolDepth, sizeof(int));
cudaBindTextureToArray(volTex, volArr);
volTex.filterMode = cudaFilterModeLinear;
cudaBindTextureToArray(tfFullTex, tfFullArr);
tfFullTex.filterMode = filter;
cudaBindTextureToArray(tfBackTex, tfBackArr);
tfBackTex.filterMode = filter;
cudaBindTextureToArray(entryTex, entryArr);
entryTex.filterMode = cudaFilterModeLinear;
cudaBindTextureToArray(exitTex, exitArr);
exitTex.filterMode = cudaFilterModeLinear;
dim3 dimBlock = getDimBlock2D(texWidth, texHeight);
dim3 dimGrid = getDimGrid2D(texWidth, texHeight);
castray<<<dimGrid, dimBlock>>>(tfWidth, tfHeight, stepSize,
scalarMin, scalarMax,
texWidth, texHeight, outPtr);
cudaUnbindTexture(exitTex);
cudaUnbindTexture(entryTex);
cudaUnbindTexture(tfFullTex);
cudaUnbindTexture(tfBackTex);
cudaUnbindTexture(volTex);
}
void cudaSetLights(int inLights, CudaLight ilights[])
{
cc(cudaMemcpyToSymbol(nLights, &inLights, sizeof(int)));
cc(cudaMemcpyToSymbol(lights, &ilights[0], 10 * sizeof(CudaLight)));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.