hip_filename<br>stringlengths 5–84 | hip_content<br>stringlengths 79–9.69M | cuda_filename<br>stringlengths 4–83 | cuda_content<br>stringlengths 19–9.69M |
---|---|---|---|
0f3a710b6263d3317296b56a53a44a4abde3642a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// clang-format off
#include "codegen/embedding_forward_template_helpers.cuh"
using Tensor = at::Tensor;
using namespace fbgemm_gpu;
{% if not dense %}
constexpr int32_t kCacheLocationMissing = -1;
{% endif %}
constexpr size_t kForwardMaxThreads = 512;
// TODO: optimization to use multiple warps per row.
template <typename emb_t, typename grad_t, typename cache_t, size_t kMaxVecsPerThread>
__global__
__launch_bounds__(kForwardMaxThreads) void {{ "dense" if dense else "split" }}_embedding_codegen_grad_indice_weights_kernel(
// [\sum_t E_t x D_t]
const at::PackedTensorAccessor64<grad_t, 2, at::RestrictPtrTraits>
grad_output,
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> dev_weights,
{% if not dense %}
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> uvm_weights,
at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits> lxu_cache_weights,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
weights_placements,
{% endif %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> weights_offsets,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
indices, // [N = \sum_{b,t} L_{b,t} total indices, i.e. flattened
// [B][T][L]
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
offsets, // [B x T + 1]
{% if not dense %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
lxu_cache_locations,
{% endif %}
at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
feature_requires_grad, // [T],
at::PackedTensorAccessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits>
grad_indice_weights
) {
int32_t B = grad_output.size(0);
int32_t T = D_offsets.size(0) - 1;
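// One warp (kWarpSize lanes in x) handles one (table t, sample b) pair:
// threadIdx.y selects the warp within the block and blockIdx.x walks the B*T pairs.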
int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y;
int32_t t = b_t / B;
int32_t b = b_t % B;
if (b_t >= B * T) {
return;
}
int64_t weights_offset = weights_offsets[t];
int32_t D_start = D_offsets[t];
int32_t D_end = D_offsets[t + 1];
int32_t D = D_end - D_start;
int64_t indices_start = offsets[t * B + b];
int64_t indices_end = offsets[t * B + b + 1];
int32_t L = indices_end - indices_start;
if (feature_requires_grad.size(0) > 0 && !feature_requires_grad[t]) {
// If the table does not require gradient computation, we set the gradient to zero.
for (int32_t l_start = 0; l_start < L; l_start += kWarpSize) {
int32_t l = l_start + threadIdx.x;
if (l < L) {
grad_indice_weights[indices_start + l] = 0.0;
}
}
return;
}
const emb_t* __restrict__ weights;
{% if not dense %}
const auto placement = static_cast<PlacementType>(weights_placements[t]);
if (placement == PlacementType::DEVICE) {
weights = &dev_weights[weights_offset];
} else {
weights = &uvm_weights[weights_offset];
}
{% else %}
weights = &dev_weights[weights_offset];
{% endif %}
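// Pre-load this sample's grad_output slice [D_start, D_end) into registers as Vec4
// chunks; lane x owns elements 4 * (kWarpSize * i + x) .. +3 of the D-dimensional row.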
Vec4T<at::acc_type<cache_t, true>> grad_out[kMaxVecsPerThread];
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<grad_t, true>> go((&grad_output[b][0]) + D_start + d);
grad_out[i] = go;
}
for (int32_t l_start = 0; l_start < L; l_start += kWarpSize) {
int32_t l = l_start + threadIdx.x;
int64_t idx = l < L ? indices[indices_start + l] : 0;
{% if not dense %}
int32_t cache_idx = (placement == PlacementType::MANAGED_CACHING && l < L) ? lxu_cache_locations[indices_start + l] : 0;
{% endif %}
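// Broadcast each lane's index (and cache slot) to the whole warp with shfl_sync so
// all lanes cooperate on the dot product between grad_output and embedding row idx_j.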
for (auto j = 0; j < kWarpSize && l_start + j < L; ++j) {
int64_t idx_j = shfl_sync(idx, j);
{% if not dense %}
int32_t cache_idx_j = shfl_sync(cache_idx, j);
{% endif %}
at::acc_type<cache_t, true> grad_indice_weight = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
{% if not dense %}
if (placement == PlacementType::MANAGED_CACHING && cache_idx_j != kCacheLocationMissing) {
Vec4T<cache_t> weight(&lxu_cache_weights[cache_idx_j][d]);
grad_indice_weight += weight.acc.x * grad_out[i].acc.x +
weight.acc.y * grad_out[i].acc.y +
weight.acc.z * grad_out[i].acc.z + weight.acc.w * grad_out[i].acc.w;
} else {
int32_t D_emb = D;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
auto weight_row = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(
const_cast<emb_t*>(&weights[idx_j * D_emb]),
nullptr,
D,
nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value) {
qparams = weight_row.load_qparams();
}
Vec4T<at::acc_type<cache_t, true>> weight =
weight_row.load(d, qparams);
grad_indice_weight += weight.acc.x * grad_out[i].acc.x +
weight.acc.y * grad_out[i].acc.y +
weight.acc.z * grad_out[i].acc.z + weight.acc.w * grad_out[i].acc.w;
}
{% else %}
int32_t D_emb = D;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
auto weight_row = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(
const_cast<emb_t*>(&weights[idx_j * D_emb]),
nullptr,
D,
nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value) {
qparams = weight_row.load_qparams();
}
Vec4T<at::acc_type<cache_t, true>> weight =
weight_row.load(d, qparams);
grad_indice_weight += weight.acc.x * grad_out[i].acc.x +
weight.acc.y * grad_out[i].acc.y +
weight.acc.z * grad_out[i].acc.z + weight.acc.w * grad_out[i].acc.w;
{% endif %}
}
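// Reduce the per-lane partial dot products across the warp; lane 0 writes the
// gradient for index (l_start + j).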
grad_indice_weight =
warpReduceAllSum<at::acc_type<cache_t, true>>(grad_indice_weight);
if (threadIdx.x == 0) {
grad_indice_weights[indices_start + l_start + j] = grad_indice_weight;
}
}
}
}
Tensor {{ "dense" if dense else "split" }}_embedding_codegen_grad_indice_weights_cuda(
Tensor grad_output,
Tensor dev_weights,
{% if not dense %}
Tensor uvm_weights,
Tensor lxu_cache_weights,
Tensor weights_placements,
{% endif %}
Tensor weights_offsets,
Tensor D_offsets,
int64_t max_D,
Tensor indices,
Tensor offsets,
{% if not dense %}
Tensor lxu_cache_locations,
{% endif %}
Tensor feature_requires_grad) {
TENSOR_ON_CUDA_GPU(grad_output);
TENSOR_ON_CUDA_GPU(dev_weights);
{% if not dense %}
TENSOR_ON_CUDA_GPU(uvm_weights);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(weights_placements);
{% endif %}
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(indices);
TENSOR_ON_CUDA_GPU(offsets);
{% if not dense %}
TENSOR_ON_CUDA_GPU(lxu_cache_locations);
{% endif %}
if (feature_requires_grad.defined()) {
TENSOR_ON_CUDA_GPU(feature_requires_grad);
}
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(dev_weights.get_device());
const auto T = D_offsets.size(0) - 1;
TORCH_CHECK(T > 0);
// offsets = [B x T + 1]
const auto B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B >= 0);
TORCH_CHECK(max_D <= {{ max_embedding_dim }});
auto grad_indice_weights = empty_like(indices, indices.options().dtype(at::toAccumulateType(grad_output.scalar_type(), true)));
if (B == 0) {
return grad_indice_weights;
}
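// If no per-feature mask was provided, substitute an empty int32 tensor; the kernel
// treats a size-0 mask as "all features require gradients".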
feature_requires_grad = feature_requires_grad.defined() ? feature_requires_grad : at::empty({0}, indices.options().dtype(at::kInt));
DISPATCH_EMB_GRAD_CACHE_TYPES(
dev_weights.scalar_type(),
grad_output.scalar_type(),
{% if not dense %}
lxu_cache_weights.scalar_type(),
{% else %}
dev_weights.scalar_type(),
{% endif %}
"split_embedding_codegen_grad_indice_weights_kernel",
[&] {
{% for kMaxVecsPerThread in range(1, max_embedding_dim // items_per_warp + 1) %}
if (max_D <= {{ items_per_warp * kMaxVecsPerThread }}) {
{{ "dense" if dense else "split" }hipLaunchKernelGGL((}_embedding_codegen_grad_indice_weights_kernel<
emb_t,
grad_t,
cache_t,
{{ kMaxVecsPerThread }}>),
dim3(div_round_up((B * T), kForwardMaxThreads / kWarpSize)),
dim3(dim3(kWarpSize, kForwardMaxThreads / kWarpSize)),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output.packed_accessor64<grad_t, 2, at::RestrictPtrTraits>(),
dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
{% if not dense %}
uvm_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
lxu_cache_weights.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% endif %}
weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
indices.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
{% if not dense %}
lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% endif %}
feature_requires_grad.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
grad_indice_weights.packed_accessor32<at::acc_type<grad_t, true>, 1, at::RestrictPtrTraits>()
);
return;
}
{% endfor %}
});
C10_HIP_KERNEL_LAUNCH_CHECK();
return grad_indice_weights;
}
// clang-format on
| 0f3a710b6263d3317296b56a53a44a4abde3642a.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// clang-format off
#include "codegen/embedding_forward_template_helpers.cuh"
using Tensor = at::Tensor;
using namespace fbgemm_gpu;
{% if not dense %}
constexpr int32_t kCacheLocationMissing = -1;
{% endif %}
constexpr size_t kForwardMaxThreads = 512;
// TODO: optimization to use multiple warps per row.
template <typename emb_t, typename grad_t, typename cache_t, size_t kMaxVecsPerThread>
__global__
__launch_bounds__(kForwardMaxThreads) void {{ "dense" if dense else "split" }}_embedding_codegen_grad_indice_weights_kernel(
// [\sum_t E_t x D_t]
const at::PackedTensorAccessor64<grad_t, 2, at::RestrictPtrTraits>
grad_output,
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> dev_weights,
{% if not dense %}
at::PackedTensorAccessor64<emb_t, 1, at::RestrictPtrTraits> uvm_weights,
at::PackedTensorAccessor64<cache_t, 2, at::RestrictPtrTraits> lxu_cache_weights,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
weights_placements,
{% endif %}
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits> weights_offsets,
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits> D_offsets,
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
indices, // [N = \sum_{b,t} L_{b,t} total indices, i.e. flattened
// [B][T][L]
const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
offsets, // [B x T + 1]
{% if not dense %}
const at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
lxu_cache_locations,
{% endif %}
at::PackedTensorAccessor32<int32_t, 1, at::RestrictPtrTraits>
feature_requires_grad, // [T],
at::PackedTensorAccessor32<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits>
grad_indice_weights
) {
int32_t B = grad_output.size(0);
int32_t T = D_offsets.size(0) - 1;
int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y;
int32_t t = b_t / B;
int32_t b = b_t % B;
if (b_t >= B * T) {
return;
}
int64_t weights_offset = weights_offsets[t];
int32_t D_start = D_offsets[t];
int32_t D_end = D_offsets[t + 1];
int32_t D = D_end - D_start;
int64_t indices_start = offsets[t * B + b];
int64_t indices_end = offsets[t * B + b + 1];
int32_t L = indices_end - indices_start;
if (feature_requires_grad.size(0) > 0 && !feature_requires_grad[t]) {
// If the table does not require gradient computation, we set the gradient to zero.
for (int32_t l_start = 0; l_start < L; l_start += kWarpSize) {
int32_t l = l_start + threadIdx.x;
if (l < L) {
grad_indice_weights[indices_start + l] = 0.0;
}
}
return;
}
const emb_t* __restrict__ weights;
{% if not dense %}
const auto placement = static_cast<PlacementType>(weights_placements[t]);
if (placement == PlacementType::DEVICE) {
weights = &dev_weights[weights_offset];
} else {
weights = &uvm_weights[weights_offset];
}
{% else %}
weights = &dev_weights[weights_offset];
{% endif %}
Vec4T<at::acc_type<cache_t, true>> grad_out[kMaxVecsPerThread];
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<grad_t, true>> go((&grad_output[b][0]) + D_start + d);
grad_out[i] = go;
}
for (int32_t l_start = 0; l_start < L; l_start += kWarpSize) {
int32_t l = l_start + threadIdx.x;
int64_t idx = l < L ? indices[indices_start + l] : 0;
{% if not dense %}
int32_t cache_idx = (placement == PlacementType::MANAGED_CACHING && l < L) ? lxu_cache_locations[indices_start + l] : 0;
{% endif %}
for (auto j = 0; j < kWarpSize && l_start + j < L; ++j) {
int64_t idx_j = shfl_sync(idx, j);
{% if not dense %}
int32_t cache_idx_j = shfl_sync(cache_idx, j);
{% endif %}
at::acc_type<cache_t, true> grad_indice_weight = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
{% if not dense %}
if (placement == PlacementType::MANAGED_CACHING && cache_idx_j != kCacheLocationMissing) {
Vec4T<cache_t> weight(&lxu_cache_weights[cache_idx_j][d]);
grad_indice_weight += weight.acc.x * grad_out[i].acc.x +
weight.acc.y * grad_out[i].acc.y +
weight.acc.z * grad_out[i].acc.z + weight.acc.w * grad_out[i].acc.w;
} else {
int32_t D_emb = D;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
auto weight_row = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(
const_cast<emb_t*>(&weights[idx_j * D_emb]),
nullptr,
D,
nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value) {
qparams = weight_row.load_qparams();
}
Vec4T<at::acc_type<cache_t, true>> weight =
weight_row.load(d, qparams);
grad_indice_weight += weight.acc.x * grad_out[i].acc.x +
weight.acc.y * grad_out[i].acc.y +
weight.acc.z * grad_out[i].acc.z + weight.acc.w * grad_out[i].acc.w;
}
{% else %}
int32_t D_emb = D;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
auto weight_row = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(
const_cast<emb_t*>(&weights[idx_j * D_emb]),
nullptr,
D,
nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value) {
qparams = weight_row.load_qparams();
}
Vec4T<at::acc_type<cache_t, true>> weight =
weight_row.load(d, qparams);
grad_indice_weight += weight.acc.x * grad_out[i].acc.x +
weight.acc.y * grad_out[i].acc.y +
weight.acc.z * grad_out[i].acc.z + weight.acc.w * grad_out[i].acc.w;
{% endif %}
}
grad_indice_weight =
warpReduceAllSum<at::acc_type<cache_t, true>>(grad_indice_weight);
if (threadIdx.x == 0) {
grad_indice_weights[indices_start + l_start + j] = grad_indice_weight;
}
}
}
}
Tensor {{ "dense" if dense else "split" }}_embedding_codegen_grad_indice_weights_cuda(
Tensor grad_output,
Tensor dev_weights,
{% if not dense %}
Tensor uvm_weights,
Tensor lxu_cache_weights,
Tensor weights_placements,
{% endif %}
Tensor weights_offsets,
Tensor D_offsets,
int64_t max_D,
Tensor indices,
Tensor offsets,
{% if not dense %}
Tensor lxu_cache_locations,
{% endif %}
Tensor feature_requires_grad) {
TENSOR_ON_CUDA_GPU(grad_output);
TENSOR_ON_CUDA_GPU(dev_weights);
{% if not dense %}
TENSOR_ON_CUDA_GPU(uvm_weights);
TENSOR_ON_CUDA_GPU(lxu_cache_weights);
TENSOR_ON_CUDA_GPU(weights_placements);
{% endif %}
TENSOR_ON_CUDA_GPU(weights_offsets);
TENSOR_ON_CUDA_GPU(D_offsets);
TENSOR_ON_CUDA_GPU(indices);
TENSOR_ON_CUDA_GPU(offsets);
{% if not dense %}
TENSOR_ON_CUDA_GPU(lxu_cache_locations);
{% endif %}
if (feature_requires_grad.defined()) {
TENSOR_ON_CUDA_GPU(feature_requires_grad);
}
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(dev_weights.get_device());
const auto T = D_offsets.size(0) - 1;
TORCH_CHECK(T > 0);
// offsets = [B x T + 1]
const auto B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B >= 0);
TORCH_CHECK(max_D <= {{ max_embedding_dim }});
auto grad_indice_weights = empty_like(indices, indices.options().dtype(at::toAccumulateType(grad_output.scalar_type(), true)));
if (B == 0) {
return grad_indice_weights;
}
feature_requires_grad = feature_requires_grad.defined() ? feature_requires_grad : at::empty({0}, indices.options().dtype(at::kInt));
DISPATCH_EMB_GRAD_CACHE_TYPES(
dev_weights.scalar_type(),
grad_output.scalar_type(),
{% if not dense %}
lxu_cache_weights.scalar_type(),
{% else %}
dev_weights.scalar_type(),
{% endif %}
"split_embedding_codegen_grad_indice_weights_kernel",
[&] {
{% for kMaxVecsPerThread in range(1, max_embedding_dim // items_per_warp + 1) %}
if (max_D <= {{ items_per_warp * kMaxVecsPerThread }}) {
{{ "dense" if dense else "split" }}_embedding_codegen_grad_indice_weights_kernel<
emb_t,
grad_t,
cache_t,
{{ kMaxVecsPerThread }}><<<
div_round_up((B * T), kForwardMaxThreads / kWarpSize),
dim3(kWarpSize, kForwardMaxThreads / kWarpSize),
0,
at::cuda::getCurrentCUDAStream()>>>(
grad_output.packed_accessor64<grad_t, 2, at::RestrictPtrTraits>(),
dev_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
{% if not dense %}
uvm_weights.packed_accessor64<emb_t, 1, at::RestrictPtrTraits>(),
lxu_cache_weights.packed_accessor64<cache_t, 2, at::RestrictPtrTraits>(),
weights_placements.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% endif %}
weights_offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
D_offsets.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
indices.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
offsets.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
{% if not dense %}
lxu_cache_locations.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
{% endif %}
feature_requires_grad.packed_accessor32<int32_t, 1, at::RestrictPtrTraits>(),
grad_indice_weights.packed_accessor32<at::acc_type<grad_t, true>, 1, at::RestrictPtrTraits>()
);
return;
}
{% endfor %}
});
C10_CUDA_KERNEL_LAUNCH_CHECK();
return grad_indice_weights;
}
// clang-format on
|
f35319d0af37bd6b582751b731c87b51c5859a73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <hipcub/hipcub.hpp>
#include <cub/device/device_segmented_reduce.cuh>
#include <cub/iterator/constant_input_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include <hipcub/hipcub.hpp>
#include <cub/util_math.cuh>
#include <cub/util_type.cuh>
#include <thrust/device_vector.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/system/hip/detail/core/triple_chevron_launch.h>
#include <cstdio>
#include <limits>
#include <typeinfo>
#include "test_util.h"
#include <nv/target>
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
int g_ptx_version;
int g_sm_count;
double g_device_giga_bandwidth;
bool g_verbose = false;
bool g_verbose_input = false;
int g_timing_iterations = 0;
CachingDeviceAllocator g_allocator(true);
// Dispatch types
enum Backend
{
CUB, // CUB method
CUB_SEGMENTED, // CUB segmented method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
CDP_SEGMENTED, // GPU-based segmented method
};
inline const char* BackendToString(Backend b)
{
switch (b)
{
case CUB:
return "CUB";
case CUB_SEGMENTED:
return "CUB_SEGMENTED";
case CDP:
return "CDP";
case CDP_SEGMENTED:
return "CDP_SEGMENTED";
default:
break;
}
return "";
}
// Custom max functor
struct CustomMax
{
/// Boolean max operator, returns <tt>(a > b) ? a : b</tt>
template <typename T, typename C>
__host__ __device__ auto operator()(T&& a, C&& b)
-> cub::detail::accumulator_t<hipcub::Max, T, C>
{
return CUB_MAX(a, b);
}
};
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceReduce entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to reduce entrypoint (custom-max)
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION
hipError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
BeginOffsetIteratorT /*d_segment_begin_offsets*/,
EndOffsetIteratorT /*d_segment_end_offsets*/,
ReductionOpT reduction_op)
{
using InputT = cub::detail::value_t<InputIteratorT>;
// The output value type
using OutputT = cub::detail::non_void_value_t<OutputIteratorT, InputT>;
// Max-identity
OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, reduction_op, identity);
}
return error;
}
/**
* Dispatch to sum entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
hipError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
BeginOffsetIteratorT /*d_segment_begin_offsets*/,
EndOffsetIteratorT /*d_segment_end_offsets*/,
hipcub::Sum /*reduction_op*/)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
}
return error;
}
/**
* Dispatch to min entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
hipError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
BeginOffsetIteratorT /*d_segment_begin_offsets*/,
EndOffsetIteratorT /*d_segment_end_offsets*/,
hipcub::Min /*reduction_op*/)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
}
return error;
}
/**
* Dispatch to max entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
hipError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
BeginOffsetIteratorT /*d_segment_begin_offsets*/,
EndOffsetIteratorT /*d_segment_end_offsets*/,
hipcub::Max /*reduction_op*/)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
}
return error;
}
/**
* Dispatch to argmin entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
hipError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
BeginOffsetIteratorT /*d_segment_begin_offsets*/,
EndOffsetIteratorT /*d_segment_end_offsets*/,
hipcub::ArgMin /*reduction_op*/)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
}
return error;
}
/**
* Dispatch to argmax entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
hipError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
BeginOffsetIteratorT /*d_segment_begin_offsets*/,
EndOffsetIteratorT /*d_segment_end_offsets*/,
hipcub::ArgMax /*reduction_op*/)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
}
return error;
}
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceSegmentedReduce entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to reduce entrypoint (custom-max)
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION
hipError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
ReductionOpT reduction_op)
{
// The input value type
using InputT = cub::detail::value_t<InputIteratorT>;
// The output value type
using OutputT = cub::detail::non_void_value_t<OutputIteratorT, InputT>;
// Max-identity
OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_begin_offsets, d_segment_end_offsets, reduction_op, identity);
}
return error;
}
/**
* Dispatch to sum entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
hipError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
hipcub::Sum /*reduction_op*/)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_begin_offsets, d_segment_end_offsets);
}
return error;
}
/**
* Dispatch to min entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
hipError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
hipcub::Min /*reduction_op*/)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Min(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_begin_offsets, d_segment_end_offsets);
}
return error;
}
/**
* Dispatch to max entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
hipError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
hipcub::Max /*reduction_op*/)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Max(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_begin_offsets, d_segment_end_offsets);
}
return error;
}
/**
* Dispatch to argmin entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
hipError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
hipcub::ArgMin /*reduction_op*/)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::ArgMin(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_begin_offsets, d_segment_end_offsets);
}
return error;
}
/**
* Dispatch to argmax entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
hipError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
hipError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
hipcub::ArgMax /*reduction_op*/)
{
// Invoke kernel to device reduction directly
hipError_t error = hipSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_begin_offsets, d_segment_end_offsets);
}
return error;
}
//---------------------------------------------------------------------
// CUDA nested-parallelism test kernel
//---------------------------------------------------------------------
#if TEST_CDP == 1
/**
* Simple wrapper kernel to invoke DeviceReduce
*/
template <int CubBackend,
typename InputIteratorT,
typename OutputIteratorT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT,
typename ReductionOpT>
__global__ void CDPDispatchKernel(Int2Type<CubBackend> cub_backend,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void *d_temp_storage,
size_t temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
ReductionOpT reduction_op)
{
*d_cdp_error = Dispatch(cub_backend,
timing_iterations,
d_temp_storage_bytes,
d_cdp_error,
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_items,
max_segments,
d_segment_begin_offsets,
d_segment_end_offsets,
reduction_op);
*d_temp_storage_bytes = temp_storage_bytes;
}
/**
* Launch kernel and dispatch on device. Should only be called from host code.
* The CubBackend should be one of the non-CDP CUB backends to invoke from the
* device.
*/
template <int CubBackend,
typename InputIteratorT,
typename OutputIteratorT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT,
typename ReductionOpT>
hipError_t LaunchCDPKernel(Int2Type<CubBackend> cub_backend,
int timing_iterations,
size_t *d_temp_storage_bytes,
hipError_t *d_cdp_error,
void *d_temp_storage,
size_t &temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
ReductionOpT reduction_op)
{
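// Launch the device-side dispatch kernel with a single thread (grid 1, block 1,
// 0 bytes shared memory, default stream); Dispatch() then runs the reduction from device code.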
hipError_t retval =
thrust::cuda_cub::launcher::triple_chevron(1, 1, 0, 0)
.doit(CDPDispatchKernel<CubBackend,
InputIteratorT,
OutputIteratorT,
BeginOffsetIteratorT,
EndOffsetIteratorT,
ReductionOpT>,
cub_backend,
timing_iterations,
d_temp_storage_bytes,
d_cdp_error,
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_items,
max_segments,
d_segment_begin_offsets,
d_segment_end_offsets,
reduction_op);
CubDebugExit(retval);
CubDebugExit(cub::detail::device_synchronize());
// Copy out temp_storage_bytes
CubDebugExit(hipMemcpy(&temp_storage_bytes,
d_temp_storage_bytes,
sizeof(size_t) * 1,
hipMemcpyDeviceToHost));
// Copy out error
CubDebugExit(hipMemcpy(&retval,
d_cdp_error,
sizeof(hipError_t) * 1,
hipMemcpyDeviceToHost));
return retval;
}
// Specializations of Dispatch that translate the CDP backend to the appropriate
// CUB backend, and uses the CUB backend to launch the CDP kernel.
#define DEFINE_CDP_DISPATCHER(CdpBackend, CubBackend) \
template <typename InputIteratorT, \
typename OutputIteratorT, \
typename BeginOffsetIteratorT, \
typename EndOffsetIteratorT, \
typename ReductionOpT> \
hipError_t Dispatch(Int2Type<CdpBackend>, \
int timing_iterations, \
size_t *d_temp_storage_bytes, \
hipError_t *d_cdp_error, \
\
void *d_temp_storage, \
size_t &temp_storage_bytes, \
InputIteratorT d_in, \
OutputIteratorT d_out, \
int num_items, \
int max_segments, \
BeginOffsetIteratorT d_segment_begin_offsets, \
EndOffsetIteratorT d_segment_end_offsets, \
ReductionOpT reduction_op) \
{ \
Int2Type<CubBackend> cub_backend{}; \
return LaunchCDPKernel(cub_backend, \
timing_iterations, \
d_temp_storage_bytes, \
d_cdp_error, \
d_temp_storage, \
temp_storage_bytes, \
d_in, \
d_out, \
num_items, \
max_segments, \
d_segment_begin_offsets, \
d_segment_end_offsets, \
reduction_op); \
}
DEFINE_CDP_DISPATCHER(CDP, CUB)
DEFINE_CDP_DISPATCHER(CDP_SEGMENTED, CUB_SEGMENTED)
#undef DEFINE_CDP_DISPATCHER
#endif // TEST_CDP
//---------------------------------------------------------------------
// Problem generation
//---------------------------------------------------------------------
/// Initialize problem
template <typename InputT>
void Initialize(
GenMode gen_mode,
InputT *h_in,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
InitValue(gen_mode, h_in[i], i);
}
if (g_verbose_input)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/// Solve problem (max/custom-max functor)
template <typename ReductionOpT, typename InputT, typename _OutputT>
struct Solution
{
using OutputT = _OutputT;
using InitT = OutputT;
using AccumT = cub::detail::accumulator_t<ReductionOpT, InitT, InputT>;
template <typename HostInputIteratorT, typename OffsetT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets, EndOffsetIteratorT h_segment_end_offsets, ReductionOpT reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
AccumT aggregate = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
for (int j = h_segment_begin_offsets[i]; j < h_segment_end_offsets[i]; ++j)
aggregate = reduction_op(aggregate, OutputT(h_in[j]));
h_reference[i] = aggregate;
}
}
};
/// Solve problem (min functor)
template <typename InputT, typename _OutputT>
struct Solution<hipcub::Min, InputT, _OutputT>
{
using OutputT = _OutputT;
using InitT = OutputT;
using AccumT = cub::detail::accumulator_t<hipcub::Min, InitT, InputT>;
template <typename HostInputIteratorT, typename OffsetT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets, EndOffsetIteratorT h_segment_end_offsets, hipcub::Min reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
AccumT aggregate = Traits<InputT>::Max(); // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent
for (int j = h_segment_begin_offsets[i]; j < h_segment_end_offsets[i]; ++j)
aggregate = reduction_op(aggregate, OutputT(h_in[j]));
h_reference[i] = aggregate;
}
}
};
/// Solve problem (sum functor)
template <typename InputT, typename _OutputT>
struct Solution<hipcub::Sum, InputT, _OutputT>
{
using OutputT = _OutputT;
using InitT = OutputT;
using AccumT = cub::detail::accumulator_t<hipcub::Sum, InitT, InputT>;
template <typename HostInputIteratorT, typename OffsetT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets, EndOffsetIteratorT h_segment_end_offsets, hipcub::Sum reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
AccumT aggregate;
InitValue(INTEGER_SEED, aggregate, 0);
for (int j = h_segment_begin_offsets[i]; j < h_segment_end_offsets[i]; ++j)
aggregate = reduction_op(aggregate, h_in[j]);
h_reference[i] = static_cast<OutputT>(aggregate);
}
}
};
/// Solve problem (argmin functor)
template <typename InputValueT, typename OutputValueT>
struct Solution<hipcub::ArgMin, InputValueT, OutputValueT>
{
typedef KeyValuePair<int, OutputValueT> OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets, EndOffsetIteratorT h_segment_end_offsets, hipcub::ArgMin reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate(1, Traits<InputValueT>::Max()); // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent
for (int j = h_segment_begin_offsets[i]; j < h_segment_end_offsets[i]; ++j)
{
OutputT item(j - h_segment_begin_offsets[i], OutputValueT(h_in[j]));
aggregate = reduction_op(aggregate, item);
}
h_reference[i] = aggregate;
}
}
};
/// Solve problem (argmax functor)
template <typename InputValueT, typename OutputValueT>
struct Solution<hipcub::ArgMax, InputValueT, OutputValueT>
{
typedef KeyValuePair<int, OutputValueT> OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets, EndOffsetIteratorT h_segment_end_offsets, hipcub::ArgMax reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate(1, Traits<InputValueT>::Lowest()); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
for (int j = h_segment_begin_offsets[i]; j < h_segment_end_offsets[i]; ++j)
{
OutputT item(j - h_segment_begin_offsets[i], OutputValueT(h_in[j]));
aggregate = reduction_op(aggregate, item);
}
h_reference[i] = aggregate;
}
}
};
//---------------------------------------------------------------------
// Problem generation
//---------------------------------------------------------------------
/// Test DeviceReduce for a given problem input
template <
typename BackendT,
typename DeviceInputIteratorT,
typename DeviceOutputIteratorT,
typename HostReferenceIteratorT,
typename OffsetT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT,
typename ReductionOpT>
void Test(
BackendT backend,
DeviceInputIteratorT d_in,
DeviceOutputIteratorT d_out,
OffsetT num_items,
OffsetT num_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
ReductionOpT reduction_op,
HostReferenceIteratorT h_reference)
{
// Input data types
using InputT = cub::detail::value_t<DeviceInputIteratorT>;
// Allocate CDP device arrays for temp storage size and error
size_t *d_temp_storage_bytes = NULL;
hipError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(hipError_t) * 1));
// Inquire temp device storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(Dispatch(backend, 1,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_begin_offsets, d_segment_end_offsets,
reduction_op));
// Allocate temp device storage
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Run warmup/correctness iteration
CubDebugExit(Dispatch(backend, 1,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_begin_offsets, d_segment_end_offsets,
reduction_op));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(h_reference, d_out, num_segments, g_verbose, g_verbose);
printf("\t%s", compare ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
if (g_timing_iterations > 0)
{
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(backend, g_timing_iterations,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_begin_offsets, d_segment_end_offsets,
reduction_op));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = giga_rate * sizeof(InputT);
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak",
avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
}
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, compare);
}
/// Test DeviceReduce
template <
Backend BACKEND,
typename OutputValueT,
typename HostInputIteratorT,
typename DeviceInputIteratorT,
typename OffsetT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT,
typename ReductionOpT>
void SolveAndTest(
HostInputIteratorT h_in,
DeviceInputIteratorT d_in,
OffsetT num_items,
OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets,
EndOffsetIteratorT h_segment_end_offsets,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
ReductionOpT reduction_op)
{
using InputValueT = cub::detail::value_t<DeviceInputIteratorT>;
using SolutionT = Solution<ReductionOpT, InputValueT, OutputValueT>;
using OutputT = typename SolutionT::OutputT;
printf("\n\n%s hipcub::DeviceReduce<%s> %d items (%s), %d segments\n",
BackendToString(BACKEND),
typeid(ReductionOpT).name(),
num_items,
typeid(HostInputIteratorT).name(),
num_segments);
fflush(stdout);
// Allocate and solve solution
OutputT *h_reference = new OutputT[num_segments];
SolutionT::Solve(h_in, h_reference, num_segments, h_segment_begin_offsets, h_segment_end_offsets, reduction_op);
// Run with discard iterator
DiscardOutputIterator<OffsetT> discard_itr;
Test(Int2Type<BACKEND>(), d_in, discard_itr, num_items, num_segments, d_segment_begin_offsets, d_segment_end_offsets, reduction_op, h_reference);
// Run with output data
OutputT *d_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(OutputT) * num_segments));
CubDebugExit(hipMemset(d_out, 0, sizeof(OutputT) * num_segments));
Test(Int2Type<BACKEND>(), d_in, d_out, num_items, num_segments, d_segment_begin_offsets, d_segment_end_offsets, reduction_op, h_reference);
// Cleanup
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (h_reference) delete[] h_reference;
}
/// Test specific problem type
template <
Backend BACKEND,
typename InputT,
typename OutputT,
typename OffsetT,
typename ReductionOpT>
void TestProblem(
OffsetT num_items,
OffsetT num_segments,
GenMode gen_mode,
ReductionOpT reduction_op)
{
printf("\n\nInitializing %d %s->%s (gen mode %d)... ", num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout);
fflush(stdout);
// Initialize value data
InputT* h_in = new InputT[num_items];
Initialize(gen_mode, h_in, num_items);
// Initialize segment data
OffsetT *h_segment_offsets = new OffsetT[num_segments + 1];
InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input);
// Initialize device data
OffsetT *d_segment_offsets = NULL;
InputT *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (num_segments + 1)));
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(InputT) * num_items, hipMemcpyHostToDevice));
CubDebugExit(hipMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), hipMemcpyHostToDevice));
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_offsets, h_segment_offsets + 1, d_segment_offsets, d_segment_offsets + 1, reduction_op);
if (h_segment_offsets) delete[] h_segment_offsets;
if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets));
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
}
/// Test different operators
template <
Backend BACKEND,
typename OutputT,
typename HostInputIteratorT,
typename DeviceInputIteratorT,
typename OffsetT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
void TestByOp(
HostInputIteratorT h_in,
DeviceInputIteratorT d_in,
OffsetT num_items,
OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets,
EndOffsetIteratorT h_segment_end_offsets,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets)
{
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets, h_segment_end_offsets, d_segment_begin_offsets, d_segment_end_offsets, CustomMax());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets, h_segment_end_offsets, d_segment_begin_offsets, d_segment_end_offsets, Sum());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets, h_segment_end_offsets, d_segment_begin_offsets, d_segment_end_offsets, Min());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets, h_segment_end_offsets, d_segment_begin_offsets, d_segment_end_offsets, ArgMin());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets, h_segment_end_offsets, d_segment_begin_offsets, d_segment_end_offsets, Max());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets, h_segment_end_offsets, d_segment_begin_offsets, d_segment_end_offsets, ArgMax());
}
template<typename OffsetT>
struct TransformFunctor1
{
__host__ __device__ __forceinline__ OffsetT operator()(OffsetT offset) const
{
return offset;
}
};
template<typename OffsetT>
struct TransformFunctor2
{
__host__ __device__ __forceinline__ OffsetT operator()(OffsetT offset) const
{
return offset;
}
};
/// Test different backends
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestByBackend(
OffsetT num_items,
OffsetT max_segments,
GenMode gen_mode)
{
#if TEST_CDP == 0
constexpr auto NonSegmentedBackend = CUB;
constexpr auto SegmentedBackend = CUB_SEGMENTED;
#else // TEST_CDP
constexpr auto NonSegmentedBackend = CDP;
constexpr auto SegmentedBackend = CDP_SEGMENTED;
#endif // TEST_CDP
// Initialize host data
printf("\n\nInitializing %d %s -> %s (gen mode %d)... ",
num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout);
InputT *h_in = new InputT[num_items];
OffsetT *h_segment_offsets = new OffsetT[max_segments + 1];
Initialize(gen_mode, h_in, num_items);
// Initialize device data
InputT *d_in = NULL;
OffsetT *d_segment_offsets = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (max_segments + 1)));
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(InputT) * num_items, hipMemcpyHostToDevice));
//
// Test single-segment implementations
//
InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input);
// Page-aligned-input tests
TestByOp<NonSegmentedBackend, OutputT>(h_in, d_in, num_items, 1,
h_segment_offsets, h_segment_offsets + 1, (OffsetT*) NULL, (OffsetT*)NULL);
// Non-page-aligned-input tests
if (num_items > 1)
{
InitializeSegments(num_items - 1, 1, h_segment_offsets, g_verbose_input);
TestByOp<NonSegmentedBackend, OutputT>(h_in + 1, d_in + 1, num_items - 1, 1,
h_segment_offsets, h_segment_offsets + 1, (OffsetT*) NULL, (OffsetT*)NULL);
}
//
// Test segmented implementation
//
// Right now we assign a single thread block to each segment, so let's keep it to under 128K items per segment
int max_items_per_segment = 128000;
for (int num_segments = cub::DivideAndRoundUp(num_items, max_items_per_segment);
num_segments < max_segments;
num_segments = (num_segments * 32) + 1)
{
// Test with segment pointer
InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input);
CubDebugExit(hipMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), hipMemcpyHostToDevice));
TestByOp<SegmentedBackend, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_offsets, h_segment_offsets + 1, d_segment_offsets, d_segment_offsets + 1);
// Test with segment iterator
typedef CastOp<OffsetT> IdentityOpT;
IdentityOpT identity_op;
TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> h_segment_offsets_itr(
h_segment_offsets,
identity_op);
TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> d_segment_offsets_itr(
d_segment_offsets,
identity_op);
TestByOp<SegmentedBackend, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_offsets_itr, h_segment_offsets_itr + 1, d_segment_offsets_itr, d_segment_offsets_itr + 1);
// Test with transform iterators of different types
typedef TransformFunctor1<OffsetT> TransformFunctor1T;
typedef TransformFunctor2<OffsetT> TransformFunctor2T;
TransformInputIterator<OffsetT, TransformFunctor1T, OffsetT*, OffsetT> h_segment_begin_offsets_itr(h_segment_offsets, TransformFunctor1T());
TransformInputIterator<OffsetT, TransformFunctor2T, OffsetT*, OffsetT> h_segment_end_offsets_itr(h_segment_offsets + 1, TransformFunctor2T());
TransformInputIterator<OffsetT, TransformFunctor1T, OffsetT*, OffsetT> d_segment_begin_offsets_itr(d_segment_offsets, TransformFunctor1T());
TransformInputIterator<OffsetT, TransformFunctor2T, OffsetT*, OffsetT> d_segment_end_offsets_itr(d_segment_offsets + 1, TransformFunctor2T());
TestByOp<SegmentedBackend, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets_itr, h_segment_end_offsets_itr,
d_segment_begin_offsets_itr, d_segment_end_offsets_itr);
}
if (h_in) delete[] h_in;
if (h_segment_offsets) delete[] h_segment_offsets;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets));
}
/// Test different input-generation modes
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestByGenMode(
OffsetT num_items,
OffsetT max_segments)
{
//
// Test pointer support using different input-generation modes
//
TestByBackend<InputT, OutputT>(num_items, max_segments, UNIFORM);
TestByBackend<InputT, OutputT>(num_items, max_segments, INTEGER_SEED);
TestByBackend<InputT, OutputT>(num_items, max_segments, RANDOM);
//
// Test iterator support using a constant-iterator and SUM
//
InputT val;
InitValue(UNIFORM, val, 0);
ConstantInputIterator<InputT, OffsetT> h_in(val);
OffsetT *h_segment_offsets = new OffsetT[1 + 1];
InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input);
#if TEST_CDP == 0
constexpr auto Backend = CUB;
#else // TEST_CDP
constexpr auto Backend = CDP;
#endif // TEST_CDP
SolveAndTest<Backend, OutputT>(h_in, h_in, num_items, 1,
h_segment_offsets, h_segment_offsets + 1, (OffsetT*) NULL, (OffsetT*)NULL, Sum());
if (h_segment_offsets) delete[] h_segment_offsets;
}
/// Test different problem sizes
template <typename InputT, typename OutputT, typename OffsetT>
void TestBySize(OffsetT max_items, OffsetT max_segments, OffsetT tile_size)
{
// Test 0, 1, many
TestByGenMode<InputT, OutputT>(0, max_segments);
TestByGenMode<InputT, OutputT>(1, max_segments);
TestByGenMode<InputT, OutputT>(max_items, max_segments);
// Test random problem sizes from a log-distribution [8, max_items-ish)
int num_iterations = 8;
double max_exp = log(double(max_items)) / log(double(2.0));
for (int i = 0; i < num_iterations; ++i)
{
OffsetT num_items = (OffsetT)pow(2.0, RandomValue(max_exp - 3.0) + 3.0);
TestByGenMode<InputT, OutputT>(num_items, max_segments);
}
//
// White-box testing of single-segment problems around specific sizes
//
#if TEST_CDP == 0
constexpr auto Backend = CUB;
#else // TEST_CDP
constexpr auto Backend = CDP;
#endif // TEST_CDP
// Tile-boundaries: multiple blocks, one tile per block
TestProblem<Backend, InputT, OutputT>(tile_size * 4, 1, RANDOM, Sum());
TestProblem<Backend, InputT, OutputT>(tile_size * 4 + 1, 1, RANDOM, Sum());
TestProblem<Backend, InputT, OutputT>(tile_size * 4 - 1, 1, RANDOM, Sum());
// Tile-boundaries: multiple blocks, multiple tiles per block
OffsetT sm_occupancy = 32;
OffsetT occupancy = tile_size * sm_occupancy * g_sm_count;
TestProblem<Backend, InputT, OutputT>(occupancy, 1, RANDOM, Sum());
TestProblem<Backend, InputT, OutputT>(occupancy + 1, 1, RANDOM, Sum());
TestProblem<Backend, InputT, OutputT>(occupancy - 1, 1, RANDOM, Sum());
};
class CustomInputT
{
char m_val{};
public:
__host__ __device__ explicit CustomInputT(char val)
: m_val(val)
{}
__host__ __device__ int get() const { return static_cast<int>(m_val); }
};
class CustomAccumulatorT
{
int m_val{0};
int m_magic_value{42};
__host__ __device__ CustomAccumulatorT(int val)
: m_val(val)
{}
public:
__host__ __device__ CustomAccumulatorT()
{}
__host__ __device__ CustomAccumulatorT(const CustomAccumulatorT &in)
: m_val(in.is_valid() * in.get())
, m_magic_value(in.is_valid() * 42)
{}
__host__ __device__ void operator=(const CustomInputT &in)
{
if (this->is_valid())
{
m_val = in.get();
}
}
__host__ __device__ void operator=(const CustomAccumulatorT &in)
{
if (this->is_valid() && in.is_valid())
{
m_val = in.get();
}
}
__host__ __device__ CustomAccumulatorT
operator+(const CustomInputT &in) const
{
const int multiplier = this->is_valid();
return {(m_val + in.get()) * multiplier};
}
__host__ __device__ CustomAccumulatorT
operator+(const CustomAccumulatorT &in) const
{
const int multiplier = this->is_valid() && in.is_valid();
return {(m_val + in.get()) * multiplier};
}
__host__ __device__ int get() const { return m_val; }
__host__ __device__ bool is_valid() const { return m_magic_value == 42; }
};
class CustomOutputT
{
bool *m_d_flag{};
int m_expected{};
public:
__host__ __device__ CustomOutputT(bool *d_flag, int expected)
: m_d_flag(d_flag)
, m_expected(expected)
{}
__host__ __device__ void operator=(const CustomAccumulatorT &accum) const
{
*m_d_flag = accum.is_valid() && (accum.get() == m_expected);
}
};
__global__ void InitializeTestAccumulatorTypes(int num_items,
int expected,
bool *d_flag,
CustomInputT *d_in,
CustomOutputT *d_out)
{
const int idx = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x);
if (idx < num_items)
{
d_in[idx] = CustomInputT(1);
}
if (idx == 0)
{
*d_out = CustomOutputT{d_flag, expected};
}
}
template <typename T,
typename OffsetT>
void TestBigIndicesHelper(OffsetT num_items)
{
thrust::constant_iterator<T> const_iter(T{1});
thrust::device_vector<std::size_t> out(1);
std::size_t* d_out = thrust::raw_pointer_cast(out.data());
std::uint8_t *d_temp_storage{};
std::size_t temp_storage_bytes{};
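  // DeviceReduce follows the usual CUB-style two-phase calling convention: the
  // first call, made with a null d_temp_storage, only reports the required
  // temp_storage_bytes; the reduction itself runs on the second call once the
  // scratch buffer has been allocated.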
CubDebugExit(
hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, const_iter, d_out, num_items));
thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes);
d_temp_storage = thrust::raw_pointer_cast(temp_storage.data());
CubDebugExit(
hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, const_iter, d_out, num_items));
std::size_t result = out[0];
AssertEquals(result, num_items);
}
template <typename T>
void TestBigIndices()
{
TestBigIndicesHelper<T, std::uint32_t>(1ull << 30);
TestBigIndicesHelper<T, std::uint32_t>(1ull << 31);
TestBigIndicesHelper<T, std::uint32_t>((1ull << 32) - 1);
TestBigIndicesHelper<T, std::uint64_t>(1ull << 33);
}
void TestAccumulatorTypes()
{
const int num_items = 2 * 1024 * 1024;
const int expected = num_items;
const int block_size = 256;
const int grid_size = (num_items + block_size - 1) / block_size;
CustomInputT *d_in{};
CustomOutputT *d_out{};
CustomAccumulatorT init{};
bool *d_flag{};
CubDebugExit(
g_allocator.DeviceAllocate((void **)&d_out, sizeof(CustomOutputT)));
CubDebugExit(g_allocator.DeviceAllocate((void **)&d_flag, sizeof(bool)));
CubDebugExit(g_allocator.DeviceAllocate((void **)&d_in,
sizeof(CustomInputT) * num_items));
hipLaunchKernelGGL(( InitializeTestAccumulatorTypes), dim3(grid_size), dim3(block_size), 0, 0, num_items,
expected,
d_flag,
d_in,
d_out);
std::uint8_t *d_temp_storage{};
std::size_t temp_storage_bytes{};
CubDebugExit(hipcub::DeviceReduce::Reduce(d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_items,
hipcub::Sum{},
init));
CubDebugExit(
g_allocator.DeviceAllocate((void **)&d_temp_storage, temp_storage_bytes));
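  // Deliberately fill the temp storage with nonzero bytes: if the reduction
  // relied on zero-initialized scratch memory to produce valid accumulators,
  // the magic-value check in CustomAccumulatorT::is_valid() would fail and the
  // result flag checked below would come back false.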
CubDebugExit(hipMemset(d_temp_storage, 1, temp_storage_bytes));
CubDebugExit(hipcub::DeviceReduce::Reduce(d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_items,
hipcub::Sum{},
init));
bool ok{};
CubDebugExit(hipMemcpy(&ok, d_flag, sizeof(bool), hipMemcpyDeviceToHost));
AssertTrue(ok);
CubDebugExit(g_allocator.DeviceFree(d_out));
CubDebugExit(g_allocator.DeviceFree(d_in));
}
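// Helper functor handed to DeviceReducePolicy's MaxPolicy::Invoke(): the policy
// dispatcher selects the tuning policy matching the detected PTX version and
// calls Invoke<ActivePolicyT>(), which records that policy's tile size
// (BLOCK_THREADS * ITEMS_PER_THREAD) for the white-box tests in TestBySize.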
template <typename InputT, typename OutputT, typename OffsetT>
struct GetTileSize
{
OffsetT max_items{};
OffsetT max_segments{};
OffsetT tile_size{};
GetTileSize(OffsetT max_items, OffsetT max_segments)
: max_items(max_items)
, max_segments(max_segments)
{}
template <typename ActivePolicyT>
CUB_RUNTIME_FUNCTION hipError_t Invoke()
{
this->tile_size = ActivePolicyT::ReducePolicy::BLOCK_THREADS *
ActivePolicyT::ReducePolicy::ITEMS_PER_THREAD;
return hipSuccess;
}
};
/// Test problem type
template <typename InputT, typename OutputT, typename OffsetT>
void TestType(OffsetT max_items, OffsetT max_segments)
{
// Inspect the tuning policies to determine this arch's tile size:
using MaxPolicyT =
typename DeviceReducePolicy<InputT, OffsetT, hipcub::Sum>::MaxPolicy;
GetTileSize<InputT, OutputT, OffsetT> dispatch(max_items, max_segments);
CubDebugExit(MaxPolicyT::Invoke(g_ptx_version, dispatch));
TestBySize<InputT, OutputT>(max_items, max_segments, dispatch.tile_size);
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
typedef int OffsetT;
OffsetT max_items = 27000000;
OffsetT max_segments = 34000;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
g_verbose_input = args.CheckCmdLineFlag("v2");
args.GetCmdLineArgument("n", max_items);
args.GetCmdLineArgument("s", max_segments);
args.GetCmdLineArgument("i", g_timing_iterations);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items> "
"[--s=<num segments> "
"[--i=<timing iterations> "
"[--device=<device-id>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
g_device_giga_bandwidth = args.device_giga_bandwidth;
// Get ptx version
CubDebugExit(PtxVersion(g_ptx_version));
// Get SM count
g_sm_count = args.deviceProp.multiProcessorCount;
// %PARAM% TEST_CDP cdp 0:1
// %PARAM% TEST_TYPES types 0:1:2:3
#if TEST_TYPES == 0
TestType<signed char, signed char>(max_items, max_segments);
TestType<unsigned char, unsigned char>(max_items, max_segments);
TestType<signed char, int>(max_items, max_segments);
#elif TEST_TYPES == 1
TestType<short, short>(max_items, max_segments);
TestType<int, int>(max_items, max_segments);
TestType<long, long>(max_items, max_segments);
TestType<long long, long long>(max_items, max_segments);
#elif TEST_TYPES == 2
TestType<uchar2, uchar2>(max_items, max_segments);
TestType<uint2, uint2>(max_items, max_segments);
TestType<ulonglong2, ulonglong2>(max_items, max_segments);
TestType<ulonglong4, ulonglong4>(max_items, max_segments);
#else // TEST_TYPES == 3
TestType<TestFoo, TestFoo>(max_items, max_segments);
TestType<TestBar, TestBar>(max_items, max_segments);
TestAccumulatorTypes();
TestBigIndices<std::size_t>();
#endif
printf("\n");
return 0;
}
| f35319d0af37bd6b582751b731c87b51c5859a73.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <cub/device/device_reduce.cuh>
#include <cub/device/device_segmented_reduce.cuh>
#include <cub/iterator/constant_input_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include <cub/util_allocator.cuh>
#include <cub/util_math.cuh>
#include <cub/util_type.cuh>
#include <thrust/device_vector.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/system/cuda/detail/core/triple_chevron_launch.h>
#include <cstdio>
#include <limits>
#include <typeinfo>
#include "test_util.h"
#include <nv/target>
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
int g_ptx_version;
int g_sm_count;
double g_device_giga_bandwidth;
bool g_verbose = false;
bool g_verbose_input = false;
int g_timing_iterations = 0;
CachingDeviceAllocator g_allocator(true);
// Dispatch types
enum Backend
{
CUB, // CUB method
CUB_SEGMENTED, // CUB segmented method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
CDP_SEGMENTED, // GPU-based segmented method
};
inline const char* BackendToString(Backend b)
{
switch (b)
{
case CUB:
return "CUB";
case CUB_SEGMENTED:
return "CUB_SEGMENTED";
case CDP:
return "CDP";
case CDP_SEGMENTED:
return "CDP_SEGMENTED";
default:
break;
}
return "";
}
// Custom max functor
struct CustomMax
{
/// Boolean max operator, returns <tt>(a > b) ? a : b</tt>
template <typename T, typename C>
__host__ __device__ auto operator()(T&& a, C&& b)
-> cub::detail::accumulator_t<cub::Max, T, C>
{
return CUB_MAX(a, b);
}
};
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceReduce entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to reduce entrypoint (custom-max)
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION
cudaError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
BeginOffsetIteratorT /*d_segment_begin_offsets*/,
EndOffsetIteratorT /*d_segment_end_offsets*/,
ReductionOpT reduction_op)
{
using InputT = cub::detail::value_t<InputIteratorT>;
// The output value type
using OutputT = cub::detail::non_void_value_t<OutputIteratorT, InputT>;
// Max-identity
OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, reduction_op, identity);
}
return error;
}
/**
* Dispatch to sum entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
cudaError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
BeginOffsetIteratorT /*d_segment_begin_offsets*/,
EndOffsetIteratorT /*d_segment_end_offsets*/,
cub::Sum /*reduction_op*/)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
}
return error;
}
/**
* Dispatch to min entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
cudaError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
BeginOffsetIteratorT /*d_segment_begin_offsets*/,
EndOffsetIteratorT /*d_segment_end_offsets*/,
cub::Min /*reduction_op*/)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
}
return error;
}
/**
* Dispatch to max entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
cudaError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
BeginOffsetIteratorT /*d_segment_begin_offsets*/,
EndOffsetIteratorT /*d_segment_end_offsets*/,
cub::Max /*reduction_op*/)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
}
return error;
}
/**
* Dispatch to argmin entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
cudaError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
BeginOffsetIteratorT /*d_segment_begin_offsets*/,
EndOffsetIteratorT /*d_segment_end_offsets*/,
cub::ArgMin /*reduction_op*/)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
}
return error;
}
/**
* Dispatch to argmax entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
cudaError_t Dispatch(
Int2Type<CUB> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int /*max_segments*/,
BeginOffsetIteratorT /*d_segment_begin_offsets*/,
EndOffsetIteratorT /*d_segment_end_offsets*/,
cub::ArgMax /*reduction_op*/)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
}
return error;
}
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceSegmentedReduce entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to reduce entrypoint (custom-max)
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT, typename ReductionOpT>
CUB_RUNTIME_FUNCTION
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
ReductionOpT reduction_op)
{
// The input value type
using InputT = cub::detail::value_t<InputIteratorT>;
// The output value type
using OutputT = cub::detail::non_void_value_t<OutputIteratorT, InputT>;
// Max-identity
OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_begin_offsets, d_segment_end_offsets, reduction_op, identity);
}
return error;
}
/**
* Dispatch to sum entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
cub::Sum /*reduction_op*/)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_begin_offsets, d_segment_end_offsets);
}
return error;
}
/**
* Dispatch to min entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
cub::Min /*reduction_op*/)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Min(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_begin_offsets, d_segment_end_offsets);
}
return error;
}
/**
* Dispatch to max entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
cub::Max /*reduction_op*/)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::Max(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_begin_offsets, d_segment_end_offsets);
}
return error;
}
/**
* Dispatch to argmin entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
cub::ArgMin /*reduction_op*/)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::ArgMin(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_begin_offsets, d_segment_end_offsets);
}
return error;
}
/**
* Dispatch to argmax entrypoint
*/
template <typename InputIteratorT, typename OutputIteratorT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION
cudaError_t Dispatch(
Int2Type<CUB_SEGMENTED> /*dispatch_to*/,
int timing_iterations,
size_t */*d_temp_storage_bytes*/,
cudaError_t */*d_cdp_error*/,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int /*num_items*/,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
cub::ArgMax /*reduction_op*/)
{
// Invoke kernel to device reduction directly
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_iterations; ++i)
{
error = DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes,
d_in, d_out, max_segments, d_segment_begin_offsets, d_segment_end_offsets);
}
return error;
}
//---------------------------------------------------------------------
// CUDA nested-parallelism test kernel
//---------------------------------------------------------------------
#if TEST_CDP == 1
/**
* Simple wrapper kernel to invoke DeviceReduce
*/
template <int CubBackend,
typename InputIteratorT,
typename OutputIteratorT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT,
typename ReductionOpT>
__global__ void CDPDispatchKernel(Int2Type<CubBackend> cub_backend,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void *d_temp_storage,
size_t temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
ReductionOpT reduction_op)
{
*d_cdp_error = Dispatch(cub_backend,
timing_iterations,
d_temp_storage_bytes,
d_cdp_error,
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_items,
max_segments,
d_segment_begin_offsets,
d_segment_end_offsets,
reduction_op);
*d_temp_storage_bytes = temp_storage_bytes;
}
/**
* Launch kernel and dispatch on device. Should only be called from host code.
* The CubBackend should be one of the non-CDP CUB backends to invoke from the
* device.
*/
template <int CubBackend,
typename InputIteratorT,
typename OutputIteratorT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT,
typename ReductionOpT>
cudaError_t LaunchCDPKernel(Int2Type<CubBackend> cub_backend,
int timing_iterations,
size_t *d_temp_storage_bytes,
cudaError_t *d_cdp_error,
void *d_temp_storage,
size_t &temp_storage_bytes,
InputIteratorT d_in,
OutputIteratorT d_out,
int num_items,
int max_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
ReductionOpT reduction_op)
{
cudaError_t retval =
thrust::cuda_cub::launcher::triple_chevron(1, 1, 0, 0)
.doit(CDPDispatchKernel<CubBackend,
InputIteratorT,
OutputIteratorT,
BeginOffsetIteratorT,
EndOffsetIteratorT,
ReductionOpT>,
cub_backend,
timing_iterations,
d_temp_storage_bytes,
d_cdp_error,
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_items,
max_segments,
d_segment_begin_offsets,
d_segment_end_offsets,
reduction_op);
CubDebugExit(retval);
CubDebugExit(cub::detail::device_synchronize());
// Copy out temp_storage_bytes
CubDebugExit(cudaMemcpy(&temp_storage_bytes,
d_temp_storage_bytes,
sizeof(size_t) * 1,
cudaMemcpyDeviceToHost));
// Copy out error
CubDebugExit(cudaMemcpy(&retval,
d_cdp_error,
sizeof(cudaError_t) * 1,
cudaMemcpyDeviceToHost));
return retval;
}
// Specializations of Dispatch that translate the CDP backend to the appropriate
// CUB backend and use the CUB backend to launch the CDP kernel.
#define DEFINE_CDP_DISPATCHER(CdpBackend, CubBackend) \
template <typename InputIteratorT, \
typename OutputIteratorT, \
typename BeginOffsetIteratorT, \
typename EndOffsetIteratorT, \
typename ReductionOpT> \
cudaError_t Dispatch(Int2Type<CdpBackend>, \
int timing_iterations, \
size_t *d_temp_storage_bytes, \
cudaError_t *d_cdp_error, \
\
void *d_temp_storage, \
size_t &temp_storage_bytes, \
InputIteratorT d_in, \
OutputIteratorT d_out, \
int num_items, \
int max_segments, \
BeginOffsetIteratorT d_segment_begin_offsets, \
EndOffsetIteratorT d_segment_end_offsets, \
ReductionOpT reduction_op) \
{ \
Int2Type<CubBackend> cub_backend{}; \
return LaunchCDPKernel(cub_backend, \
timing_iterations, \
d_temp_storage_bytes, \
d_cdp_error, \
d_temp_storage, \
temp_storage_bytes, \
d_in, \
d_out, \
num_items, \
max_segments, \
d_segment_begin_offsets, \
d_segment_end_offsets, \
reduction_op); \
}
DEFINE_CDP_DISPATCHER(CDP, CUB)
DEFINE_CDP_DISPATCHER(CDP_SEGMENTED, CUB_SEGMENTED)
#undef DEFINE_CDP_DISPATCHER
#endif // TEST_CDP
//---------------------------------------------------------------------
// Problem generation
//---------------------------------------------------------------------
/// Initialize problem
template <typename InputT>
void Initialize(
GenMode gen_mode,
InputT *h_in,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
InitValue(gen_mode, h_in[i], i);
}
if (g_verbose_input)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/// Solve problem (max/custom-max functor)
template <typename ReductionOpT, typename InputT, typename _OutputT>
struct Solution
{
using OutputT = _OutputT;
using InitT = OutputT;
using AccumT = cub::detail::accumulator_t<ReductionOpT, InitT, InputT>;
template <typename HostInputIteratorT, typename OffsetT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets, EndOffsetIteratorT h_segment_end_offsets, ReductionOpT reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
AccumT aggregate = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
for (int j = h_segment_begin_offsets[i]; j < h_segment_end_offsets[i]; ++j)
aggregate = reduction_op(aggregate, OutputT(h_in[j]));
h_reference[i] = aggregate;
}
}
};
/// Solve problem (min functor)
template <typename InputT, typename _OutputT>
struct Solution<cub::Min, InputT, _OutputT>
{
using OutputT = _OutputT;
using InitT = OutputT;
using AccumT = cub::detail::accumulator_t<cub::Min, InitT, InputT>;
template <typename HostInputIteratorT, typename OffsetT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets, EndOffsetIteratorT h_segment_end_offsets, cub::Min reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
AccumT aggregate = Traits<InputT>::Max(); // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent
for (int j = h_segment_begin_offsets[i]; j < h_segment_end_offsets[i]; ++j)
aggregate = reduction_op(aggregate, OutputT(h_in[j]));
h_reference[i] = aggregate;
}
}
};
/// Solve problem (sum functor)
template <typename InputT, typename _OutputT>
struct Solution<cub::Sum, InputT, _OutputT>
{
using OutputT = _OutputT;
using InitT = OutputT;
using AccumT = cub::detail::accumulator_t<cub::Sum, InitT, InputT>;
template <typename HostInputIteratorT, typename OffsetT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets, EndOffsetIteratorT h_segment_end_offsets, cub::Sum reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
AccumT aggregate;
InitValue(INTEGER_SEED, aggregate, 0);
for (int j = h_segment_begin_offsets[i]; j < h_segment_end_offsets[i]; ++j)
aggregate = reduction_op(aggregate, h_in[j]);
h_reference[i] = static_cast<OutputT>(aggregate);
}
}
};
/// Solve problem (argmin functor)
template <typename InputValueT, typename OutputValueT>
struct Solution<cub::ArgMin, InputValueT, OutputValueT>
{
typedef KeyValuePair<int, OutputValueT> OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets, EndOffsetIteratorT h_segment_end_offsets, cub::ArgMin reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate(1, Traits<InputValueT>::Max()); // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent
for (int j = h_segment_begin_offsets[i]; j < h_segment_end_offsets[i]; ++j)
{
OutputT item(j - h_segment_begin_offsets[i], OutputValueT(h_in[j]));
aggregate = reduction_op(aggregate, item);
}
h_reference[i] = aggregate;
}
}
};
/// Solve problem (argmax functor)
template <typename InputValueT, typename OutputValueT>
struct Solution<cub::ArgMax, InputValueT, OutputValueT>
{
typedef KeyValuePair<int, OutputValueT> OutputT;
template <typename HostInputIteratorT, typename OffsetT, typename BeginOffsetIteratorT, typename EndOffsetIteratorT>
static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets, EndOffsetIteratorT h_segment_end_offsets, cub::ArgMax reduction_op)
{
for (int i = 0; i < num_segments; ++i)
{
OutputT aggregate(1, Traits<InputValueT>::Lowest()); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent
for (int j = h_segment_begin_offsets[i]; j < h_segment_end_offsets[i]; ++j)
{
OutputT item(j - h_segment_begin_offsets[i], OutputValueT(h_in[j]));
aggregate = reduction_op(aggregate, item);
}
h_reference[i] = aggregate;
}
}
};
//---------------------------------------------------------------------
// Problem generation
//---------------------------------------------------------------------
/// Test DeviceReduce for a given problem input
template <
typename BackendT,
typename DeviceInputIteratorT,
typename DeviceOutputIteratorT,
typename HostReferenceIteratorT,
typename OffsetT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT,
typename ReductionOpT>
void Test(
BackendT backend,
DeviceInputIteratorT d_in,
DeviceOutputIteratorT d_out,
OffsetT num_items,
OffsetT num_segments,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
ReductionOpT reduction_op,
HostReferenceIteratorT h_reference)
{
// Input data types
using InputT = cub::detail::value_t<DeviceInputIteratorT>;
// Allocate CDP device arrays for temp storage size and error
size_t *d_temp_storage_bytes = NULL;
cudaError_t *d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1));
// Inquire temp device storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(Dispatch(backend, 1,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_begin_offsets, d_segment_end_offsets,
reduction_op));
// Allocate temp device storage
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Run warmup/correctness iteration
CubDebugExit(Dispatch(backend, 1,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_begin_offsets, d_segment_end_offsets,
reduction_op));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(h_reference, d_out, num_segments, g_verbose, g_verbose);
printf("\t%s", compare ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
if (g_timing_iterations > 0)
{
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(backend, g_timing_iterations,
d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes,
d_in, d_out, num_items, num_segments, d_segment_begin_offsets, d_segment_end_offsets,
reduction_op));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
float giga_bandwidth = giga_rate * sizeof(InputT);
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak",
avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
}
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, compare);
}
/// Test DeviceReduce
template <
Backend BACKEND,
typename OutputValueT,
typename HostInputIteratorT,
typename DeviceInputIteratorT,
typename OffsetT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT,
typename ReductionOpT>
void SolveAndTest(
HostInputIteratorT h_in,
DeviceInputIteratorT d_in,
OffsetT num_items,
OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets,
EndOffsetIteratorT h_segment_end_offsets,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets,
ReductionOpT reduction_op)
{
using InputValueT = cub::detail::value_t<DeviceInputIteratorT>;
using SolutionT = Solution<ReductionOpT, InputValueT, OutputValueT>;
using OutputT = typename SolutionT::OutputT;
printf("\n\n%s cub::DeviceReduce<%s> %d items (%s), %d segments\n",
BackendToString(BACKEND),
typeid(ReductionOpT).name(),
num_items,
typeid(HostInputIteratorT).name(),
num_segments);
fflush(stdout);
// Allocate and solve solution
OutputT *h_reference = new OutputT[num_segments];
SolutionT::Solve(h_in, h_reference, num_segments, h_segment_begin_offsets, h_segment_end_offsets, reduction_op);
// Run with discard iterator
DiscardOutputIterator<OffsetT> discard_itr;
Test(Int2Type<BACKEND>(), d_in, discard_itr, num_items, num_segments, d_segment_begin_offsets, d_segment_end_offsets, reduction_op, h_reference);
// Run with output data
OutputT *d_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(OutputT) * num_segments));
CubDebugExit(cudaMemset(d_out, 0, sizeof(OutputT) * num_segments));
Test(Int2Type<BACKEND>(), d_in, d_out, num_items, num_segments, d_segment_begin_offsets, d_segment_end_offsets, reduction_op, h_reference);
// Cleanup
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (h_reference) delete[] h_reference;
}
/// Test specific problem type
template <
Backend BACKEND,
typename InputT,
typename OutputT,
typename OffsetT,
typename ReductionOpT>
void TestProblem(
OffsetT num_items,
OffsetT num_segments,
GenMode gen_mode,
ReductionOpT reduction_op)
{
printf("\n\nInitializing %d %s->%s (gen mode %d)... ", num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout);
fflush(stdout);
// Initialize value data
InputT* h_in = new InputT[num_items];
Initialize(gen_mode, h_in, num_items);
// Initialize segment data
OffsetT *h_segment_offsets = new OffsetT[num_segments + 1];
InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input);
// Initialize device data
OffsetT *d_segment_offsets = NULL;
InputT *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (num_segments + 1)));
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(InputT) * num_items, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), cudaMemcpyHostToDevice));
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_offsets, h_segment_offsets + 1, d_segment_offsets, d_segment_offsets + 1, reduction_op);
if (h_segment_offsets) delete[] h_segment_offsets;
if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets));
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
}
/// Test different operators
template <
Backend BACKEND,
typename OutputT,
typename HostInputIteratorT,
typename DeviceInputIteratorT,
typename OffsetT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
void TestByOp(
HostInputIteratorT h_in,
DeviceInputIteratorT d_in,
OffsetT num_items,
OffsetT num_segments,
BeginOffsetIteratorT h_segment_begin_offsets,
EndOffsetIteratorT h_segment_end_offsets,
BeginOffsetIteratorT d_segment_begin_offsets,
EndOffsetIteratorT d_segment_end_offsets)
{
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets, h_segment_end_offsets, d_segment_begin_offsets, d_segment_end_offsets, CustomMax());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets, h_segment_end_offsets, d_segment_begin_offsets, d_segment_end_offsets, Sum());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets, h_segment_end_offsets, d_segment_begin_offsets, d_segment_end_offsets, Min());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets, h_segment_end_offsets, d_segment_begin_offsets, d_segment_end_offsets, ArgMin());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets, h_segment_end_offsets, d_segment_begin_offsets, d_segment_end_offsets, Max());
SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets, h_segment_end_offsets, d_segment_begin_offsets, d_segment_end_offsets, ArgMax());
}
template<typename OffsetT>
struct TransformFunctor1
{
__host__ __device__ __forceinline__ OffsetT operator()(OffsetT offset) const
{
return offset;
}
};
template<typename OffsetT>
struct TransformFunctor2
{
__host__ __device__ __forceinline__ OffsetT operator()(OffsetT offset) const
{
return offset;
}
};
/// Test different backends
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestByBackend(
OffsetT num_items,
OffsetT max_segments,
GenMode gen_mode)
{
#if TEST_CDP == 0
constexpr auto NonSegmentedBackend = CUB;
constexpr auto SegmentedBackend = CUB_SEGMENTED;
#else // TEST_CDP
constexpr auto NonSegmentedBackend = CDP;
constexpr auto SegmentedBackend = CDP_SEGMENTED;
#endif // TEST_CDP
// Initialize host data
printf("\n\nInitializing %d %s -> %s (gen mode %d)... ",
num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout);
InputT *h_in = new InputT[num_items];
OffsetT *h_segment_offsets = new OffsetT[max_segments + 1];
Initialize(gen_mode, h_in, num_items);
// Initialize device data
InputT *d_in = NULL;
OffsetT *d_segment_offsets = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (max_segments + 1)));
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(InputT) * num_items, cudaMemcpyHostToDevice));
//
// Test single-segment implementations
//
InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input);
// Page-aligned-input tests
TestByOp<NonSegmentedBackend, OutputT>(h_in, d_in, num_items, 1,
h_segment_offsets, h_segment_offsets + 1, (OffsetT*) NULL, (OffsetT*)NULL);
// Non-page-aligned-input tests
if (num_items > 1)
{
InitializeSegments(num_items - 1, 1, h_segment_offsets, g_verbose_input);
TestByOp<NonSegmentedBackend, OutputT>(h_in + 1, d_in + 1, num_items - 1, 1,
h_segment_offsets, h_segment_offsets + 1, (OffsetT*) NULL, (OffsetT*)NULL);
}
//
// Test segmented implementation
//
  // Right now we assign a single thread block to each segment, so let's keep it to under 128K items per segment
int max_items_per_segment = 128000;
for (int num_segments = cub::DivideAndRoundUp(num_items, max_items_per_segment);
num_segments < max_segments;
num_segments = (num_segments * 32) + 1)
{
// Test with segment pointer
InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input);
CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), cudaMemcpyHostToDevice));
TestByOp<SegmentedBackend, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_offsets, h_segment_offsets + 1, d_segment_offsets, d_segment_offsets + 1);
// Test with segment iterator
typedef CastOp<OffsetT> IdentityOpT;
IdentityOpT identity_op;
TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> h_segment_offsets_itr(
h_segment_offsets,
identity_op);
TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> d_segment_offsets_itr(
d_segment_offsets,
identity_op);
TestByOp<SegmentedBackend, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_offsets_itr, h_segment_offsets_itr + 1, d_segment_offsets_itr, d_segment_offsets_itr + 1);
// Test with transform iterators of different types
typedef TransformFunctor1<OffsetT> TransformFunctor1T;
typedef TransformFunctor2<OffsetT> TransformFunctor2T;
TransformInputIterator<OffsetT, TransformFunctor1T, OffsetT*, OffsetT> h_segment_begin_offsets_itr(h_segment_offsets, TransformFunctor1T());
TransformInputIterator<OffsetT, TransformFunctor2T, OffsetT*, OffsetT> h_segment_end_offsets_itr(h_segment_offsets + 1, TransformFunctor2T());
TransformInputIterator<OffsetT, TransformFunctor1T, OffsetT*, OffsetT> d_segment_begin_offsets_itr(d_segment_offsets, TransformFunctor1T());
TransformInputIterator<OffsetT, TransformFunctor2T, OffsetT*, OffsetT> d_segment_end_offsets_itr(d_segment_offsets + 1, TransformFunctor2T());
TestByOp<SegmentedBackend, OutputT>(h_in, d_in, num_items, num_segments,
h_segment_begin_offsets_itr, h_segment_end_offsets_itr,
d_segment_begin_offsets_itr, d_segment_end_offsets_itr);
}
if (h_in) delete[] h_in;
if (h_segment_offsets) delete[] h_segment_offsets;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets));
}
/// Test different input-generation modes
template <
typename InputT,
typename OutputT,
typename OffsetT>
void TestByGenMode(
OffsetT num_items,
OffsetT max_segments)
{
//
// Test pointer support using different input-generation modes
//
TestByBackend<InputT, OutputT>(num_items, max_segments, UNIFORM);
TestByBackend<InputT, OutputT>(num_items, max_segments, INTEGER_SEED);
TestByBackend<InputT, OutputT>(num_items, max_segments, RANDOM);
//
// Test iterator support using a constant-iterator and SUM
//
InputT val;
InitValue(UNIFORM, val, 0);
ConstantInputIterator<InputT, OffsetT> h_in(val);
OffsetT *h_segment_offsets = new OffsetT[1 + 1];
InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input);
#if TEST_CDP == 0
constexpr auto Backend = CUB;
#else // TEST_CDP
constexpr auto Backend = CDP;
#endif // TEST_CDP
SolveAndTest<Backend, OutputT>(h_in, h_in, num_items, 1,
h_segment_offsets, h_segment_offsets + 1, (OffsetT*) NULL, (OffsetT*)NULL, Sum());
if (h_segment_offsets) delete[] h_segment_offsets;
}
/// Test different problem sizes
template <typename InputT, typename OutputT, typename OffsetT>
void TestBySize(OffsetT max_items, OffsetT max_segments, OffsetT tile_size)
{
// Test 0, 1, many
TestByGenMode<InputT, OutputT>(0, max_segments);
TestByGenMode<InputT, OutputT>(1, max_segments);
TestByGenMode<InputT, OutputT>(max_items, max_segments);
// Test random problem sizes from a log-distribution [8, max_items-ish)
int num_iterations = 8;
double max_exp = log(double(max_items)) / log(double(2.0));
for (int i = 0; i < num_iterations; ++i)
{
OffsetT num_items = (OffsetT)pow(2.0, RandomValue(max_exp - 3.0) + 3.0);
TestByGenMode<InputT, OutputT>(num_items, max_segments);
}
//
// White-box testing of single-segment problems around specific sizes
//
#if TEST_CDP == 0
constexpr auto Backend = CUB;
#else // TEST_CDP
constexpr auto Backend = CDP;
#endif // TEST_CDP
// Tile-boundaries: multiple blocks, one tile per block
TestProblem<Backend, InputT, OutputT>(tile_size * 4, 1, RANDOM, Sum());
TestProblem<Backend, InputT, OutputT>(tile_size * 4 + 1, 1, RANDOM, Sum());
TestProblem<Backend, InputT, OutputT>(tile_size * 4 - 1, 1, RANDOM, Sum());
// Tile-boundaries: multiple blocks, multiple tiles per block
OffsetT sm_occupancy = 32;
OffsetT occupancy = tile_size * sm_occupancy * g_sm_count;
TestProblem<Backend, InputT, OutputT>(occupancy, 1, RANDOM, Sum());
TestProblem<Backend, InputT, OutputT>(occupancy + 1, 1, RANDOM, Sum());
TestProblem<Backend, InputT, OutputT>(occupancy - 1, 1, RANDOM, Sum());
}
class CustomInputT
{
char m_val{};
public:
__host__ __device__ explicit CustomInputT(char val)
: m_val(val)
{}
__host__ __device__ int get() const { return static_cast<int>(m_val); }
};
class CustomAccumulatorT
{
int m_val{0};
int m_magic_value{42};
__host__ __device__ CustomAccumulatorT(int val)
: m_val(val)
{}
public:
__host__ __device__ CustomAccumulatorT()
{}
__host__ __device__ CustomAccumulatorT(const CustomAccumulatorT &in)
: m_val(in.is_valid() * in.get())
, m_magic_value(in.is_valid() * 42)
{}
__host__ __device__ void operator=(const CustomInputT &in)
{
if (this->is_valid())
{
m_val = in.get();
}
}
__host__ __device__ void operator=(const CustomAccumulatorT &in)
{
if (this->is_valid() && in.is_valid())
{
m_val = in.get();
}
}
__host__ __device__ CustomAccumulatorT
operator+(const CustomInputT &in) const
{
const int multiplier = this->is_valid();
return {(m_val + in.get()) * multiplier};
}
__host__ __device__ CustomAccumulatorT
operator+(const CustomAccumulatorT &in) const
{
const int multiplier = this->is_valid() && in.is_valid();
return {(m_val + in.get()) * multiplier};
}
__host__ __device__ int get() const { return m_val; }
__host__ __device__ bool is_valid() const { return m_magic_value == 42; }
};
class CustomOutputT
{
bool *m_d_flag{};
int m_expected{};
public:
__host__ __device__ CustomOutputT(bool *d_flag, int expected)
: m_d_flag(d_flag)
, m_expected(expected)
{}
__host__ __device__ void operator=(const CustomAccumulatorT &accum) const
{
*m_d_flag = accum.is_valid() && (accum.get() == m_expected);
}
};
__global__ void InitializeTestAccumulatorTypes(int num_items,
int expected,
bool *d_flag,
CustomInputT *d_in,
CustomOutputT *d_out)
{
const int idx = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x);
if (idx < num_items)
{
d_in[idx] = CustomInputT(1);
}
if (idx == 0)
{
*d_out = CustomOutputT{d_flag, expected};
}
}
template <typename T,
typename OffsetT>
void TestBigIndicesHelper(OffsetT num_items)
{
thrust::constant_iterator<T> const_iter(T{1});
thrust::device_vector<std::size_t> out(1);
std::size_t* d_out = thrust::raw_pointer_cast(out.data());
std::uint8_t *d_temp_storage{};
std::size_t temp_storage_bytes{};
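  // DeviceReduce follows CUB's two-phase calling convention: the first call,
  // made with a null d_temp_storage, only reports the required
  // temp_storage_bytes; the reduction itself runs on the second call once the
  // scratch buffer has been allocated.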
CubDebugExit(
cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, const_iter, d_out, num_items));
thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes);
d_temp_storage = thrust::raw_pointer_cast(temp_storage.data());
CubDebugExit(
cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, const_iter, d_out, num_items));
std::size_t result = out[0];
AssertEquals(result, num_items);
}
template <typename T>
void TestBigIndices()
{
TestBigIndicesHelper<T, std::uint32_t>(1ull << 30);
TestBigIndicesHelper<T, std::uint32_t>(1ull << 31);
TestBigIndicesHelper<T, std::uint32_t>((1ull << 32) - 1);
TestBigIndicesHelper<T, std::uint64_t>(1ull << 33);
}
void TestAccumulatorTypes()
{
const int num_items = 2 * 1024 * 1024;
const int expected = num_items;
const int block_size = 256;
const int grid_size = (num_items + block_size - 1) / block_size;
CustomInputT *d_in{};
CustomOutputT *d_out{};
CustomAccumulatorT init{};
bool *d_flag{};
CubDebugExit(
g_allocator.DeviceAllocate((void **)&d_out, sizeof(CustomOutputT)));
CubDebugExit(g_allocator.DeviceAllocate((void **)&d_flag, sizeof(bool)));
CubDebugExit(g_allocator.DeviceAllocate((void **)&d_in,
sizeof(CustomInputT) * num_items));
InitializeTestAccumulatorTypes<<<grid_size, block_size>>>(num_items,
expected,
d_flag,
d_in,
d_out);
std::uint8_t *d_temp_storage{};
std::size_t temp_storage_bytes{};
CubDebugExit(cub::DeviceReduce::Reduce(d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_items,
cub::Sum{},
init));
CubDebugExit(
g_allocator.DeviceAllocate((void **)&d_temp_storage, temp_storage_bytes));
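  // Deliberately fill the temp storage with nonzero bytes: if the reduction
  // relied on zero-initialized scratch memory to produce valid accumulators,
  // the magic-value check in CustomAccumulatorT::is_valid() would fail and the
  // result flag checked below would come back false.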
CubDebugExit(cudaMemset(d_temp_storage, 1, temp_storage_bytes));
CubDebugExit(cub::DeviceReduce::Reduce(d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
num_items,
cub::Sum{},
init));
bool ok{};
CubDebugExit(cudaMemcpy(&ok, d_flag, sizeof(bool), cudaMemcpyDeviceToHost));
AssertTrue(ok);
CubDebugExit(g_allocator.DeviceFree(d_out));
CubDebugExit(g_allocator.DeviceFree(d_in));
}
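// Helper functor handed to DeviceReducePolicy's MaxPolicy::Invoke(): the policy
// dispatcher selects the tuning policy matching the detected PTX version and
// calls Invoke<ActivePolicyT>(), which records that policy's tile size
// (BLOCK_THREADS * ITEMS_PER_THREAD) for the white-box tests in TestBySize.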
template <typename InputT, typename OutputT, typename OffsetT>
struct GetTileSize
{
OffsetT max_items{};
OffsetT max_segments{};
OffsetT tile_size{};
GetTileSize(OffsetT max_items, OffsetT max_segments)
: max_items(max_items)
, max_segments(max_segments)
{}
template <typename ActivePolicyT>
CUB_RUNTIME_FUNCTION cudaError_t Invoke()
{
this->tile_size = ActivePolicyT::ReducePolicy::BLOCK_THREADS *
ActivePolicyT::ReducePolicy::ITEMS_PER_THREAD;
return cudaSuccess;
}
};
/// Test problem type
template <typename InputT, typename OutputT, typename OffsetT>
void TestType(OffsetT max_items, OffsetT max_segments)
{
// Inspect the tuning policies to determine this arch's tile size:
using MaxPolicyT =
typename DeviceReducePolicy<InputT, OffsetT, cub::Sum>::MaxPolicy;
GetTileSize<InputT, OutputT, OffsetT> dispatch(max_items, max_segments);
CubDebugExit(MaxPolicyT::Invoke(g_ptx_version, dispatch));
TestBySize<InputT, OutputT>(max_items, max_segments, dispatch.tile_size);
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
typedef int OffsetT;
OffsetT max_items = 27000000;
OffsetT max_segments = 34000;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
g_verbose_input = args.CheckCmdLineFlag("v2");
args.GetCmdLineArgument("n", max_items);
args.GetCmdLineArgument("s", max_segments);
args.GetCmdLineArgument("i", g_timing_iterations);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items> "
"[--s=<num segments> "
"[--i=<timing iterations> "
"[--device=<device-id>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
g_device_giga_bandwidth = args.device_giga_bandwidth;
// Get ptx version
CubDebugExit(PtxVersion(g_ptx_version));
// Get SM count
g_sm_count = args.deviceProp.multiProcessorCount;
// %PARAM% TEST_CDP cdp 0:1
// %PARAM% TEST_TYPES types 0:1:2:3
#if TEST_TYPES == 0
TestType<signed char, signed char>(max_items, max_segments);
TestType<unsigned char, unsigned char>(max_items, max_segments);
TestType<signed char, int>(max_items, max_segments);
#elif TEST_TYPES == 1
TestType<short, short>(max_items, max_segments);
TestType<int, int>(max_items, max_segments);
TestType<long, long>(max_items, max_segments);
TestType<long long, long long>(max_items, max_segments);
#elif TEST_TYPES == 2
TestType<uchar2, uchar2>(max_items, max_segments);
TestType<uint2, uint2>(max_items, max_segments);
TestType<ulonglong2, ulonglong2>(max_items, max_segments);
TestType<ulonglong4, ulonglong4>(max_items, max_segments);
#else // TEST_TYPES == 3
TestType<TestFoo, TestFoo>(max_items, max_segments);
TestType<TestBar, TestBar>(max_items, max_segments);
TestAccumulatorTypes();
TestBigIndices<std::size_t>();
#endif
printf("\n");
return 0;
}
|
22fcf3bfc18376932980ffe6e98cdc6a2632166a.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
// SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION &
// AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime.h>
#include <cassert>
#include <cstring>
#include <iostream>
#include <vector>
#include "NvInfer.h"
#include "paddle/fluid/inference/tensorrt/plugin/common/common.cuh"
#include "paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_varseqlen_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
template <typename T, unsigned TPB>
__global__ void embLayerNormKernelHFace_2(int32_t ld,
int32_t const* inputIds0,
int32_t const* inputIds1,
float const* beta,
float const* gamma,
T const* mIdsEmbDev0,
T const* mIdsEmbDev1,
int32_t IdsSize0,
int32_t IdsSize1,
T* output) {
hipcub::Sum pairSum;
int32_t const s = blockIdx.x;
int32_t const b = blockIdx.y;
int32_t const sumS = inputIds0[b];
int32_t const s_b = inputIds0[b + 1] - sumS;
if (s >= s_b) {
return; // This CTA has nothing to do
}
T const rld = T(1.f) / T(ld);
int32_t const seqPos = sumS + s;
extern __shared__ int32_t word_id[];
if (threadIdx.x == 0) {
if (static_cast<int32_t const*>(inputIds1)[seqPos] < 0 ||
static_cast<int32_t const*>(inputIds1)[seqPos] >= IdsSize1) {
printf(
"Error!!!!!!(embLayerNormVarSeqlenPlugin): ID cannot be lookup "
"table: ID < 0 or ID > max ");
return;
} else {
word_id[0] = static_cast<int32_t const*>(inputIds1)[seqPos];
}
}
__syncthreads();
  // 2. load pos/tok/word embeddings and add them together
// offset into embeddings is given by wordId * hidden_size
int32_t const poffset = blockIdx.x * ld;
int32_t const outOffset = seqPos * ld;
// the output offset is given by b * (S*hidden_size) + s * hidden_size
kvp<T> threadData(0, 0);
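  // threadData accumulates the running pair (sum(val) / ld, sum(val^2) / ld)
  // over this thread's strided elements; layerNorm() below reduces these pairs
  // across the block to recover the mean and variance used for normalization.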
for (int32_t it = threadIdx.x; it < ld; it += TPB) {
T p(mIdsEmbDev0[poffset + it]); // pos id
T val = p;
int32_t const offset = word_id[0] * ld;
val += mIdsEmbDev1[offset + it];
output[outOffset + it] = val;
T const rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
// 3. layer norm on the sum
layerNorm<T, T, float, TPB>(threadData, ld, outOffset, beta, gamma, output);
}
template <typename T, unsigned TPB>
__global__ void embLayerNormKernelHFace_3(int32_t ld,
int32_t const* inputIds0,
int32_t const* inputIds1,
int32_t const* inputIds2,
float const* beta,
float const* gamma,
T const* mIdsEmbDev0,
T const* mIdsEmbDev1,
T const* mIdsEmbDev2,
int32_t IdsSize0,
int32_t IdsSize1,
int32_t IdsSize2,
T* output) {
hipcub::Sum pairSum;
int32_t const s = blockIdx.x;
int32_t const b = blockIdx.y;
int32_t const sumS = inputIds0[b];
int32_t const s_b = inputIds0[b + 1] - sumS;
if (s >= s_b) {
return; // This CTA has nothing to do
}
T const rld = T(1.f) / T(ld);
int32_t const seqPos = sumS + s;
extern __shared__ int32_t word_id[];
if (threadIdx.x == 0) {
if (static_cast<int32_t const*>(inputIds1)[seqPos] < 0 ||
static_cast<int32_t const*>(inputIds1)[seqPos] >= IdsSize1) {
printf(
"Error!!!!!!(embLayerNormVarSeqlenPlugin): ID cannot be lookup "
"table: ID < 0 or ID > max ");
return;
} else {
word_id[0] = static_cast<int32_t const*>(inputIds1)[seqPos];
}
if (static_cast<int32_t const*>(inputIds2)[seqPos] < 0 ||
static_cast<int32_t const*>(inputIds2)[seqPos] >= IdsSize2) {
printf(
"Error!!!!!!(embLayerNormVarSeqlenPlugin): ID cannot be lookup "
"table: ID < 0 or ID > max ");
return;
} else {
word_id[1] = static_cast<int32_t const*>(inputIds2)[seqPos];
}
}
__syncthreads();
  // 2. load pos/tok/word embeddings and add them together
// offset into embeddings is given by wordId * hidden_size
int32_t const poffset = blockIdx.x * ld;
int32_t const outOffset = seqPos * ld;
// the output offset is given by b * (S*hidden_size) + s * hidden_size
kvp<T> threadData(0, 0);
for (int32_t it = threadIdx.x; it < ld; it += TPB) {
T p(mIdsEmbDev0[poffset + it]); // pos id
T val = p;
int32_t const offset0 = word_id[0] * ld;
val += mIdsEmbDev1[offset0 + it];
int32_t const offset1 = word_id[1] * ld;
val += mIdsEmbDev2[offset1 + it];
output[outOffset + it] = val;
T const rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
// 3. layer norm on the sum
layerNorm<T, T, float, TPB>(threadData, ld, outOffset, beta, gamma, output);
}
template <typename T, unsigned TPB>
__global__ void embLayerNormKernelHFace_4(int32_t ld,
int32_t const* inputIds0,
int32_t const* inputIds1,
int32_t const* inputIds2,
int32_t const* inputIds3,
float const* beta,
float const* gamma,
T const* mIdsEmbDev0,
T const* mIdsEmbDev1,
T const* mIdsEmbDev2,
T const* mIdsEmbDev3,
int32_t IdsSize0,
int32_t IdsSize1,
int32_t IdsSize2,
int32_t IdsSize3,
T* output) {
hipcub::Sum pairSum;
int32_t const s = blockIdx.x;
int32_t const b = blockIdx.y;
int32_t const sumS = inputIds0[b];
int32_t const s_b = inputIds0[b + 1] - sumS;
if (s >= s_b) {
return; // This CTA has nothing to do
}
T const rld = T(1.f) / T(ld);
int32_t const seqPos = sumS + s;
extern __shared__ int32_t word_id[];
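  // 1. thread 0 looks up the three word ids for this token, validates them, and shares them through shared memory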
if (threadIdx.x == 0) {
if (static_cast<int32_t const*>(inputIds1)[seqPos] < 0 ||
static_cast<int32_t const*>(inputIds1)[seqPos] >= IdsSize1) {
      printf(
          "Error!!!!!!(embLayerNormVarSeqlenPlugin): ID cannot be looked up "
          "in the table: ID < 0 or ID > max ");
return;
} else {
word_id[0] = static_cast<int32_t const*>(inputIds1)[seqPos];
}
if (static_cast<int32_t const*>(inputIds2)[seqPos] < 0 ||
static_cast<int32_t const*>(inputIds2)[seqPos] >= IdsSize2) {
      printf(
          "Error!!!!!!(embLayerNormVarSeqlenPlugin): ID cannot be looked up "
          "in the table: ID < 0 or ID > max ");
return;
} else {
word_id[1] = static_cast<int32_t const*>(inputIds2)[seqPos];
}
if (static_cast<int32_t const*>(inputIds3)[seqPos] < 0 ||
static_cast<int32_t const*>(inputIds3)[seqPos] >= IdsSize3) {
      printf(
          "Error!!!!!!(embLayerNormVarSeqlenPlugin): ID cannot be looked up "
          "in the table: ID < 0 or ID > max ");
return;
} else {
word_id[2] = static_cast<int32_t const*>(inputIds3)[seqPos];
}
}
__syncthreads();
  // 2. load pos/tok/word embeddings and add them together
// offset into embeddings is given by wordId * hidden_size
int32_t const poffset = blockIdx.x * ld;
int32_t const outOffset = seqPos * ld;
  // the output offset is given by seqPos * hidden_size (packed variable-length layout)
kvp<T> threadData(0, 0);
for (int32_t it = threadIdx.x; it < ld; it += TPB) {
T p(mIdsEmbDev0[poffset + it]); // pos id
T val = p;
int32_t const offset0 = word_id[0] * ld;
val += mIdsEmbDev1[offset0 + it];
int32_t const offset1 = word_id[1] * ld;
val += mIdsEmbDev2[offset1 + it];
int32_t const offset2 = word_id[2] * ld;
val += mIdsEmbDev3[offset2 + it];
output[outOffset + it] = val;
T const rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
// 3. layer norm on the sum
layerNorm<T, T, float, TPB>(threadData, ld, outOffset, beta, gamma, output);
}
template <typename T>
int32_t embSkipLayerNormHFace_2(hipStream_t stream,
int32_t ld,
int32_t B,
int32_t S,
int const* inputIds0,
int const* inputIds1,
int32_t nbLookupTables,
float const* beta,
float const* gamma,
T const* mIdsEmbDev0,
T const* mIdsEmbDev1,
int32_t IdsSize0,
int32_t IdsSize1,
T* output) {
constexpr int32_t tpb = 256;
dim3 const grid(S, B, 1);
dim3 const block(tpb, 1, 1);
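  // dynamic shared memory holds one word-id slot per lookup table, excluding the first (position) table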
size_t cache_size = sizeof(int32_t) * (nbLookupTables - 1);
hipLaunchKernelGGL(( embLayerNormKernelHFace_2<T, tpb>)
, dim3(grid), dim3(block), cache_size, stream, ld,
inputIds0,
inputIds1,
beta,
gamma,
mIdsEmbDev0,
mIdsEmbDev1,
IdsSize0,
IdsSize1,
output);
return hipPeekAtLastError();
}
template <typename T>
int32_t embSkipLayerNormHFace_3(hipStream_t stream,
int32_t ld,
int32_t B,
int32_t S,
int const* inputIds0,
int const* inputIds1,
int const* inputIds2,
int32_t nbLookupTables,
float const* beta,
float const* gamma,
T const* mIdsEmbDev0,
T const* mIdsEmbDev1,
T const* mIdsEmbDev2,
int32_t IdsSize0,
int32_t IdsSize1,
int32_t IdsSize2,
T* output) {
constexpr int32_t tpb = 256;
dim3 const grid(S, B, 1);
dim3 const block(tpb, 1, 1);
size_t cache_size = sizeof(int32_t) * (nbLookupTables - 1);
hipLaunchKernelGGL(( embLayerNormKernelHFace_3<T, tpb>)
, dim3(grid), dim3(block), cache_size, stream, ld,
inputIds0,
inputIds1,
inputIds2,
beta,
gamma,
mIdsEmbDev0,
mIdsEmbDev1,
mIdsEmbDev2,
IdsSize0,
IdsSize1,
IdsSize2,
output);
return hipPeekAtLastError();
}
template <typename T>
int32_t embSkipLayerNormHFace_4(hipStream_t stream,
int32_t ld,
int32_t B,
int32_t S,
int const* inputIds0,
int const* inputIds1,
int const* inputIds2,
int const* inputIds3,
int32_t nbLookupTables,
float const* beta,
float const* gamma,
T const* mIdsEmbDev0,
T const* mIdsEmbDev1,
T const* mIdsEmbDev2,
T const* mIdsEmbDev3,
int32_t IdsSize0,
int32_t IdsSize1,
int32_t IdsSize2,
int32_t IdsSize3,
T* output) {
constexpr int32_t tpb = 256;
dim3 const grid(S, B, 1);
dim3 const block(tpb, 1, 1);
size_t cache_size = sizeof(int32_t) * (nbLookupTables - 1);
hipLaunchKernelGGL(( embLayerNormKernelHFace_4<T, tpb>)
, dim3(grid), dim3(block), cache_size, stream, ld,
inputIds0,
inputIds1,
inputIds2,
inputIds3,
beta,
gamma,
mIdsEmbDev0,
mIdsEmbDev1,
mIdsEmbDev2,
mIdsEmbDev3,
IdsSize0,
IdsSize1,
IdsSize2,
IdsSize3,
output);
return hipPeekAtLastError();
}
template int32_t embSkipLayerNormHFace_2<float>(hipStream_t,
int32_t,
int32_t,
int32_t,
int32_t const*,
int32_t const*,
int32_t,
float const*,
float const*,
float const*,
float const*,
int32_t,
int32_t,
float*);
template int32_t embSkipLayerNormHFace_3<float>(hipStream_t,
int32_t,
int32_t,
int32_t,
int32_t const*,
int32_t const*,
int32_t const*,
int32_t,
float const*,
float const*,
float const*,
float const*,
float const*,
int32_t,
int32_t,
int32_t,
float*);
template int32_t embSkipLayerNormHFace_4<float>(hipStream_t,
int32_t,
int32_t,
int32_t,
int32_t const*,
int32_t const*,
int32_t const*,
int32_t const*,
int32_t,
float const*,
float const*,
float const*,
float const*,
float const*,
float const*,
int32_t,
int32_t,
int32_t,
int32_t,
float*);
template int32_t embSkipLayerNormHFace_2<half>(hipStream_t,
int32_t,
int32_t,
int32_t,
int32_t const*,
int32_t const*,
int32_t,
float const*,
float const*,
half const*,
half const*,
int32_t,
int32_t,
half*);
template int32_t embSkipLayerNormHFace_3<half>(hipStream_t,
int32_t,
int32_t,
int32_t,
int32_t const*,
int32_t const*,
int32_t const*,
int32_t,
float const*,
float const*,
half const*,
half const*,
half const*,
int32_t,
int32_t,
int32_t,
half*);
template int32_t embSkipLayerNormHFace_4<half>(hipStream_t,
int32_t,
int32_t,
int32_t,
int32_t const*,
int32_t const*,
int32_t const*,
int32_t const*,
int32_t,
float const*,
float const*,
half const*,
half const*,
half const*,
half const*,
int32_t,
int32_t,
int32_t,
int32_t,
half*);
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 22fcf3bfc18376932980ffe6e98cdc6a2632166a.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
// SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION &
// AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda.h>
#include <cassert>
#include <cstring>
#include <iostream>
#include <vector>
#include "NvInfer.h"
#include "paddle/fluid/inference/tensorrt/plugin/common/common.cuh"
#include "paddle/fluid/inference/tensorrt/plugin/many_emb_layernorm_varseqlen_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
template <typename T, unsigned TPB>
__global__ void embLayerNormKernelHFace_2(int32_t ld,
int32_t const* inputIds0,
int32_t const* inputIds1,
float const* beta,
float const* gamma,
T const* mIdsEmbDev0,
T const* mIdsEmbDev1,
int32_t IdsSize0,
int32_t IdsSize1,
T* output) {
cub::Sum pairSum;
int32_t const s = blockIdx.x;
int32_t const b = blockIdx.y;
int32_t const sumS = inputIds0[b];
int32_t const s_b = inputIds0[b + 1] - sumS;
if (s >= s_b) {
return; // This CTA has nothing to do
}
T const rld = T(1.f) / T(ld);
int32_t const seqPos = sumS + s;
extern __shared__ int32_t word_id[];
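  // 1. thread 0 looks up the word id for this token, validates it, and shares it through shared memory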
if (threadIdx.x == 0) {
if (static_cast<int32_t const*>(inputIds1)[seqPos] < 0 ||
static_cast<int32_t const*>(inputIds1)[seqPos] >= IdsSize1) {
      printf(
          "Error!!!!!!(embLayerNormVarSeqlenPlugin): ID cannot be looked up "
          "in the table: ID < 0 or ID > max ");
return;
} else {
word_id[0] = static_cast<int32_t const*>(inputIds1)[seqPos];
}
}
__syncthreads();
  // 2. load pos/tok/word embeddings and add them together
// offset into embeddings is given by wordId * hidden_size
int32_t const poffset = blockIdx.x * ld;
int32_t const outOffset = seqPos * ld;
  // the output offset is given by seqPos * hidden_size (packed variable-length layout)
kvp<T> threadData(0, 0);
for (int32_t it = threadIdx.x; it < ld; it += TPB) {
T p(mIdsEmbDev0[poffset + it]); // pos id
T val = p;
int32_t const offset = word_id[0] * ld;
val += mIdsEmbDev1[offset + it];
output[outOffset + it] = val;
T const rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
// 3. layer norm on the sum
layerNorm<T, T, float, TPB>(threadData, ld, outOffset, beta, gamma, output);
}
template <typename T, unsigned TPB>
__global__ void embLayerNormKernelHFace_3(int32_t ld,
int32_t const* inputIds0,
int32_t const* inputIds1,
int32_t const* inputIds2,
float const* beta,
float const* gamma,
T const* mIdsEmbDev0,
T const* mIdsEmbDev1,
T const* mIdsEmbDev2,
int32_t IdsSize0,
int32_t IdsSize1,
int32_t IdsSize2,
T* output) {
cub::Sum pairSum;
int32_t const s = blockIdx.x;
int32_t const b = blockIdx.y;
int32_t const sumS = inputIds0[b];
int32_t const s_b = inputIds0[b + 1] - sumS;
if (s >= s_b) {
return; // This CTA has nothing to do
}
T const rld = T(1.f) / T(ld);
int32_t const seqPos = sumS + s;
extern __shared__ int32_t word_id[];
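  // 1. thread 0 looks up the two word ids for this token, validates them, and shares them through shared memory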
if (threadIdx.x == 0) {
if (static_cast<int32_t const*>(inputIds1)[seqPos] < 0 ||
static_cast<int32_t const*>(inputIds1)[seqPos] >= IdsSize1) {
      printf(
          "Error!!!!!!(embLayerNormVarSeqlenPlugin): ID cannot be looked up "
          "in the table: ID < 0 or ID > max ");
return;
} else {
word_id[0] = static_cast<int32_t const*>(inputIds1)[seqPos];
}
if (static_cast<int32_t const*>(inputIds2)[seqPos] < 0 ||
static_cast<int32_t const*>(inputIds2)[seqPos] >= IdsSize2) {
      printf(
          "Error!!!!!!(embLayerNormVarSeqlenPlugin): ID cannot be looked up "
          "in the table: ID < 0 or ID > max ");
return;
} else {
word_id[1] = static_cast<int32_t const*>(inputIds2)[seqPos];
}
}
__syncthreads();
  // 2. load pos/tok/word embeddings and add them together
// offset into embeddings is given by wordId * hidden_size
int32_t const poffset = blockIdx.x * ld;
int32_t const outOffset = seqPos * ld;
  // the output offset is given by seqPos * hidden_size (packed variable-length layout)
kvp<T> threadData(0, 0);
for (int32_t it = threadIdx.x; it < ld; it += TPB) {
T p(mIdsEmbDev0[poffset + it]); // pos id
T val = p;
int32_t const offset0 = word_id[0] * ld;
val += mIdsEmbDev1[offset0 + it];
int32_t const offset1 = word_id[1] * ld;
val += mIdsEmbDev2[offset1 + it];
output[outOffset + it] = val;
T const rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
// 3. layer norm on the sum
layerNorm<T, T, float, TPB>(threadData, ld, outOffset, beta, gamma, output);
}
template <typename T, unsigned TPB>
__global__ void embLayerNormKernelHFace_4(int32_t ld,
int32_t const* inputIds0,
int32_t const* inputIds1,
int32_t const* inputIds2,
int32_t const* inputIds3,
float const* beta,
float const* gamma,
T const* mIdsEmbDev0,
T const* mIdsEmbDev1,
T const* mIdsEmbDev2,
T const* mIdsEmbDev3,
int32_t IdsSize0,
int32_t IdsSize1,
int32_t IdsSize2,
int32_t IdsSize3,
T* output) {
cub::Sum pairSum;
int32_t const s = blockIdx.x;
int32_t const b = blockIdx.y;
int32_t const sumS = inputIds0[b];
int32_t const s_b = inputIds0[b + 1] - sumS;
if (s >= s_b) {
return; // This CTA has nothing to do
}
T const rld = T(1.f) / T(ld);
int32_t const seqPos = sumS + s;
extern __shared__ int32_t word_id[];
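  // 1. thread 0 looks up the three word ids for this token, validates them, and shares them through shared memory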
if (threadIdx.x == 0) {
if (static_cast<int32_t const*>(inputIds1)[seqPos] < 0 ||
static_cast<int32_t const*>(inputIds1)[seqPos] >= IdsSize1) {
      printf(
          "Error!!!!!!(embLayerNormVarSeqlenPlugin): ID cannot be looked up "
          "in the table: ID < 0 or ID > max ");
return;
} else {
word_id[0] = static_cast<int32_t const*>(inputIds1)[seqPos];
}
if (static_cast<int32_t const*>(inputIds2)[seqPos] < 0 ||
static_cast<int32_t const*>(inputIds2)[seqPos] >= IdsSize2) {
      printf(
          "Error!!!!!!(embLayerNormVarSeqlenPlugin): ID cannot be looked up "
          "in the table: ID < 0 or ID > max ");
return;
} else {
word_id[1] = static_cast<int32_t const*>(inputIds2)[seqPos];
}
if (static_cast<int32_t const*>(inputIds3)[seqPos] < 0 ||
static_cast<int32_t const*>(inputIds3)[seqPos] >= IdsSize3) {
      printf(
          "Error!!!!!!(embLayerNormVarSeqlenPlugin): ID cannot be looked up "
          "in the table: ID < 0 or ID > max ");
return;
} else {
word_id[2] = static_cast<int32_t const*>(inputIds3)[seqPos];
}
}
__syncthreads();
  // 2. load pos/tok/word embeddings and add them together
// offset into embeddings is given by wordId * hidden_size
int32_t const poffset = blockIdx.x * ld;
int32_t const outOffset = seqPos * ld;
  // the output offset is given by seqPos * hidden_size (packed variable-length layout)
kvp<T> threadData(0, 0);
for (int32_t it = threadIdx.x; it < ld; it += TPB) {
T p(mIdsEmbDev0[poffset + it]); // pos id
T val = p;
int32_t const offset0 = word_id[0] * ld;
val += mIdsEmbDev1[offset0 + it];
int32_t const offset1 = word_id[1] * ld;
val += mIdsEmbDev2[offset1 + it];
int32_t const offset2 = word_id[2] * ld;
val += mIdsEmbDev3[offset2 + it];
output[outOffset + it] = val;
T const rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
// 3. layer norm on the sum
layerNorm<T, T, float, TPB>(threadData, ld, outOffset, beta, gamma, output);
}
template <typename T>
int32_t embSkipLayerNormHFace_2(cudaStream_t stream,
int32_t ld,
int32_t B,
int32_t S,
int const* inputIds0,
int const* inputIds1,
int32_t nbLookupTables,
float const* beta,
float const* gamma,
T const* mIdsEmbDev0,
T const* mIdsEmbDev1,
int32_t IdsSize0,
int32_t IdsSize1,
T* output) {
constexpr int32_t tpb = 256;
dim3 const grid(S, B, 1);
dim3 const block(tpb, 1, 1);
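  // dynamic shared memory holds one word-id slot per lookup table, excluding the first (position) table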
size_t cache_size = sizeof(int32_t) * (nbLookupTables - 1);
embLayerNormKernelHFace_2<T, tpb>
<<<grid, block, cache_size, stream>>>(ld,
inputIds0,
inputIds1,
beta,
gamma,
mIdsEmbDev0,
mIdsEmbDev1,
IdsSize0,
IdsSize1,
output);
return cudaPeekAtLastError();
}
template <typename T>
int32_t embSkipLayerNormHFace_3(cudaStream_t stream,
int32_t ld,
int32_t B,
int32_t S,
int const* inputIds0,
int const* inputIds1,
int const* inputIds2,
int32_t nbLookupTables,
float const* beta,
float const* gamma,
T const* mIdsEmbDev0,
T const* mIdsEmbDev1,
T const* mIdsEmbDev2,
int32_t IdsSize0,
int32_t IdsSize1,
int32_t IdsSize2,
T* output) {
constexpr int32_t tpb = 256;
dim3 const grid(S, B, 1);
dim3 const block(tpb, 1, 1);
size_t cache_size = sizeof(int32_t) * (nbLookupTables - 1);
embLayerNormKernelHFace_3<T, tpb>
<<<grid, block, cache_size, stream>>>(ld,
inputIds0,
inputIds1,
inputIds2,
beta,
gamma,
mIdsEmbDev0,
mIdsEmbDev1,
mIdsEmbDev2,
IdsSize0,
IdsSize1,
IdsSize2,
output);
return cudaPeekAtLastError();
}
template <typename T>
int32_t embSkipLayerNormHFace_4(cudaStream_t stream,
int32_t ld,
int32_t B,
int32_t S,
int const* inputIds0,
int const* inputIds1,
int const* inputIds2,
int const* inputIds3,
int32_t nbLookupTables,
float const* beta,
float const* gamma,
T const* mIdsEmbDev0,
T const* mIdsEmbDev1,
T const* mIdsEmbDev2,
T const* mIdsEmbDev3,
int32_t IdsSize0,
int32_t IdsSize1,
int32_t IdsSize2,
int32_t IdsSize3,
T* output) {
constexpr int32_t tpb = 256;
dim3 const grid(S, B, 1);
dim3 const block(tpb, 1, 1);
size_t cache_size = sizeof(int32_t) * (nbLookupTables - 1);
embLayerNormKernelHFace_4<T, tpb>
<<<grid, block, cache_size, stream>>>(ld,
inputIds0,
inputIds1,
inputIds2,
inputIds3,
beta,
gamma,
mIdsEmbDev0,
mIdsEmbDev1,
mIdsEmbDev2,
mIdsEmbDev3,
IdsSize0,
IdsSize1,
IdsSize2,
IdsSize3,
output);
return cudaPeekAtLastError();
}
template int32_t embSkipLayerNormHFace_2<float>(cudaStream_t,
int32_t,
int32_t,
int32_t,
int32_t const*,
int32_t const*,
int32_t,
float const*,
float const*,
float const*,
float const*,
int32_t,
int32_t,
float*);
template int32_t embSkipLayerNormHFace_3<float>(cudaStream_t,
int32_t,
int32_t,
int32_t,
int32_t const*,
int32_t const*,
int32_t const*,
int32_t,
float const*,
float const*,
float const*,
float const*,
float const*,
int32_t,
int32_t,
int32_t,
float*);
template int32_t embSkipLayerNormHFace_4<float>(cudaStream_t,
int32_t,
int32_t,
int32_t,
int32_t const*,
int32_t const*,
int32_t const*,
int32_t const*,
int32_t,
float const*,
float const*,
float const*,
float const*,
float const*,
float const*,
int32_t,
int32_t,
int32_t,
int32_t,
float*);
template int32_t embSkipLayerNormHFace_2<half>(cudaStream_t,
int32_t,
int32_t,
int32_t,
int32_t const*,
int32_t const*,
int32_t,
float const*,
float const*,
half const*,
half const*,
int32_t,
int32_t,
half*);
template int32_t embSkipLayerNormHFace_3<half>(cudaStream_t,
int32_t,
int32_t,
int32_t,
int32_t const*,
int32_t const*,
int32_t const*,
int32_t,
float const*,
float const*,
half const*,
half const*,
half const*,
int32_t,
int32_t,
int32_t,
half*);
template int32_t embSkipLayerNormHFace_4<half>(cudaStream_t,
int32_t,
int32_t,
int32_t,
int32_t const*,
int32_t const*,
int32_t const*,
int32_t const*,
int32_t,
float const*,
float const*,
half const*,
half const*,
half const*,
half const*,
int32_t,
int32_t,
int32_t,
int32_t,
half*);
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
56cae366e998ff5144fbfbb2bd9d3c8cbce9aac9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define ALLOC_SIZE 1024
#define ACCESS_DIR 1
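// Each kernel below indexes one allocation through the other allocation's pointer
// (past the end of the lower buffer, or before the start of the higher one), an
// out-of-bounds access presumably intended to exercise OOB-detection tooling.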
__global__ void access_from_min_into_max_kernel(int *min, int *max) {
#ifdef R
volatile int i = min[max-min];
#elif W
min[max-min] = 42;
#endif
}
__global__ void access_from_max_into_min_kernel(int *min, int *max) {
#ifdef R
volatile int i = max[-1*(max-(min+(ALLOC_SIZE-1)))];
#elif W
max[-1*(max-(min+(ALLOC_SIZE-1)))] = 42;
#endif
}
int main(int argc, char** argv) {
int *x, *y, *min, *max;
hipMalloc((void**)&x, ALLOC_SIZE*sizeof(int));
hipMalloc((void**)&y, ALLOC_SIZE*sizeof(int));
min = (x < y) ? x : y;
max = (x < y) ? y : x;
if (ACCESS_DIR == 0)
hipLaunchKernelGGL(( access_from_min_into_max_kernel), dim3(1),dim3(1), 0, 0, min,max);
else
hipLaunchKernelGGL(( access_from_max_into_min_kernel), dim3(1),dim3(1), 0, 0, min,max);
hipFree(x);
hipFree(y);
hipDeviceReset();
return 0;
}
| 56cae366e998ff5144fbfbb2bd9d3c8cbce9aac9.cu | #include <stdio.h>
#define ALLOC_SIZE 1024
#define ACCESS_DIR 1
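// Each kernel below indexes one allocation through the other allocation's pointer
// (past the end of the lower buffer, or before the start of the higher one), an
// out-of-bounds access presumably intended to exercise OOB-detection tooling.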
__global__ void access_from_min_into_max_kernel(int *min, int *max) {
#ifdef R
volatile int i = min[max-min];
#elif W
min[max-min] = 42;
#endif
}
__global__ void access_from_max_into_min_kernel(int *min, int *max) {
#ifdef R
volatile int i = max[-1*(max-(min+(ALLOC_SIZE-1)))];
#elif W
max[-1*(max-(min+(ALLOC_SIZE-1)))] = 42;
#endif
}
int main(int argc, char** argv) {
int *x, *y, *min, *max;
cudaMalloc((void**)&x, ALLOC_SIZE*sizeof(int));
cudaMalloc((void**)&y, ALLOC_SIZE*sizeof(int));
min = (x < y) ? x : y;
max = (x < y) ? y : x;
if (ACCESS_DIR == 0)
access_from_min_into_max_kernel<<<1,1>>>(min,max);
else
access_from_max_into_min_kernel<<<1,1>>>(min,max);
cudaFree(x);
cudaFree(y);
cudaDeviceReset();
return 0;
}
|
94226d1768af40e7dc086708a91709fc493f3b64.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#include <unistd.h>
#include <thread>
#include <assert.h>
#include <hip/hip_runtime.h>
#include "kernel.h"
#include "support/common.h"
#include "support/timer.h"
#include "support/verify.h"
// Params
struct Params {
int device;
int n_gpu_threads;
int n_gpu_blocks;
int n_threads;
int n_warmup;
int n_reps;
const char *file_name;
const char *comparison_file;
int switching_limit;
Params(int argc, char **argv) {
device = 0;
n_gpu_threads = 256;
n_gpu_blocks = 8;
n_threads = 2;
n_warmup = 1;
n_reps = 1;
file_name = "input/NYR_input.dat";
comparison_file = "output/NYR_bfs.out";
switching_limit = 128;
int opt;
while((opt = getopt(argc, argv, "hd:i:g:t:w:r:f:c:l:")) >= 0) {
switch(opt) {
case 'h':
usage();
exit(0);
break;
case 'd': device = atoi(optarg); break;
case 'i': n_gpu_threads = atoi(optarg); break;
case 'g': n_gpu_blocks = atoi(optarg); break;
case 't': n_threads = atoi(optarg); break;
case 'w': n_warmup = atoi(optarg); break;
case 'r': n_reps = atoi(optarg); break;
case 'f': file_name = optarg; break;
case 'c': comparison_file = optarg; break;
case 'l': switching_limit = atoi(optarg); break;
default:
fprintf(stderr, "\nUnrecognized option!\n");
usage();
exit(0);
}
}
assert(n_gpu_threads > 0 && "Invalid # of device threads!");
assert(n_gpu_blocks > 0 && "Invalid # of device blocks!");
assert(n_threads > 0 && "Invalid # of host threads!");
}
void usage() {
fprintf(stderr,
"\nUsage: ./sssp [options]"
"\n"
"\nGeneral options:"
"\n -h help"
"\n -d <D> GPU device ID (default=0)"
"\n -i <I> # of device threads per block (default=256)"
"\n -g <G> # of device blocks (default=8)"
"\n -t <T> # of host threads (default=2)"
"\n -w <W> # of untimed warmup iterations (default=1)"
"\n -r <R> # of timed repetition iterations (default=1)"
"\n"
"\nBenchmark-specific options:"
"\n -f <F> name of input file with control points (default=input/NYR_input.dat)"
"\n -c <C> comparison file (default=output/NYR_bfs_BFS.out)"
"\n -l <L> switching limit (default=128)"
"\n");
}
};
// Input Data
int read_input_size(int &n_nodes, int &n_edges, const Params &p) {
FILE *fp = fopen(p.file_name, "r");
if (fp == NULL) {
printf("Error: failed to read file %s. Exit\n", p.file_name);
return -1;
}
fscanf(fp, "%d", &n_nodes);
fscanf(fp, "%d", &n_edges);
if(fp) fclose(fp);
return 0;
}
void read_input(int &source, Node *&h_nodes, Edge *&h_edges, const Params &p) {
int start, edgeno;
int n_nodes, n_edges;
int id, cost;
FILE *fp = fopen(p.file_name, "r");
fscanf(fp, "%d", &n_nodes);
fscanf(fp, "%d", &n_edges);
fscanf(fp, "%d", &source);
printf("Number of nodes = %d\t", n_nodes);
printf("Number of edges = %d\t", n_edges);
    // initialize the memory: Nodes
for(int i = 0; i < n_nodes; i++) {
fscanf(fp, "%d %d", &start, &edgeno);
h_nodes[i].x = start;
h_nodes[i].y = edgeno;
}
#if PRINT_ALL
for(int i = 0; i < n_nodes; i++) {
printf("%d, %d\n", h_nodes[i].x, h_nodes[i].y);
}
#endif
    // initialize the memory: Edges
for(int i = 0; i < n_edges; i++) {
fscanf(fp, "%d", &id);
fscanf(fp, "%d", &cost);
h_edges[i].x = id;
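        // edge costs are stored negated; the GPU kernel relies on this so that
        // atomicMax over tentative distances selects the shortest path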
h_edges[i].y = -cost;
}
if(fp) fclose(fp);
}
// GPU kernel
__global__ void SSSP_gpu(
const Node *__restrict__ graph_nodes_av,
const Edge *__restrict__ graph_edges_av,
int *__restrict__ cost,
int *__restrict__ color,
const int *__restrict__ q1,
int *__restrict__ q2,
const int *__restrict__ n_t,
int *__restrict__ head,
int *__restrict__ tail,
int *__restrict__ overflow,
const int *__restrict__ gray_shade,
int *__restrict__ iter)
{
__shared__ int l_mem[W_QUEUE_SIZE+2];
__shared__ int tail_bin;
int* l_q2 = l_mem;
int* shift = l_mem + W_QUEUE_SIZE;
int* base = l_mem + W_QUEUE_SIZE + 1;
const int tid = threadIdx.x;
const int gtid = blockIdx.x * blockDim.x + threadIdx.x;
const int WG_SIZE = blockDim.x;
int n_t_local = *n_t; // atomicAdd(n_t, 0);
int gray_shade_local = *gray_shade; // atomicAdd(&gray_shade[0], 0);
if(tid == 0) {
// Reset queue
tail_bin = 0;
}
// Fetch frontier elements from the queue
if(tid == 0)
*base = atomicAdd(&head[0], WG_SIZE);
__syncthreads();
int my_base = *base;
while(my_base < n_t_local) {
// If local queue might overflow
if(tail_bin >= W_QUEUE_SIZE / 2) {
if(tid == 0) {
// Add local tail_bin to tail
*shift = atomicAdd(&tail[0], tail_bin);
}
__syncthreads();
int local_shift = tid;
while(local_shift < tail_bin) {
q2[*shift + local_shift] = l_q2[local_shift];
// Multiple threads are copying elements at the same time, so we shift by multiple elements for next iteration
local_shift += WG_SIZE;
}
__syncthreads();
if(tid == 0) {
// Reset local queue
tail_bin = 0;
}
__syncthreads();
}
if(my_base + tid < n_t_local && *overflow == 0) {
// Visit a node from the current frontier
int pid = q1[my_base + tid];
//////////////// Visit node ///////////////////////////
atomicExch(&color[pid], BLACK); // Node visited
int cur_cost = cost[pid]; // atomicAdd(&cost[pid], 0); // Look up shortest-path distance to this node
Node cur_node;
cur_node.x = graph_nodes_av[pid].x;
cur_node.y = graph_nodes_av[pid].y;
Edge cur_edge;
// For each outgoing edge
for(int i = cur_node.x; i < cur_node.y + cur_node.x; i++) {
cur_edge.x = graph_edges_av[i].x;
cur_edge.y = graph_edges_av[i].y;
int id = cur_edge.x;
int cost_local = cur_edge.y;
cost_local += cur_cost;
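                // distances are stored negated (see read_input), so atomicMax keeps the
                // least negative value, i.e. the shortest tentative distance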
int orig_cost = atomicMax(&cost[id], cost_local);
if(orig_cost < cost_local) {
int old_color = atomicMax(&color[id], gray_shade_local);
if(old_color != gray_shade_local) {
// Push to the queue
int tail_index = atomicAdd(&tail_bin, 1);
if(tail_index >= W_QUEUE_SIZE) {
*overflow = 1;
} else
l_q2[tail_index] = id;
}
}
}
}
if(tid == 0)
*base = atomicAdd(&head[0], WG_SIZE); // Fetch more frontier elements from the queue
__syncthreads();
my_base = *base;
}
/////////////////////////////////////////////////////////
// Compute size of the output and allocate space in the global queue
if(tid == 0) {
*shift = atomicAdd(&tail[0], tail_bin);
}
__syncthreads();
///////////////////// CONCATENATE INTO GLOBAL MEMORY /////////////////////
int local_shift = tid;
while(local_shift < tail_bin) {
q2[*shift + local_shift] = l_q2[local_shift];
// Multiple threads are copying elements at the same time, so we shift by multiple elements for next iteration
local_shift += WG_SIZE;
}
//////////////////////////////////////////////////////////////////////////
if(gtid == 0) {
atomicAdd(&iter[0], 1);
}
}
// Main
int main(int argc, char **argv) {
const Params p(argc, argv);
Timer timer;
// Allocate
int n_nodes, n_edges;
int status = read_input_size(n_nodes, n_edges, p);
if (status == -1) return 1;
timer.start("Host/Device Allocation");
Node * h_nodes = (Node *)malloc(sizeof(Node) * n_nodes);
Node * d_nodes;
hipMalloc((void**)&d_nodes, sizeof(Node) * n_nodes);
Edge * h_edges = (Edge *)malloc(sizeof(Edge) * n_edges);
Edge * d_edges;
hipMalloc((void**)&d_edges, sizeof(Edge) * n_edges);
std::atomic_int *h_color = (std::atomic_int *)malloc(sizeof(std::atomic_int) * n_nodes);
int * d_color;
hipMalloc((void**)&d_color, sizeof(int) * n_nodes);
std::atomic_int *h_cost = (std::atomic_int *)malloc(sizeof(std::atomic_int) * n_nodes);
int * d_cost;
hipMalloc((void**)&d_cost, sizeof(int) * n_nodes);
int * h_q1 = (int *)malloc(n_nodes * sizeof(int));
int * d_q1;
hipMalloc((void**)&d_q1, sizeof(int) * n_nodes);
int * h_q2 = (int *)malloc(n_nodes * sizeof(int));
int * d_q2;
hipMalloc((void**)&d_q2, sizeof(int) * n_nodes);
std::atomic_int h_head[1];
int * d_head;
hipMalloc((void**)&d_head, sizeof(int));
std::atomic_int h_tail[1];
int * d_tail;
hipMalloc((void**)&d_tail, sizeof(int));
std::atomic_int h_threads_end[1];
int * d_threads_end;
hipMalloc((void**)&d_threads_end, sizeof(int));
std::atomic_int h_threads_run[1];
int * d_threads_run;
hipMalloc((void**)&d_threads_run, sizeof(int));
int h_num_t[1];
int * d_num_t;
hipMalloc((void**)&d_num_t, sizeof(int));
int h_overflow[1];
int * d_overflow;
hipMalloc((void**)&d_overflow, sizeof(int));
std::atomic_int h_gray_shade[1];
int * d_gray_shade;
hipMalloc((void**)&d_gray_shade, sizeof(int));
std::atomic_int h_iter[1];
int * d_iter;
hipMalloc((void**)&d_iter, sizeof(int));
hipDeviceSynchronize();
//CUDA_ERR();
//ALLOC_ERR(h_nodes, h_edges, h_color, h_cost, h_q1, h_q2);
timer.stop("Host/Device Allocation");
// Initialize
timer.start("Initialization");
const int max_gpu_threads = 256;
int source;
read_input(source, h_nodes, h_edges, p);
for(int i = 0; i < n_nodes; i++) {
h_cost[i].store(INF);
}
h_cost[source].store(0);
for(int i = 0; i < n_nodes; i++) {
h_color[i].store(WHITE);
}
h_tail[0].store(0);
h_head[0].store(0);
h_threads_end[0].store(0);
h_threads_run[0].store(0);
h_q1[0] = source;
h_iter[0].store(0);
h_overflow[0] = 0;
h_gray_shade[0].store(GRAY0);
timer.stop("Initialization");
timer.print("Initialization", 1);
// Copy to device
timer.start("Copy To Device");
hipMemcpy(d_nodes, h_nodes, sizeof(Node) * n_nodes, hipMemcpyHostToDevice);
hipMemcpy(d_edges, h_edges, sizeof(Edge) * n_edges, hipMemcpyHostToDevice);
hipDeviceSynchronize();
//CUDA_ERR();
timer.stop("Copy To Device");
for(int rep = 0; rep < p.n_reps + p.n_warmup; rep++) {
// Reset
for(int i = 0; i < n_nodes; i++) {
h_cost[i].store(INF);
}
h_cost[source].store(0);
for(int i = 0; i < n_nodes; i++) {
h_color[i].store(WHITE);
}
h_tail[0].store(0);
h_head[0].store(0);
h_threads_end[0].store(0);
h_threads_run[0].store(0);
h_q1[0] = source;
h_iter[0].store(0);
h_overflow[0] = 0;
h_gray_shade[0].store(GRAY0);
if(rep >= p.n_warmup)
timer.start("Kernel");
// Run first iteration in master CPU thread
h_num_t[0] = 1;
int pid;
int index_i, index_o;
for(index_i = 0; index_i < h_num_t[0]; index_i++) {
pid = h_q1[index_i];
h_color[pid].store(BLACK);
int cur_cost = h_cost[pid].load();
for(int i = h_nodes[pid].x; i < (h_nodes[pid].y + h_nodes[pid].x); i++) {
int id = h_edges[i].x;
int cost = h_edges[i].y;
cost += cur_cost;
h_cost[id].store(cost);
h_color[id].store(GRAY0);
index_o = h_tail[0].fetch_add(1);
h_q2[index_o] = id;
}
}
h_num_t[0] = h_tail[0].load();
h_tail[0].store(0);
h_threads_run[0].fetch_add(1);
h_gray_shade[0].store(GRAY1);
h_iter[0].fetch_add(1);
if(rep >= p.n_warmup)
timer.stop("Kernel");
// Pointers to input and output queues
int * h_qin = h_q2;
int * h_qout = h_q1;
int * d_qin; // = d_q2;
int * d_qout; // = d_q1;
const int CPU_EXEC = (p.n_threads > 0) ? 1 : 0;
const int GPU_EXEC = (p.n_gpu_blocks > 0 && p.n_gpu_threads > 0) ? 1 : 0;
// Run subsequent iterations on CPU or GPU until number of input queue elements is 0
while(*h_num_t != 0) {
if((*h_num_t < p.switching_limit || GPU_EXEC == 0) &&
CPU_EXEC == 1) { // If the number of input queue elements is lower than switching_limit
if(rep >= p.n_warmup)
timer.start("Kernel on Host");
// Continue until switching_limit condition is not satisfied
while((*h_num_t != 0) && (*h_num_t < p.switching_limit || GPU_EXEC == 0) && CPU_EXEC == 1) {
// Swap queues
if(h_iter[0] % 2 == 0) {
h_qin = h_q1;
h_qout = h_q2;
} else {
h_qin = h_q2;
h_qout = h_q1;
}
std::thread main_thread(run_cpu_threads, h_nodes, h_edges, h_cost, h_color, h_qin, h_qout, h_num_t,
h_head, h_tail, h_threads_end, h_threads_run, h_gray_shade, h_iter, p.n_threads,
p.switching_limit, GPU_EXEC);
main_thread.join();
h_num_t[0] = h_tail[0].load(); // Number of elements in output queue
h_tail[0].store(0);
h_head[0].store(0);
if(h_iter[0].load() % 2 == 0)
h_gray_shade[0].store(GRAY0);
else
h_gray_shade[0].store(GRAY1);
}
if(rep >= p.n_warmup)
timer.stop("Kernel on Host");
} else if((*h_num_t >= p.switching_limit || CPU_EXEC == 0) &&
GPU_EXEC ==
1) { // If the number of input queue elements is higher than or equal to switching_limit
if(rep >= p.n_warmup)
timer.start("Copy To Device");
hipMemcpy(
d_cost, h_cost, sizeof(int) * n_nodes, hipMemcpyHostToDevice);
hipMemcpy(
d_color, h_color, sizeof(int) * n_nodes, hipMemcpyHostToDevice);
hipMemcpy(
d_threads_run, h_threads_run, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(
d_threads_end, h_threads_end, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(
d_overflow, h_overflow, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(
d_q1, h_q1, sizeof(int) * n_nodes, hipMemcpyHostToDevice);
hipMemcpy(
d_q2, h_q2, sizeof(int) * n_nodes, hipMemcpyHostToDevice);
hipMemcpy(
d_iter, h_iter, sizeof(int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
//CUDA_ERR();
if(rep >= p.n_warmup)
timer.stop("Copy To Device");
// Continue until switching_limit condition is not satisfied
while((*h_num_t != 0) && (*h_num_t >= p.switching_limit || CPU_EXEC == 0) && GPU_EXEC == 1) {
//printf("h_iter %d\n", h_iter[0].load());
// Swap queues
if(h_iter[0] % 2 == 0) {
d_qin = d_q1;
d_qout = d_q2;
} else {
d_qin = d_q2;
d_qout = d_q1;
}
if(rep >= p.n_warmup)
timer.start("Copy To Device");
hipMemcpy(
d_num_t, h_num_t, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(
d_tail, h_tail, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(
d_head, h_head, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(
d_gray_shade, h_gray_shade, sizeof(int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
//CUDA_ERR();
if(rep >= p.n_warmup)
timer.stop("Copy To Device");
assert(p.n_gpu_threads <= max_gpu_threads &&
"The thread block size is greater than the maximum thread block size that can be used on this device");
dim3 dimGrid(p.n_gpu_blocks);
dim3 dimBlock(p.n_gpu_threads);
if(rep >= p.n_warmup)
timer.start("Kernel on Device");
hipLaunchKernelGGL(( SSSP_gpu), dim3(dimGrid), dim3(dimBlock), 0, 0, d_nodes, d_edges, d_cost,
d_color, d_qin, d_qout, d_num_t,
d_head, d_tail, d_overflow, d_gray_shade, d_iter);
hipDeviceSynchronize();
//CUDA_ERR();
if(rep >= p.n_warmup)
timer.stop("Kernel on Device");
if(rep >= p.n_warmup)
timer.start("Copy Back and Merge");
hipMemcpy(
h_tail, d_tail, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(
h_iter, d_iter, sizeof(int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//CUDA_ERR();
if(rep >= p.n_warmup)
timer.stop("Copy Back and Merge");
h_num_t[0] = h_tail[0].load(); // Number of elements in output queue
h_tail[0].store(0);
h_head[0].store(0);
if(h_iter[0].load() % 2 == 0)
h_gray_shade[0].store(GRAY0);
else
h_gray_shade[0].store(GRAY1);
}
if(rep >= p.n_warmup)
timer.start("Copy Back and Merge");
hipMemcpy(
h_cost, d_cost, sizeof(int) * n_nodes, hipMemcpyDeviceToHost);
hipMemcpy(
h_color, d_color, sizeof(int) * n_nodes, hipMemcpyDeviceToHost);
hipMemcpy(
h_threads_run, d_threads_run, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(
h_threads_end, d_threads_end, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(
h_overflow, d_overflow, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(
h_q1, d_q1, sizeof(int) * n_nodes, hipMemcpyDeviceToHost);
hipMemcpy(
h_q2, d_q2, sizeof(int) * n_nodes, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//CUDA_ERR();
if(rep >= p.n_warmup)
timer.stop("Copy Back and Merge");
}
}
} // end of iteration
timer.print("Host/Device Allocation", 1);
timer.print("Copy To Device", p.n_reps);
timer.print("Kernel on Host", p.n_reps);
timer.print("Kernel on Device", p.n_reps);
timer.print("Copy Back and Merge", p.n_reps);
// Verify answer
bool ok = verify(h_cost, n_nodes, p.comparison_file);
// Free memory
timer.start("Host/Device Deallocation");
free(h_nodes);
free(h_edges);
free(h_color);
free(h_cost);
free(h_q1);
free(h_q2);
hipFree(d_nodes);
hipFree(d_edges);
hipFree(d_cost);
hipFree(d_color);
hipFree(d_q1);
hipFree(d_q2);
hipFree(d_num_t);
hipFree(d_head);
hipFree(d_tail);
hipFree(d_threads_end);
hipFree(d_threads_run);
hipFree(d_overflow);
hipFree(d_iter);
hipFree(d_gray_shade);
//CUDA_ERR();
timer.stop("Host/Device Deallocation");
timer.print("Host/Device Deallocation", 1);
printf("%s\n", ok ? "PASS" : "FAIL");
return 0;
}
| 94226d1768af40e7dc086708a91709fc493f3b64.cu | /*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#include <unistd.h>
#include <thread>
#include <assert.h>
#include <cuda.h>
#include "kernel.h"
#include "support/common.h"
#include "support/timer.h"
#include "support/verify.h"
// Params
struct Params {
int device;
int n_gpu_threads;
int n_gpu_blocks;
int n_threads;
int n_warmup;
int n_reps;
const char *file_name;
const char *comparison_file;
int switching_limit;
Params(int argc, char **argv) {
device = 0;
n_gpu_threads = 256;
n_gpu_blocks = 8;
n_threads = 2;
n_warmup = 1;
n_reps = 1;
file_name = "input/NYR_input.dat";
comparison_file = "output/NYR_bfs.out";
switching_limit = 128;
int opt;
while((opt = getopt(argc, argv, "hd:i:g:t:w:r:f:c:l:")) >= 0) {
switch(opt) {
case 'h':
usage();
exit(0);
break;
case 'd': device = atoi(optarg); break;
case 'i': n_gpu_threads = atoi(optarg); break;
case 'g': n_gpu_blocks = atoi(optarg); break;
case 't': n_threads = atoi(optarg); break;
case 'w': n_warmup = atoi(optarg); break;
case 'r': n_reps = atoi(optarg); break;
case 'f': file_name = optarg; break;
case 'c': comparison_file = optarg; break;
case 'l': switching_limit = atoi(optarg); break;
default:
fprintf(stderr, "\nUnrecognized option!\n");
usage();
exit(0);
}
}
assert(n_gpu_threads > 0 && "Invalid # of device threads!");
assert(n_gpu_blocks > 0 && "Invalid # of device blocks!");
assert(n_threads > 0 && "Invalid # of host threads!");
}
void usage() {
fprintf(stderr,
"\nUsage: ./sssp [options]"
"\n"
"\nGeneral options:"
"\n -h help"
"\n -d <D> GPU device ID (default=0)"
"\n -i <I> # of device threads per block (default=256)"
"\n -g <G> # of device blocks (default=8)"
"\n -t <T> # of host threads (default=2)"
"\n -w <W> # of untimed warmup iterations (default=1)"
"\n -r <R> # of timed repetition iterations (default=1)"
"\n"
"\nBenchmark-specific options:"
"\n -f <F> name of input file with control points (default=input/NYR_input.dat)"
"\n -c <C> comparison file (default=output/NYR_bfs_BFS.out)"
"\n -l <L> switching limit (default=128)"
"\n");
}
};
// Input Data
int read_input_size(int &n_nodes, int &n_edges, const Params &p) {
FILE *fp = fopen(p.file_name, "r");
if (fp == NULL) {
printf("Error: failed to read file %s. Exit\n", p.file_name);
return -1;
}
fscanf(fp, "%d", &n_nodes);
fscanf(fp, "%d", &n_edges);
if(fp) fclose(fp);
return 0;
}
void read_input(int &source, Node *&h_nodes, Edge *&h_edges, const Params &p) {
int start, edgeno;
int n_nodes, n_edges;
int id, cost;
FILE *fp = fopen(p.file_name, "r");
fscanf(fp, "%d", &n_nodes);
fscanf(fp, "%d", &n_edges);
fscanf(fp, "%d", &source);
printf("Number of nodes = %d\t", n_nodes);
printf("Number of edges = %d\t", n_edges);
    // initialize the memory: Nodes
for(int i = 0; i < n_nodes; i++) {
fscanf(fp, "%d %d", &start, &edgeno);
h_nodes[i].x = start;
h_nodes[i].y = edgeno;
}
#if PRINT_ALL
for(int i = 0; i < n_nodes; i++) {
printf("%d, %d\n", h_nodes[i].x, h_nodes[i].y);
}
#endif
    // initialize the memory: Edges
for(int i = 0; i < n_edges; i++) {
fscanf(fp, "%d", &id);
fscanf(fp, "%d", &cost);
h_edges[i].x = id;
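        // edge costs are stored negated; the GPU kernel relies on this so that
        // atomicMax over tentative distances selects the shortest path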
h_edges[i].y = -cost;
}
if(fp) fclose(fp);
}
// GPU kernel
__global__ void SSSP_gpu(
const Node *__restrict__ graph_nodes_av,
const Edge *__restrict__ graph_edges_av,
int *__restrict__ cost,
int *__restrict__ color,
const int *__restrict__ q1,
int *__restrict__ q2,
const int *__restrict__ n_t,
int *__restrict__ head,
int *__restrict__ tail,
int *__restrict__ overflow,
const int *__restrict__ gray_shade,
int *__restrict__ iter)
{
__shared__ int l_mem[W_QUEUE_SIZE+2];
__shared__ int tail_bin;
int* l_q2 = l_mem;
int* shift = l_mem + W_QUEUE_SIZE;
int* base = l_mem + W_QUEUE_SIZE + 1;
const int tid = threadIdx.x;
const int gtid = blockIdx.x * blockDim.x + threadIdx.x;
const int WG_SIZE = blockDim.x;
int n_t_local = *n_t; // atomicAdd(n_t, 0);
int gray_shade_local = *gray_shade; // atomicAdd(&gray_shade[0], 0);
if(tid == 0) {
// Reset queue
tail_bin = 0;
}
// Fetch frontier elements from the queue
if(tid == 0)
*base = atomicAdd(&head[0], WG_SIZE);
__syncthreads();
int my_base = *base;
while(my_base < n_t_local) {
// If local queue might overflow
if(tail_bin >= W_QUEUE_SIZE / 2) {
if(tid == 0) {
// Add local tail_bin to tail
*shift = atomicAdd(&tail[0], tail_bin);
}
__syncthreads();
int local_shift = tid;
while(local_shift < tail_bin) {
q2[*shift + local_shift] = l_q2[local_shift];
// Multiple threads are copying elements at the same time, so we shift by multiple elements for next iteration
local_shift += WG_SIZE;
}
__syncthreads();
if(tid == 0) {
// Reset local queue
tail_bin = 0;
}
__syncthreads();
}
if(my_base + tid < n_t_local && *overflow == 0) {
// Visit a node from the current frontier
int pid = q1[my_base + tid];
//////////////// Visit node ///////////////////////////
atomicExch(&color[pid], BLACK); // Node visited
int cur_cost = cost[pid]; // atomicAdd(&cost[pid], 0); // Look up shortest-path distance to this node
Node cur_node;
cur_node.x = graph_nodes_av[pid].x;
cur_node.y = graph_nodes_av[pid].y;
Edge cur_edge;
// For each outgoing edge
for(int i = cur_node.x; i < cur_node.y + cur_node.x; i++) {
cur_edge.x = graph_edges_av[i].x;
cur_edge.y = graph_edges_av[i].y;
int id = cur_edge.x;
int cost_local = cur_edge.y;
cost_local += cur_cost;
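                // distances are stored negated (see read_input), so atomicMax keeps the
                // least negative value, i.e. the shortest tentative distance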
int orig_cost = atomicMax(&cost[id], cost_local);
if(orig_cost < cost_local) {
int old_color = atomicMax(&color[id], gray_shade_local);
if(old_color != gray_shade_local) {
// Push to the queue
int tail_index = atomicAdd(&tail_bin, 1);
if(tail_index >= W_QUEUE_SIZE) {
*overflow = 1;
} else
l_q2[tail_index] = id;
}
}
}
}
if(tid == 0)
*base = atomicAdd(&head[0], WG_SIZE); // Fetch more frontier elements from the queue
__syncthreads();
my_base = *base;
}
/////////////////////////////////////////////////////////
// Compute size of the output and allocate space in the global queue
if(tid == 0) {
*shift = atomicAdd(&tail[0], tail_bin);
}
__syncthreads();
///////////////////// CONCATENATE INTO GLOBAL MEMORY /////////////////////
int local_shift = tid;
while(local_shift < tail_bin) {
q2[*shift + local_shift] = l_q2[local_shift];
// Multiple threads are copying elements at the same time, so we shift by multiple elements for next iteration
local_shift += WG_SIZE;
}
//////////////////////////////////////////////////////////////////////////
if(gtid == 0) {
atomicAdd(&iter[0], 1);
}
}
// Main
int main(int argc, char **argv) {
const Params p(argc, argv);
Timer timer;
// Allocate
int n_nodes, n_edges;
int status = read_input_size(n_nodes, n_edges, p);
if (status == -1) return 1;
timer.start("Host/Device Allocation");
Node * h_nodes = (Node *)malloc(sizeof(Node) * n_nodes);
Node * d_nodes;
cudaMalloc((void**)&d_nodes, sizeof(Node) * n_nodes);
Edge * h_edges = (Edge *)malloc(sizeof(Edge) * n_edges);
Edge * d_edges;
cudaMalloc((void**)&d_edges, sizeof(Edge) * n_edges);
std::atomic_int *h_color = (std::atomic_int *)malloc(sizeof(std::atomic_int) * n_nodes);
int * d_color;
cudaMalloc((void**)&d_color, sizeof(int) * n_nodes);
std::atomic_int *h_cost = (std::atomic_int *)malloc(sizeof(std::atomic_int) * n_nodes);
int * d_cost;
cudaMalloc((void**)&d_cost, sizeof(int) * n_nodes);
int * h_q1 = (int *)malloc(n_nodes * sizeof(int));
int * d_q1;
cudaMalloc((void**)&d_q1, sizeof(int) * n_nodes);
int * h_q2 = (int *)malloc(n_nodes * sizeof(int));
int * d_q2;
cudaMalloc((void**)&d_q2, sizeof(int) * n_nodes);
std::atomic_int h_head[1];
int * d_head;
cudaMalloc((void**)&d_head, sizeof(int));
std::atomic_int h_tail[1];
int * d_tail;
cudaMalloc((void**)&d_tail, sizeof(int));
std::atomic_int h_threads_end[1];
int * d_threads_end;
cudaMalloc((void**)&d_threads_end, sizeof(int));
std::atomic_int h_threads_run[1];
int * d_threads_run;
cudaMalloc((void**)&d_threads_run, sizeof(int));
int h_num_t[1];
int * d_num_t;
cudaMalloc((void**)&d_num_t, sizeof(int));
int h_overflow[1];
int * d_overflow;
cudaMalloc((void**)&d_overflow, sizeof(int));
std::atomic_int h_gray_shade[1];
int * d_gray_shade;
cudaMalloc((void**)&d_gray_shade, sizeof(int));
std::atomic_int h_iter[1];
int * d_iter;
cudaMalloc((void**)&d_iter, sizeof(int));
cudaDeviceSynchronize();
//CUDA_ERR();
//ALLOC_ERR(h_nodes, h_edges, h_color, h_cost, h_q1, h_q2);
timer.stop("Host/Device Allocation");
// Initialize
timer.start("Initialization");
const int max_gpu_threads = 256;
int source;
read_input(source, h_nodes, h_edges, p);
for(int i = 0; i < n_nodes; i++) {
h_cost[i].store(INF);
}
h_cost[source].store(0);
for(int i = 0; i < n_nodes; i++) {
h_color[i].store(WHITE);
}
h_tail[0].store(0);
h_head[0].store(0);
h_threads_end[0].store(0);
h_threads_run[0].store(0);
h_q1[0] = source;
h_iter[0].store(0);
h_overflow[0] = 0;
h_gray_shade[0].store(GRAY0);
timer.stop("Initialization");
timer.print("Initialization", 1);
// Copy to device
timer.start("Copy To Device");
cudaMemcpy(d_nodes, h_nodes, sizeof(Node) * n_nodes, cudaMemcpyHostToDevice);
cudaMemcpy(d_edges, h_edges, sizeof(Edge) * n_edges, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
//CUDA_ERR();
timer.stop("Copy To Device");
for(int rep = 0; rep < p.n_reps + p.n_warmup; rep++) {
// Reset
for(int i = 0; i < n_nodes; i++) {
h_cost[i].store(INF);
}
h_cost[source].store(0);
for(int i = 0; i < n_nodes; i++) {
h_color[i].store(WHITE);
}
h_tail[0].store(0);
h_head[0].store(0);
h_threads_end[0].store(0);
h_threads_run[0].store(0);
h_q1[0] = source;
h_iter[0].store(0);
h_overflow[0] = 0;
h_gray_shade[0].store(GRAY0);
if(rep >= p.n_warmup)
timer.start("Kernel");
// Run first iteration in master CPU thread
h_num_t[0] = 1;
int pid;
int index_i, index_o;
for(index_i = 0; index_i < h_num_t[0]; index_i++) {
pid = h_q1[index_i];
h_color[pid].store(BLACK);
int cur_cost = h_cost[pid].load();
for(int i = h_nodes[pid].x; i < (h_nodes[pid].y + h_nodes[pid].x); i++) {
int id = h_edges[i].x;
int cost = h_edges[i].y;
cost += cur_cost;
h_cost[id].store(cost);
h_color[id].store(GRAY0);
index_o = h_tail[0].fetch_add(1);
h_q2[index_o] = id;
}
}
h_num_t[0] = h_tail[0].load();
h_tail[0].store(0);
h_threads_run[0].fetch_add(1);
h_gray_shade[0].store(GRAY1);
h_iter[0].fetch_add(1);
if(rep >= p.n_warmup)
timer.stop("Kernel");
// Pointers to input and output queues
int * h_qin = h_q2;
int * h_qout = h_q1;
int * d_qin; // = d_q2;
int * d_qout; // = d_q1;
const int CPU_EXEC = (p.n_threads > 0) ? 1 : 0;
const int GPU_EXEC = (p.n_gpu_blocks > 0 && p.n_gpu_threads > 0) ? 1 : 0;
// Run subsequent iterations on CPU or GPU until number of input queue elements is 0
while(*h_num_t != 0) {
if((*h_num_t < p.switching_limit || GPU_EXEC == 0) &&
CPU_EXEC == 1) { // If the number of input queue elements is lower than switching_limit
if(rep >= p.n_warmup)
timer.start("Kernel on Host");
// Continue until switching_limit condition is not satisfied
while((*h_num_t != 0) && (*h_num_t < p.switching_limit || GPU_EXEC == 0) && CPU_EXEC == 1) {
// Swap queues
if(h_iter[0] % 2 == 0) {
h_qin = h_q1;
h_qout = h_q2;
} else {
h_qin = h_q2;
h_qout = h_q1;
}
std::thread main_thread(run_cpu_threads, h_nodes, h_edges, h_cost, h_color, h_qin, h_qout, h_num_t,
h_head, h_tail, h_threads_end, h_threads_run, h_gray_shade, h_iter, p.n_threads,
p.switching_limit, GPU_EXEC);
main_thread.join();
h_num_t[0] = h_tail[0].load(); // Number of elements in output queue
h_tail[0].store(0);
h_head[0].store(0);
if(h_iter[0].load() % 2 == 0)
h_gray_shade[0].store(GRAY0);
else
h_gray_shade[0].store(GRAY1);
}
if(rep >= p.n_warmup)
timer.stop("Kernel on Host");
} else if((*h_num_t >= p.switching_limit || CPU_EXEC == 0) &&
GPU_EXEC ==
1) { // If the number of input queue elements is higher than or equal to switching_limit
if(rep >= p.n_warmup)
timer.start("Copy To Device");
cudaMemcpy(
d_cost, h_cost, sizeof(int) * n_nodes, cudaMemcpyHostToDevice);
cudaMemcpy(
d_color, h_color, sizeof(int) * n_nodes, cudaMemcpyHostToDevice);
cudaMemcpy(
d_threads_run, h_threads_run, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(
d_threads_end, h_threads_end, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(
d_overflow, h_overflow, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(
d_q1, h_q1, sizeof(int) * n_nodes, cudaMemcpyHostToDevice);
cudaMemcpy(
d_q2, h_q2, sizeof(int) * n_nodes, cudaMemcpyHostToDevice);
cudaMemcpy(
d_iter, h_iter, sizeof(int), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
//CUDA_ERR();
if(rep >= p.n_warmup)
timer.stop("Copy To Device");
// Continue until switching_limit condition is not satisfied
while((*h_num_t != 0) && (*h_num_t >= p.switching_limit || CPU_EXEC == 0) && GPU_EXEC == 1) {
//printf("h_iter %d\n", h_iter[0].load());
// Swap queues
if(h_iter[0] % 2 == 0) {
d_qin = d_q1;
d_qout = d_q2;
} else {
d_qin = d_q2;
d_qout = d_q1;
}
if(rep >= p.n_warmup)
timer.start("Copy To Device");
cudaMemcpy(
d_num_t, h_num_t, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(
d_tail, h_tail, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(
d_head, h_head, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(
d_gray_shade, h_gray_shade, sizeof(int), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
//CUDA_ERR();
if(rep >= p.n_warmup)
timer.stop("Copy To Device");
assert(p.n_gpu_threads <= max_gpu_threads &&
"The thread block size is greater than the maximum thread block size that can be used on this device");
dim3 dimGrid(p.n_gpu_blocks);
dim3 dimBlock(p.n_gpu_threads);
if(rep >= p.n_warmup)
timer.start("Kernel on Device");
SSSP_gpu<<<dimGrid, dimBlock>>>(d_nodes, d_edges, d_cost,
d_color, d_qin, d_qout, d_num_t,
d_head, d_tail, d_overflow, d_gray_shade, d_iter);
cudaDeviceSynchronize();
//CUDA_ERR();
if(rep >= p.n_warmup)
timer.stop("Kernel on Device");
if(rep >= p.n_warmup)
timer.start("Copy Back and Merge");
cudaMemcpy(
h_tail, d_tail, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(
h_iter, d_iter, sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//CUDA_ERR();
if(rep >= p.n_warmup)
timer.stop("Copy Back and Merge");
h_num_t[0] = h_tail[0].load(); // Number of elements in output queue
h_tail[0].store(0);
h_head[0].store(0);
if(h_iter[0].load() % 2 == 0)
h_gray_shade[0].store(GRAY0);
else
h_gray_shade[0].store(GRAY1);
}
if(rep >= p.n_warmup)
timer.start("Copy Back and Merge");
cudaMemcpy(
h_cost, d_cost, sizeof(int) * n_nodes, cudaMemcpyDeviceToHost);
cudaMemcpy(
h_color, d_color, sizeof(int) * n_nodes, cudaMemcpyDeviceToHost);
cudaMemcpy(
h_threads_run, d_threads_run, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(
h_threads_end, d_threads_end, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(
h_overflow, d_overflow, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(
h_q1, d_q1, sizeof(int) * n_nodes, cudaMemcpyDeviceToHost);
cudaMemcpy(
h_q2, d_q2, sizeof(int) * n_nodes, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//CUDA_ERR();
if(rep >= p.n_warmup)
timer.stop("Copy Back and Merge");
}
}
} // end of iteration
timer.print("Host/Device Allocation", 1);
timer.print("Copy To Device", p.n_reps);
timer.print("Kernel on Host", p.n_reps);
timer.print("Kernel on Device", p.n_reps);
timer.print("Copy Back and Merge", p.n_reps);
// Verify answer
bool ok = verify(h_cost, n_nodes, p.comparison_file);
// Free memory
timer.start("Host/Device Deallocation");
free(h_nodes);
free(h_edges);
free(h_color);
free(h_cost);
free(h_q1);
free(h_q2);
cudaFree(d_nodes);
cudaFree(d_edges);
cudaFree(d_cost);
cudaFree(d_color);
cudaFree(d_q1);
cudaFree(d_q2);
cudaFree(d_num_t);
cudaFree(d_head);
cudaFree(d_tail);
cudaFree(d_threads_end);
cudaFree(d_threads_run);
cudaFree(d_overflow);
cudaFree(d_iter);
cudaFree(d_gray_shade);
//CUDA_ERR();
timer.stop("Host/Device Deallocation");
timer.print("Host/Device Deallocation", 1);
printf("%s\n", ok ? "PASS" : "FAIL");
return 0;
}
|
677795e675ae1551651e4286a1f10e5330b76379.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_demo.h"
#include "cuda_helpers.h"
#include <iostream>
int main(int argc, char** argv)
{
cuda_demo::device_memory::demonstrateDeviceMemory();
return 0;
}
void cuda_demo::device_memory::demonstrateDeviceMemory()
{
std::cout << "Device Memory Demo\n";
std::cout << "==================\n\n";
linear_memory::demonstrateLinearDeviceMemory();
linear_memory::demonstrateSharedDeviceMemory();
}
void cuda_demo::device_memory::linear_memory::demonstrateLinearDeviceMemory()
{
std::cout << "Linear Device Memory Demo\n";
std::cout << "\tAdding numbers on the GPU:\n";
// Setting Host Memory Variables
auto h_a = 1, h_b = 1, h_c = 0;
// Reserve pointers on Host and allocate memory on device
int *d_a, *d_b, *d_c;
hipMalloc(&d_a, sizeof(int));
hipMalloc(&d_b, sizeof(int));
hipMalloc(&d_c, sizeof(int));
// Move input values to the device
hipMemcpy(d_a, &h_a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, &h_b, sizeof(int), hipMemcpyHostToDevice);
// Calculate result on the device
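	// a single block with a single thread is enough to compute this one sum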
addInt << <1, 1 >> >(d_a, d_b, d_c);
// Move output value to the host
hipMemcpy(&h_c, d_c, sizeof(int), hipMemcpyDeviceToHost);
// Free memory on the device
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
std::cout << "\t" << h_a << " + " << h_b << " = " << h_c << "\n";
}
void cuda_demo::device_memory::linear_memory::demonstrateSharedDeviceMemory()
{
std::cout << "Shared Device Memory Demo\n";
std::cout << "\tAdding numbers on the GPU via Shared Memory:\n";
// Setting Host Memory Variables
auto h_a = 1, h_b = 1, h_c = 0;
// Reserve pointers on Host and allocate memory on device
int *d_a, *d_b, *d_c;
hipMalloc(&d_a, sizeof(int));
hipMalloc(&d_b, sizeof(int));
hipMalloc(&d_c, sizeof(int));
// Move input values to the device
hipMemcpy(d_a, &h_a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, &h_b, sizeof(int), hipMemcpyHostToDevice);
// Calculate result on the device using shared memory
addIntSharedMemory << <1, 1 >> >(d_a, d_b, d_c);
// Move output value to the host
hipMemcpy(&h_c, d_c, sizeof(int), hipMemcpyDeviceToHost);
// Free memory on the device
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
std::cout << "\t" << h_a << " + " << h_b << " = " << h_c << "\n";
}
| 677795e675ae1551651e4286a1f10e5330b76379.cu | #include "cuda_demo.h"
#include "cuda_helpers.h"
#include <iostream>
int main(int argc, char** argv)
{
cuda_demo::device_memory::demonstrateDeviceMemory();
return 0;
}
void cuda_demo::device_memory::demonstrateDeviceMemory()
{
std::cout << "Device Memory Demo\n";
std::cout << "==================\n\n";
linear_memory::demonstrateLinearDeviceMemory();
linear_memory::demonstrateSharedDeviceMemory();
}
void cuda_demo::device_memory::linear_memory::demonstrateLinearDeviceMemory()
{
std::cout << "Linear Device Memory Demo\n";
std::cout << "\tAdding numbers on the GPU:\n";
// Setting Host Memory Variables
auto h_a = 1, h_b = 1, h_c = 0;
// Reserve pointers on Host and allocate memory on device
int *d_a, *d_b, *d_c;
cudaMalloc(&d_a, sizeof(int));
cudaMalloc(&d_b, sizeof(int));
cudaMalloc(&d_c, sizeof(int));
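// Note: return codes from cudaMalloc/cudaMemcpy are ignored here for brevity;
// production code would check each call against cudaSuccess.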
// Move input values to the device
cudaMemcpy(d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &h_b, sizeof(int), cudaMemcpyHostToDevice);
// Calculate result on the device
addInt<<<1, 1>>>(d_a, d_b, d_c);
// Move output value to the host
cudaMemcpy(&h_c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
// Free memory on the device
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
std::cout << "\t" << h_a << " + " << h_b << " = " << h_c << "\n";
}
void cuda_demo::device_memory::linear_memory::demonstrateSharedDeviceMemory()
{
std::cout << "Shared Device Memory Demo\n";
std::cout << "\tAdding numbers on the GPU via Shared Memory:\n";
// Setting Host Memory Variables
auto h_a = 1, h_b = 1, h_c = 0;
// Reserve pointers on Host and allocate memory on device
int *d_a, *d_b, *d_c;
cudaMalloc(&d_a, sizeof(int));
cudaMalloc(&d_b, sizeof(int));
cudaMalloc(&d_c, sizeof(int));
// Move input values to the device
cudaMemcpy(d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &h_b, sizeof(int), cudaMemcpyHostToDevice);
// Calculate result on the device using shared memory
addIntSharedMemory<<<1, 1>>>(d_a, d_b, d_c);
// Move output value to the host
cudaMemcpy(&h_c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
// Free memory on the device
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
std::cout << "\t" << h_a << " + " << h_b << " = " << h_c << "\n";
}
|
04f48885e5ab14a79ae00d54a7415ecfea5f765e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Program to solve Laplace equation on a regular 3D grid
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 8
#define BLOCK_Y 8
////////////////////////////////////////////////////////////////////////
// include kernel function
////////////////////////////////////////////////////////////////////////
#include <laplace3d_kernel.h>
////////////////////////////////////////////////////////////////////////
// declare Gold routine
////////////////////////////////////////////////////////////////////////
void Gold_laplace3d(int NX, int NY, int NZ, float* h_u1, float* h_u2);
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
// 'h_' prefix - CPU (host) memory space
int NX=256, NY=256, NZ=256, REPEAT=10,
bx, by, i, j, k, ind;
float *h_u1, *h_u2, *h_u3, *h_foo, err;
// 'd_' prefix - GPU (device) memory space
float *d_u1, *d_u2, *d_foo;
printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ);
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// allocate memory for arrays
h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u3 = (float *)malloc(sizeof(float)*NX*NY*NZ);
checkCudaErrors( hipMalloc((void **)&d_u1, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_u2, sizeof(float)*NX*NY*NZ) );
// initialise u1
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1)
h_u1[ind] = 1.0f; // Dirichlet b.c.'s
else
h_u1[ind] = 0.0f;
}
}
}
// copy u1 to device
hipEventRecord(start);
checkCudaErrors( hipMemcpy(d_u1, h_u1, sizeof(float)*NX*NY*NZ,
hipMemcpyHostToDevice) );
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\nCopy u1 to device: %.1f (ms) \n", milli);
// Set up the execution configuration
bx = 1 + (NX-1)/BLOCK_X;
by = 1 + (NY-1)/BLOCK_Y;
dim3 dimGrid(bx,by);
dim3 dimBlock(BLOCK_X,BLOCK_Y);
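// The launch grid is 2-D over the x-y plane only; GPU_laplace3d (defined in
// laplace3d_kernel.h, not shown here) is assumed to march each (i,j) thread
// through the z-dimension internally.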
// printf("\n dimGrid = %d %d %d \n",dimGrid.x,dimGrid.y,dimGrid.z);
// printf(" dimBlock = %d %d %d \n",dimBlock.x,dimBlock.y,dimBlock.z);
// Execute GPU kernel
hipEventRecord(start);
for (i = 1; i <= REPEAT; ++i) {
hipLaunchKernelGGL(( GPU_laplace3d), dim3(dimGrid), dim3(dimBlock), 0, 0, NX, NY, NZ, d_u1, d_u2);
getLastCudaError("GPU_laplace3d execution failed\n");
d_foo = d_u1; d_u1 = d_u2; d_u2 = d_foo; // swap d_u1 and d_u2
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\n%dx GPU_laplace3d_naive: %.1f (ms) \n", REPEAT, milli);
// Read back GPU results
hipEventRecord(start);
checkCudaErrors( hipMemcpy(h_u2, d_u1, sizeof(float)*NX*NY*NZ,
hipMemcpyDeviceToHost) );
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\nCopy u2 to host: %.1f (ms) \n", milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u2[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// Gold treatment
hipEventRecord(start);
for (int i = 1; i <= REPEAT; ++i) {
Gold_laplace3d(NX, NY, NZ, h_u1, h_u3);
h_foo = h_u1; h_u1 = h_u3; h_u3 = h_foo; // swap h_u1 and h_u3
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\n%dx Gold_laplace3d: %.1f (ms) \n \n", REPEAT, milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u1[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// error check
err = 0.0;
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]);
}
}
}
printf("rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ)));
// Release GPU and CPU memory
checkCudaErrors( hipFree(d_u1) );
checkCudaErrors( hipFree(d_u2) );
free(h_u1);
free(h_u2);
free(h_u3);
hipDeviceReset();
}
| 04f48885e5ab14a79ae00d54a7415ecfea5f765e.cu | //
// Program to solve Laplace equation on a regular 3D grid
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 8
#define BLOCK_Y 8
////////////////////////////////////////////////////////////////////////
// include kernel function
////////////////////////////////////////////////////////////////////////
#include <laplace3d_kernel.h>
////////////////////////////////////////////////////////////////////////
// declare Gold routine
////////////////////////////////////////////////////////////////////////
void Gold_laplace3d(int NX, int NY, int NZ, float* h_u1, float* h_u2);
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
// 'h_' prefix - CPU (host) memory space
int NX=256, NY=256, NZ=256, REPEAT=10,
bx, by, i, j, k, ind;
float *h_u1, *h_u2, *h_u3, *h_foo, err;
// 'd_' prefix - GPU (device) memory space
float *d_u1, *d_u2, *d_foo;
printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ);
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// allocate memory for arrays
h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u3 = (float *)malloc(sizeof(float)*NX*NY*NZ);
checkCudaErrors( cudaMalloc((void **)&d_u1, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_u2, sizeof(float)*NX*NY*NZ) );
// initialise u1
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1)
h_u1[ind] = 1.0f; // Dirichlet b.c.'s
else
h_u1[ind] = 0.0f;
}
}
}
// copy u1 to device
cudaEventRecord(start);
checkCudaErrors( cudaMemcpy(d_u1, h_u1, sizeof(float)*NX*NY*NZ,
cudaMemcpyHostToDevice) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\nCopy u1 to device: %.1f (ms) \n", milli);
// Set up the execution configuration
bx = 1 + (NX-1)/BLOCK_X;
by = 1 + (NY-1)/BLOCK_Y;
dim3 dimGrid(bx,by);
dim3 dimBlock(BLOCK_X,BLOCK_Y);
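// The launch grid is 2-D over the x-y plane only; GPU_laplace3d (defined in
// laplace3d_kernel.h, not shown here) is assumed to march each (i,j) thread
// through the z-dimension internally.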
// printf("\n dimGrid = %d %d %d \n",dimGrid.x,dimGrid.y,dimGrid.z);
// printf(" dimBlock = %d %d %d \n",dimBlock.x,dimBlock.y,dimBlock.z);
// Execute GPU kernel
cudaEventRecord(start);
for (i = 1; i <= REPEAT; ++i) {
GPU_laplace3d<<<dimGrid, dimBlock>>>(NX, NY, NZ, d_u1, d_u2);
getLastCudaError("GPU_laplace3d execution failed\n");
d_foo = d_u1; d_u1 = d_u2; d_u2 = d_foo; // swap d_u1 and d_u2
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\n%dx GPU_laplace3d_naive: %.1f (ms) \n", REPEAT, milli);
// Read back GPU results
cudaEventRecord(start);
checkCudaErrors( cudaMemcpy(h_u2, d_u1, sizeof(float)*NX*NY*NZ,
cudaMemcpyDeviceToHost) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\nCopy u2 to host: %.1f (ms) \n", milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u2[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// Gold treatment
cudaEventRecord(start);
for (int i = 1; i <= REPEAT; ++i) {
Gold_laplace3d(NX, NY, NZ, h_u1, h_u3);
h_foo = h_u1; h_u1 = h_u3; h_u3 = h_foo; // swap h_u1 and h_u3
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\n%dx Gold_laplace3d: %.1f (ms) \n \n", REPEAT, milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u1[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// error check
err = 0.0;
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]);
}
}
}
printf("rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ)));
// Release GPU and CPU memory
checkCudaErrors( cudaFree(d_u1) );
checkCudaErrors( cudaFree(d_u2) );
free(h_u1);
free(h_u2);
free(h_u3);
cudaDeviceReset();
}
|
2f395ad56d767167174c6be072467832864cad61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic";
#include "gen_hip.cuh"
static const int __tb_PageRank = TB_SIZE;
static const int __tb_InitializeGraph = TB_SIZE;
__global__ void ResetGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, const float local_alpha, float * p_delta, uint32_t * p_nout, float * p_residual, float * p_value)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
index_type src_end;
// FP: "1 -> 2;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
p_value[src] = 0;
p_nout[src] = 0;
p_delta[src] = 0;
p_residual[src] = local_alpha;
}
}
// FP: "10 -> 11;
}
__global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_nout, DynamicBitset& bitset_nout)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_InitializeGraph;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
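// Nested-parallelism scheduling: a vertex with at least _NP_CROSSOVER_TB edges
// is expanded by the whole thread block, one with at least _NP_CROSSOVER_WP (32)
// edges by a single warp, and the remaining short edge lists are packed into
// ITSIZE-sized batches and processed fine-grained (the 'tb', 'wp' and 'fg'
// schedulers named in GGC_OPTIONS).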
// FP: "3 -> 4;
typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "6 -> 7;
bool pop = src < __end;
// FP: "7 -> 8;
if (pop)
{
}
// FP: "9 -> 10;
// FP: "12 -> 13;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "13 -> 14;
__shared__ struct { ; } _np_closure [TB_SIZE];
// FP: "14 -> 15;
// FP: "15 -> 16;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "18 -> 19;
// FP: "19 -> 20;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "20 -> 21;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "21 -> 22;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "24 -> 25;
__syncthreads();
// FP: "25 -> 26;
while (true)
{
// FP: "26 -> 27;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "29 -> 30;
__syncthreads();
// FP: "30 -> 31;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "31 -> 32;
__syncthreads();
// FP: "32 -> 33;
break;
}
// FP: "34 -> 35;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "39 -> 40;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "42 -> 43;
assert(nps.tb.src < __kernel_tb_size);
// FP: "43 -> 44;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type nbr;
nbr = ns +_np_j;
{
index_type dst;
dst = graph.getAbsDestination(nbr);
atomicTestAdd(&p_nout[dst], (uint32_t)1);
bitset_nout.set(dst);
}
}
// FP: "51 -> 52;
__syncthreads();
}
// FP: "53 -> 54;
// FP: "54 -> 55;
{
const int warpid = threadIdx.x / 32;
// FP: "55 -> 56;
const int _np_laneid = cub::LaneId();
// FP: "56 -> 57;
while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type nbr;
nbr = _np_w_start +_np_ii;
{
index_type dst;
dst = graph.getAbsDestination(nbr);
atomicTestAdd(&p_nout[dst], (uint32_t)1);
bitset_nout.set(dst);
}
}
}
// FP: "74 -> 75;
__syncthreads();
// FP: "75 -> 76;
}
// FP: "76 -> 77;
__syncthreads();
// FP: "77 -> 78;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "78 -> 79;
while (_np.work())
{
// FP: "79 -> 80;
int _np_i =0;
// FP: "80 -> 81;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "81 -> 82;
__syncthreads();
// FP: "82 -> 83;
// FP: "83 -> 84;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type nbr;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
nbr= nps.fg.itvalue[_np_i];
{
index_type dst;
dst = graph.getAbsDestination(nbr);
atomicTestAdd(&p_nout[dst], (uint32_t)1);
bitset_nout.set(dst);
}
}
// FP: "92 -> 93;
_np.execute_round_done(ITSIZE);
// FP: "93 -> 94;
__syncthreads();
}
// FP: "95 -> 96;
assert(threadIdx.x < __kernel_tb_size);
}
// FP: "97 -> 98;
}
__global__ void PageRank_delta(CSRGraph graph, unsigned int __begin, unsigned int __end, const float local_alpha, float local_tolerance, float * p_delta, uint32_t * p_nout, float * p_residual, float * p_value, HGAccumulator<unsigned int> DGAccumulator_accum)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
__shared__ hipcub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
DGAccumulator_accum.thread_entry();
// FP: "3 -> 4;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
p_delta[src] = 0;
if (p_residual[src] > local_tolerance)
{
p_value[src] += p_residual[src];
if (p_nout[src] > 0)
{
p_delta[src] = p_residual[src] * (1 - local_alpha) / p_nout[src];
DGAccumulator_accum.reduce( 1);
}
p_residual[src] = 0;
}
}
}
// FP: "17 -> 18;
DGAccumulator_accum.thread_exit<hipcub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts);
// FP: "18 -> 19;
}
__global__ void PageRank(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_delta, float * p_residual)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_PageRank;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
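// Same tb/warp/fine-grained edge scheduling as InitializeGraph above; here the
// per-edge work pulls p_delta[dst] into p_residual[src] for each neighbor.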
// FP: "3 -> 4;
typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "6 -> 7;
bool pop = src < __end;
// FP: "7 -> 8;
if (pop)
{
}
// FP: "9 -> 10;
// FP: "12 -> 13;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "13 -> 14;
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "14 -> 15;
_np_closure[threadIdx.x].src = src;
// FP: "15 -> 16;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "18 -> 19;
// FP: "19 -> 20;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "20 -> 21;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "21 -> 22;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "24 -> 25;
__syncthreads();
// FP: "25 -> 26;
while (true)
{
// FP: "26 -> 27;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "29 -> 30;
__syncthreads();
// FP: "30 -> 31;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "31 -> 32;
__syncthreads();
// FP: "32 -> 33;
break;
}
// FP: "34 -> 35;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "39 -> 40;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "42 -> 43;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "43 -> 44;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type nbr;
nbr = ns +_np_j;
{
index_type dst;
dst = graph.getAbsDestination(nbr);
if (p_delta[dst] > 0)
{
atomicTestAdd(&p_residual[src], p_delta[dst]);
}
}
}
// FP: "53 -> 54;
__syncthreads();
}
// FP: "55 -> 56;
// FP: "56 -> 57;
{
const int warpid = threadIdx.x / 32;
// FP: "57 -> 58;
const int _np_laneid = cub::LaneId();
// FP: "58 -> 59;
while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type nbr;
nbr = _np_w_start +_np_ii;
{
index_type dst;
dst = graph.getAbsDestination(nbr);
if (p_delta[dst] > 0)
{
atomicTestAdd(&p_residual[src], p_delta[dst]);
}
}
}
}
// FP: "78 -> 79;
__syncthreads();
// FP: "79 -> 80;
}
// FP: "80 -> 81;
__syncthreads();
// FP: "81 -> 82;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "82 -> 83;
while (_np.work())
{
// FP: "83 -> 84;
int _np_i =0;
// FP: "84 -> 85;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "85 -> 86;
__syncthreads();
// FP: "86 -> 87;
// FP: "87 -> 88;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type nbr;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
nbr= nps.fg.itvalue[_np_i];
{
index_type dst;
dst = graph.getAbsDestination(nbr);
if (p_delta[dst] > 0)
{
atomicTestAdd(&p_residual[src], p_delta[dst]);
}
}
}
// FP: "98 -> 99;
_np.execute_round_done(ITSIZE);
// FP: "99 -> 100;
__syncthreads();
}
// FP: "101 -> 102;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "103 -> 104;
}
__global__ void PageRankSanity(CSRGraph graph, unsigned int __begin, unsigned int __end, float local_tolerance, float * p_residual, float * p_value, HGAccumulator<uint64_t> DGAccumulator_residual_over_tolerance, HGAccumulator<float> DGAccumulator_sum, HGAccumulator<float> DGAccumulator_sum_residual, HGReduceMax<float> max_residual, HGReduceMax<float> max_value, HGReduceMin<float> min_residual, HGReduceMin<float> min_value)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
__shared__ hipcub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_residual_over_tolerance_ts;
__shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_ts;
__shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_residual_ts;
__shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage max_residual_ts;
__shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage max_value_ts;
__shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage min_residual_ts;
__shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage min_value_ts;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
DGAccumulator_residual_over_tolerance.thread_entry();
// FP: "3 -> 4;
// FP: "4 -> 5;
DGAccumulator_sum.thread_entry();
// FP: "5 -> 6;
// FP: "6 -> 7;
DGAccumulator_sum_residual.thread_entry();
// FP: "7 -> 8;
// FP: "8 -> 9;
max_residual.thread_entry();
// FP: "9 -> 10;
// FP: "10 -> 11;
max_value.thread_entry();
// FP: "11 -> 12;
// FP: "12 -> 13;
min_residual.thread_entry();
// FP: "13 -> 14;
// FP: "14 -> 15;
min_value.thread_entry();
// FP: "15 -> 16;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
max_value.reduce(p_value[src]);
min_value.reduce(p_value[src]);
max_residual.reduce(p_residual[src]);
min_residual.reduce(p_residual[src]);
DGAccumulator_sum.reduce( p_value[src]);
DGAccumulator_sum.reduce( p_residual[src]);
if (p_residual[src] > local_tolerance)
{
DGAccumulator_residual_over_tolerance.reduce( 1);
}
}
}
// FP: "29 -> 30;
DGAccumulator_residual_over_tolerance.thread_exit<hipcub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_residual_over_tolerance_ts);
// FP: "30 -> 31;
DGAccumulator_sum.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(DGAccumulator_sum_ts);
// FP: "31 -> 32;
DGAccumulator_sum_residual.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(DGAccumulator_sum_residual_ts);
// FP: "32 -> 33;
max_residual.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(max_residual_ts);
// FP: "33 -> 34;
max_value.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(max_value_ts);
// FP: "34 -> 35;
min_residual.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(min_residual_ts);
// FP: "35 -> 36;
min_value.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(min_value_ts);
// FP: "36 -> 37;
}
void ResetGraph_cuda(unsigned int __begin, unsigned int __end, const float & local_alpha, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
hipLaunchKernelGGL(( ResetGraph) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_alpha, ctx->delta.data.gpu_wr_ptr(), ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void ResetGraph_allNodes_cuda(const float & local_alpha, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
ResetGraph_cuda(0, ctx->gg.nnodes, local_alpha, ctx);
// FP: "2 -> 3;
}
void ResetGraph_masterNodes_cuda(const float & local_alpha, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
ResetGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_alpha, ctx);
// FP: "2 -> 3;
}
void ResetGraph_nodesWithEdges_cuda(const float & local_alpha, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
ResetGraph_cuda(0, ctx->numNodesWithEdges, local_alpha, ctx);
// FP: "2 -> 3;
}
void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
hipLaunchKernelGGL(( InitializeGraph) , dim3(blocks), dim3(__tb_InitializeGraph), 0, 0, ctx->gg, __begin, __end, ctx->nout.data.gpu_wr_ptr(), *(ctx->nout.is_updated.gpu_rd_ptr()));
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void InitializeGraph_allNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(0, ctx->gg.nnodes, ctx);
// FP: "2 -> 3;
}
void InitializeGraph_masterNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
// FP: "2 -> 3;
}
void InitializeGraph_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(0, ctx->numNodesWithEdges, ctx);
// FP: "2 -> 3;
}
void PageRank_delta_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<unsigned int> _DGAccumulator_accum;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<unsigned int> DGAccumulator_accumval = Shared<unsigned int>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
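// The accumulator lives in a one-element Shared<> buffer: zeroed on the host
// above, reduced into by the kernel on the device, and read back after the
// launch via cpu_rd_ptr().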
// FP: "8 -> 9;
hipLaunchKernelGGL(( PageRank_delta) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_alpha, local_tolerance, ctx->delta.data.gpu_wr_ptr(), ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr(), _DGAccumulator_accum);
// FP: "9 -> 10;
check_cuda_kernel;
// FP: "10 -> 11;
DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
// FP: "11 -> 12;
}
void PageRank_delta_allNodes_cuda(unsigned int & DGAccumulator_accum, const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_delta_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, local_alpha, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRank_delta_masterNodes_cuda(unsigned int & DGAccumulator_accum, const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_delta_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, local_alpha, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRank_delta_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_delta_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, local_alpha, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRank_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
hipLaunchKernelGGL(( PageRank) , dim3(blocks), dim3(__tb_PageRank), 0, 0, ctx->gg, __begin, __end, ctx->delta.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void PageRank_allNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_cuda(0, ctx->gg.nnodes, ctx);
// FP: "2 -> 3;
}
void PageRank_masterNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
// FP: "2 -> 3;
}
void PageRank_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_cuda(0, ctx->numNodesWithEdges, ctx);
// FP: "2 -> 3;
}
void PageRankSanity_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<uint64_t> _DGAccumulator_residual_over_tolerance;
HGAccumulator<float> _DGAccumulator_sum;
HGAccumulator<float> _DGAccumulator_sum_residual;
HGReduceMax<float> _max_residual;
HGReduceMax<float> _max_value;
HGReduceMin<float> _min_residual;
HGReduceMin<float> _min_value;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<uint64_t> DGAccumulator_residual_over_toleranceval = Shared<uint64_t>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_residual_over_toleranceval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_residual_over_tolerance.rv = DGAccumulator_residual_over_toleranceval.gpu_wr_ptr();
// FP: "8 -> 9;
Shared<float> DGAccumulator_sumval = Shared<float>(1);
// FP: "9 -> 10;
// FP: "10 -> 11;
*(DGAccumulator_sumval.cpu_wr_ptr()) = 0;
// FP: "11 -> 12;
_DGAccumulator_sum.rv = DGAccumulator_sumval.gpu_wr_ptr();
// FP: "12 -> 13;
Shared<float> DGAccumulator_sum_residualval = Shared<float>(1);
// FP: "13 -> 14;
// FP: "14 -> 15;
*(DGAccumulator_sum_residualval.cpu_wr_ptr()) = 0;
// FP: "15 -> 16;
_DGAccumulator_sum_residual.rv = DGAccumulator_sum_residualval.gpu_wr_ptr();
// FP: "16 -> 17;
Shared<float> max_residualval = Shared<float>(1);
// FP: "17 -> 18;
// FP: "18 -> 19;
*(max_residualval.cpu_wr_ptr()) = 0;
// FP: "19 -> 20;
_max_residual.rv = max_residualval.gpu_wr_ptr();
// FP: "20 -> 21;
Shared<float> max_valueval = Shared<float>(1);
// FP: "21 -> 22;
// FP: "22 -> 23;
*(max_valueval.cpu_wr_ptr()) = 0;
// FP: "23 -> 24;
_max_value.rv = max_valueval.gpu_wr_ptr();
// FP: "24 -> 25;
Shared<float> min_residualval = Shared<float>(1);
// FP: "25 -> 26;
// FP: "26 -> 27;
*(min_residualval.cpu_wr_ptr()) = 0;
// FP: "27 -> 28;
_min_residual.rv = min_residualval.gpu_wr_ptr();
// FP: "28 -> 29;
Shared<float> min_valueval = Shared<float>(1);
// FP: "29 -> 30;
// FP: "30 -> 31;
*(min_valueval.cpu_wr_ptr()) = 0;
// FP: "31 -> 32;
_min_value.rv = min_valueval.gpu_wr_ptr();
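// All seven reducers above follow the same Shared<> host-init / device-reduce /
// host-read pattern used in PageRank_delta_cuda.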
// FP: "32 -> 33;
hipLaunchKernelGGL(( PageRankSanity) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_tolerance, ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr(), _DGAccumulator_residual_over_tolerance, _DGAccumulator_sum, _DGAccumulator_sum_residual, _max_residual, _max_value, _min_residual, _min_value);
// FP: "33 -> 34;
check_cuda_kernel;
// FP: "34 -> 35;
DGAccumulator_residual_over_tolerance = *(DGAccumulator_residual_over_toleranceval.cpu_rd_ptr());
// FP: "35 -> 36;
DGAccumulator_sum = *(DGAccumulator_sumval.cpu_rd_ptr());
// FP: "36 -> 37;
DGAccumulator_sum_residual = *(DGAccumulator_sum_residualval.cpu_rd_ptr());
// FP: "37 -> 38;
max_residual = *(max_residualval.cpu_rd_ptr());
// FP: "38 -> 39;
max_value = *(max_valueval.cpu_rd_ptr());
// FP: "39 -> 40;
min_residual = *(min_residualval.cpu_rd_ptr());
// FP: "40 -> 41;
min_value = *(min_valueval.cpu_rd_ptr());
// FP: "41 -> 42;
}
void PageRankSanity_allNodes_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRankSanity_cuda(0, ctx->gg.nnodes, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRankSanity_masterNodes_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRankSanity_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRankSanity_nodesWithEdges_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRankSanity_cuda(0, ctx->numNodesWithEdges, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx);
// FP: "2 -> 3;
}
| 2f395ad56d767167174c6be072467832864cad61.cu | /*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic";
#include "gen_cuda.cuh"
static const int __tb_PageRank = TB_SIZE;
static const int __tb_InitializeGraph = TB_SIZE;
__global__ void ResetGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, const float local_alpha, float * p_delta, uint32_t * p_nout, float * p_residual, float * p_value)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
index_type src_end;
// FP: "1 -> 2;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
p_value[src] = 0;
p_nout[src] = 0;
p_delta[src] = 0;
p_residual[src] = local_alpha;
}
}
// FP: "10 -> 11;
}
__global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_nout, DynamicBitset& bitset_nout)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_InitializeGraph;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
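// Nested-parallelism scheduling: a vertex with at least _NP_CROSSOVER_TB edges
// is expanded by the whole thread block, one with at least _NP_CROSSOVER_WP (32)
// edges by a single warp, and the remaining short edge lists are packed into
// ITSIZE-sized batches and processed fine-grained (the 'tb', 'wp' and 'fg'
// schedulers named in GGC_OPTIONS).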
// FP: "3 -> 4;
typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "6 -> 7;
bool pop = src < __end;
// FP: "7 -> 8;
if (pop)
{
}
// FP: "9 -> 10;
// FP: "12 -> 13;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "13 -> 14;
__shared__ struct { ; } _np_closure [TB_SIZE];
// FP: "14 -> 15;
// FP: "15 -> 16;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "18 -> 19;
// FP: "19 -> 20;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "20 -> 21;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "21 -> 22;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "24 -> 25;
__syncthreads();
// FP: "25 -> 26;
while (true)
{
// FP: "26 -> 27;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "29 -> 30;
__syncthreads();
// FP: "30 -> 31;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "31 -> 32;
__syncthreads();
// FP: "32 -> 33;
break;
}
// FP: "34 -> 35;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "39 -> 40;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "42 -> 43;
assert(nps.tb.src < __kernel_tb_size);
// FP: "43 -> 44;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type nbr;
nbr = ns +_np_j;
{
index_type dst;
dst = graph.getAbsDestination(nbr);
atomicTestAdd(&p_nout[dst], (uint32_t)1);
bitset_nout.set(dst);
}
}
// FP: "51 -> 52;
__syncthreads();
}
// FP: "53 -> 54;
// FP: "54 -> 55;
{
const int warpid = threadIdx.x / 32;
// FP: "55 -> 56;
const int _np_laneid = cub::LaneId();
// FP: "56 -> 57;
while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type nbr;
nbr = _np_w_start +_np_ii;
{
index_type dst;
dst = graph.getAbsDestination(nbr);
atomicTestAdd(&p_nout[dst], (uint32_t)1);
bitset_nout.set(dst);
}
}
}
// FP: "74 -> 75;
__syncthreads();
// FP: "75 -> 76;
}
// FP: "76 -> 77;
__syncthreads();
// FP: "77 -> 78;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "78 -> 79;
while (_np.work())
{
// FP: "79 -> 80;
int _np_i =0;
// FP: "80 -> 81;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "81 -> 82;
__syncthreads();
// FP: "82 -> 83;
// FP: "83 -> 84;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type nbr;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
nbr= nps.fg.itvalue[_np_i];
{
index_type dst;
dst = graph.getAbsDestination(nbr);
atomicTestAdd(&p_nout[dst], (uint32_t)1);
bitset_nout.set(dst);
}
}
// FP: "92 -> 93;
_np.execute_round_done(ITSIZE);
// FP: "93 -> 94;
__syncthreads();
}
// FP: "95 -> 96;
assert(threadIdx.x < __kernel_tb_size);
}
// FP: "97 -> 98;
}
__global__ void PageRank_delta(CSRGraph graph, unsigned int __begin, unsigned int __end, const float local_alpha, float local_tolerance, float * p_delta, uint32_t * p_nout, float * p_residual, float * p_value, HGAccumulator<unsigned int> DGAccumulator_accum)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
__shared__ cub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
DGAccumulator_accum.thread_entry();
// FP: "3 -> 4;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
p_delta[src] = 0;
if (p_residual[src] > local_tolerance)
{
p_value[src] += p_residual[src];
if (p_nout[src] > 0)
{
p_delta[src] = p_residual[src] * (1 - local_alpha) / p_nout[src];
DGAccumulator_accum.reduce( 1);
}
p_residual[src] = 0;
}
}
}
// FP: "17 -> 18;
DGAccumulator_accum.thread_exit<cub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts);
// FP: "18 -> 19;
}
__global__ void PageRank(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_delta, float * p_residual)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_PageRank;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
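// Same tb/warp/fine-grained edge scheduling as InitializeGraph above; here the
// per-edge work pulls p_delta[dst] into p_residual[src] for each neighbor.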
// FP: "3 -> 4;
typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "6 -> 7;
bool pop = src < __end;
// FP: "7 -> 8;
if (pop)
{
}
// FP: "9 -> 10;
// FP: "12 -> 13;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "13 -> 14;
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "14 -> 15;
_np_closure[threadIdx.x].src = src;
// FP: "15 -> 16;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "18 -> 19;
// FP: "19 -> 20;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "20 -> 21;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "21 -> 22;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "24 -> 25;
__syncthreads();
// FP: "25 -> 26;
while (true)
{
// FP: "26 -> 27;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "29 -> 30;
__syncthreads();
// FP: "30 -> 31;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "31 -> 32;
__syncthreads();
// FP: "32 -> 33;
break;
}
// FP: "34 -> 35;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "39 -> 40;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "42 -> 43;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "43 -> 44;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type nbr;
nbr = ns +_np_j;
{
index_type dst;
dst = graph.getAbsDestination(nbr);
if (p_delta[dst] > 0)
{
atomicTestAdd(&p_residual[src], p_delta[dst]);
}
}
}
// FP: "53 -> 54;
__syncthreads();
}
// FP: "55 -> 56;
// FP: "56 -> 57;
{
const int warpid = threadIdx.x / 32;
// FP: "57 -> 58;
const int _np_laneid = cub::LaneId();
// FP: "58 -> 59;
while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type nbr;
nbr = _np_w_start +_np_ii;
{
index_type dst;
dst = graph.getAbsDestination(nbr);
if (p_delta[dst] > 0)
{
atomicTestAdd(&p_residual[src], p_delta[dst]);
}
}
}
}
// FP: "78 -> 79;
__syncthreads();
// FP: "79 -> 80;
}
// FP: "80 -> 81;
__syncthreads();
// FP: "81 -> 82;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "82 -> 83;
while (_np.work())
{
// FP: "83 -> 84;
int _np_i =0;
// FP: "84 -> 85;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "85 -> 86;
__syncthreads();
// FP: "86 -> 87;
// FP: "87 -> 88;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type nbr;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
nbr= nps.fg.itvalue[_np_i];
{
index_type dst;
dst = graph.getAbsDestination(nbr);
if (p_delta[dst] > 0)
{
atomicTestAdd(&p_residual[src], p_delta[dst]);
}
}
}
// FP: "98 -> 99;
_np.execute_round_done(ITSIZE);
// FP: "99 -> 100;
__syncthreads();
}
// FP: "101 -> 102;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "103 -> 104;
}
__global__ void PageRankSanity(CSRGraph graph, unsigned int __begin, unsigned int __end, float local_tolerance, float * p_residual, float * p_value, HGAccumulator<uint64_t> DGAccumulator_residual_over_tolerance, HGAccumulator<float> DGAccumulator_sum, HGAccumulator<float> DGAccumulator_sum_residual, HGReduceMax<float> max_residual, HGReduceMax<float> max_value, HGReduceMin<float> min_residual, HGReduceMin<float> min_value)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
__shared__ cub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_residual_over_tolerance_ts;
__shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_ts;
__shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_residual_ts;
__shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage max_residual_ts;
__shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage max_value_ts;
__shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage min_residual_ts;
__shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage min_value_ts;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
DGAccumulator_residual_over_tolerance.thread_entry();
// FP: "3 -> 4;
// FP: "4 -> 5;
DGAccumulator_sum.thread_entry();
// FP: "5 -> 6;
// FP: "6 -> 7;
DGAccumulator_sum_residual.thread_entry();
// FP: "7 -> 8;
// FP: "8 -> 9;
max_residual.thread_entry();
// FP: "9 -> 10;
// FP: "10 -> 11;
max_value.thread_entry();
// FP: "11 -> 12;
// FP: "12 -> 13;
min_residual.thread_entry();
// FP: "13 -> 14;
// FP: "14 -> 15;
min_value.thread_entry();
// FP: "15 -> 16;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
max_value.reduce(p_value[src]);
min_value.reduce(p_value[src]);
max_residual.reduce(p_residual[src]);
min_residual.reduce(p_residual[src]);
DGAccumulator_sum.reduce( p_value[src]);
DGAccumulator_sum.reduce( p_residual[src]);
if (p_residual[src] > local_tolerance)
{
DGAccumulator_residual_over_tolerance.reduce( 1);
}
}
}
// FP: "29 -> 30;
DGAccumulator_residual_over_tolerance.thread_exit<cub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_residual_over_tolerance_ts);
// FP: "30 -> 31;
DGAccumulator_sum.thread_exit<cub::BlockReduce<float, TB_SIZE> >(DGAccumulator_sum_ts);
// FP: "31 -> 32;
DGAccumulator_sum_residual.thread_exit<cub::BlockReduce<float, TB_SIZE> >(DGAccumulator_sum_residual_ts);
// FP: "32 -> 33;
max_residual.thread_exit<cub::BlockReduce<float, TB_SIZE> >(max_residual_ts);
// FP: "33 -> 34;
max_value.thread_exit<cub::BlockReduce<float, TB_SIZE> >(max_value_ts);
// FP: "34 -> 35;
min_residual.thread_exit<cub::BlockReduce<float, TB_SIZE> >(min_residual_ts);
// FP: "35 -> 36;
min_value.thread_exit<cub::BlockReduce<float, TB_SIZE> >(min_value_ts);
// FP: "36 -> 37;
}
void ResetGraph_cuda(unsigned int __begin, unsigned int __end, const float & local_alpha, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
ResetGraph <<<blocks, threads>>>(ctx->gg, __begin, __end, local_alpha, ctx->delta.data.gpu_wr_ptr(), ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void ResetGraph_allNodes_cuda(const float & local_alpha, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
ResetGraph_cuda(0, ctx->gg.nnodes, local_alpha, ctx);
// FP: "2 -> 3;
}
void ResetGraph_masterNodes_cuda(const float & local_alpha, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
ResetGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_alpha, ctx);
// FP: "2 -> 3;
}
void ResetGraph_nodesWithEdges_cuda(const float & local_alpha, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
ResetGraph_cuda(0, ctx->numNodesWithEdges, local_alpha, ctx);
// FP: "2 -> 3;
}
void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
InitializeGraph <<<blocks, __tb_InitializeGraph>>>(ctx->gg, __begin, __end, ctx->nout.data.gpu_wr_ptr(), *(ctx->nout.is_updated.gpu_rd_ptr()));
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void InitializeGraph_allNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(0, ctx->gg.nnodes, ctx);
// FP: "2 -> 3;
}
void InitializeGraph_masterNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
// FP: "2 -> 3;
}
void InitializeGraph_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(0, ctx->numNodesWithEdges, ctx);
// FP: "2 -> 3;
}
void PageRank_delta_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<unsigned int> _DGAccumulator_accum;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<unsigned int> DGAccumulator_accumval = Shared<unsigned int>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
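// The accumulator lives in a one-element Shared<> buffer: zeroed on the host
// above, reduced into by the kernel on the device, and read back after the
// launch via cpu_rd_ptr().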
// FP: "8 -> 9;
PageRank_delta <<<blocks, threads>>>(ctx->gg, __begin, __end, local_alpha, local_tolerance, ctx->delta.data.gpu_wr_ptr(), ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr(), _DGAccumulator_accum);
// FP: "9 -> 10;
check_cuda_kernel;
// FP: "10 -> 11;
DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
// FP: "11 -> 12;
}
void PageRank_delta_allNodes_cuda(unsigned int & DGAccumulator_accum, const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_delta_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, local_alpha, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRank_delta_masterNodes_cuda(unsigned int & DGAccumulator_accum, const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_delta_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, local_alpha, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRank_delta_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_delta_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, local_alpha, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRank_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
PageRank <<<blocks, __tb_PageRank>>>(ctx->gg, __begin, __end, ctx->delta.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void PageRank_allNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_cuda(0, ctx->gg.nnodes, ctx);
// FP: "2 -> 3;
}
void PageRank_masterNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
// FP: "2 -> 3;
}
void PageRank_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRank_cuda(0, ctx->numNodesWithEdges, ctx);
// FP: "2 -> 3;
}
void PageRankSanity_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<uint64_t> _DGAccumulator_residual_over_tolerance;
HGAccumulator<float> _DGAccumulator_sum;
HGAccumulator<float> _DGAccumulator_sum_residual;
HGReduceMax<float> _max_residual;
HGReduceMax<float> _max_value;
HGReduceMin<float> _min_residual;
HGReduceMin<float> _min_value;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<uint64_t> DGAccumulator_residual_over_toleranceval = Shared<uint64_t>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_residual_over_toleranceval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_residual_over_tolerance.rv = DGAccumulator_residual_over_toleranceval.gpu_wr_ptr();
// FP: "8 -> 9;
Shared<float> DGAccumulator_sumval = Shared<float>(1);
// FP: "9 -> 10;
// FP: "10 -> 11;
*(DGAccumulator_sumval.cpu_wr_ptr()) = 0;
// FP: "11 -> 12;
_DGAccumulator_sum.rv = DGAccumulator_sumval.gpu_wr_ptr();
// FP: "12 -> 13;
Shared<float> DGAccumulator_sum_residualval = Shared<float>(1);
// FP: "13 -> 14;
// FP: "14 -> 15;
*(DGAccumulator_sum_residualval.cpu_wr_ptr()) = 0;
// FP: "15 -> 16;
_DGAccumulator_sum_residual.rv = DGAccumulator_sum_residualval.gpu_wr_ptr();
// FP: "16 -> 17;
Shared<float> max_residualval = Shared<float>(1);
// FP: "17 -> 18;
// FP: "18 -> 19;
*(max_residualval.cpu_wr_ptr()) = 0;
// FP: "19 -> 20;
_max_residual.rv = max_residualval.gpu_wr_ptr();
// FP: "20 -> 21;
Shared<float> max_valueval = Shared<float>(1);
// FP: "21 -> 22;
// FP: "22 -> 23;
*(max_valueval.cpu_wr_ptr()) = 0;
// FP: "23 -> 24;
_max_value.rv = max_valueval.gpu_wr_ptr();
// FP: "24 -> 25;
Shared<float> min_residualval = Shared<float>(1);
// FP: "25 -> 26;
// FP: "26 -> 27;
*(min_residualval.cpu_wr_ptr()) = 0;
// FP: "27 -> 28;
_min_residual.rv = min_residualval.gpu_wr_ptr();
// FP: "28 -> 29;
Shared<float> min_valueval = Shared<float>(1);
// FP: "29 -> 30;
// FP: "30 -> 31;
*(min_valueval.cpu_wr_ptr()) = 0;
// FP: "31 -> 32;
_min_value.rv = min_valueval.gpu_wr_ptr();
// FP: "32 -> 33;
PageRankSanity <<<blocks, threads>>>(ctx->gg, __begin, __end, local_tolerance, ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr(), _DGAccumulator_residual_over_tolerance, _DGAccumulator_sum, _DGAccumulator_sum_residual, _max_residual, _max_value, _min_residual, _min_value);
// FP: "33 -> 34;
check_cuda_kernel;
// FP: "34 -> 35;
DGAccumulator_residual_over_tolerance = *(DGAccumulator_residual_over_toleranceval.cpu_rd_ptr());
// FP: "35 -> 36;
DGAccumulator_sum = *(DGAccumulator_sumval.cpu_rd_ptr());
// FP: "36 -> 37;
DGAccumulator_sum_residual = *(DGAccumulator_sum_residualval.cpu_rd_ptr());
// FP: "37 -> 38;
max_residual = *(max_residualval.cpu_rd_ptr());
// FP: "38 -> 39;
max_value = *(max_valueval.cpu_rd_ptr());
// FP: "39 -> 40;
min_residual = *(min_residualval.cpu_rd_ptr());
// FP: "40 -> 41;
min_value = *(min_valueval.cpu_rd_ptr());
// FP: "41 -> 42;
}
void PageRankSanity_allNodes_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRankSanity_cuda(0, ctx->gg.nnodes, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRankSanity_masterNodes_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRankSanity_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx);
// FP: "2 -> 3;
}
void PageRankSanity_nodesWithEdges_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
PageRankSanity_cuda(0, ctx->numNodesWithEdges, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx);
// FP: "2 -> 3;
}
|
911f7755468e2f8dc5e5c12c22ecfb6034708774.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> s d c
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_z
__global__
void magma_zlarfg_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx,
magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaDoubleComplex scale;
double xnorm;
magmaDoubleComplex dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_Z_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
magmaDoubleComplex alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
double beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
double alphar = MAGMA_Z_REAL(alpha);
double alphai = MAGMA_Z_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
double beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_Z_MAKE(beta, 0.);
alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha));
scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_Z_MUL(dxi, scale);
} else {
*dtau = MAGMA_Z_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
    with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
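/*
    Real-valued sketch of what the kernel above computes (reference only; the complex
    branch is analogous, building tau and scale with MAGMA_Z_MAKE and MAGMA_Z_DIV):
        beta = -sign(alpha) * sqrt( alpha^2 + ||dx||^2 )
        tau  = (beta - alpha) / beta
        v    = [ 1 ; dx / (alpha - beta) ]
    so that H [alpha; dx] = [beta; 0].
*/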
extern "C" void
magma_zlarfg_gpu( magma_int_t n, magmaDoubleComplex *dx0, magmaDoubleComplex *dx,
magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex *dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
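    /* e.g. n = 1000 with BLOCK_SIZE = 512 gives (1000+511)/512 = 2 blocks of 512 threads
       (illustrative values only; the kernel guards its loads/stores with j < n-1) */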
/* recomputing the norm */
//magmablas_dznrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_dznrm2_cols(n-1, 1, dx0+1, n, dxnorm);
hipLaunchKernelGGL(( magma_zlarfg_gpu_kernel), dim3(blocks), dim3(threads),
0, magma_stream , n, dx0, dx, dtau, dxnorm, dAkk);
}
| 911f7755468e2f8dc5e5c12c22ecfb6034708774.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> s d c
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_z
__global__
void magma_zlarfg_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx,
magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaDoubleComplex scale;
double xnorm;
magmaDoubleComplex dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_Z_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
magmaDoubleComplex alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
double beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
double alphar = MAGMA_Z_REAL(alpha);
double alphai = MAGMA_Z_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
double beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_Z_MAKE(beta, 0.);
alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha));
scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_Z_MUL(dxi, scale);
} else {
*dtau = MAGMA_Z_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
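/*
    Real-valued sketch of what the kernel above computes (reference only; the complex
    branch is analogous, building tau and scale with MAGMA_Z_MAKE and MAGMA_Z_DIV):
        beta = -sign(alpha) * sqrt( alpha^2 + ||dx||^2 )
        tau  = (beta - alpha) / beta
        v    = [ 1 ; dx / (alpha - beta) ]
    so that H [alpha; dx] = [beta; 0].
*/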
extern "C" void
magma_zlarfg_gpu( magma_int_t n, magmaDoubleComplex *dx0, magmaDoubleComplex *dx,
magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex *dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
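    /* e.g. n = 1000 with BLOCK_SIZE = 512 gives (1000+511)/512 = 2 blocks of 512 threads
       (illustrative values only; the kernel guards its loads/stores with j < n-1) */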
/* recomputing the norm */
//magmablas_dznrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_dznrm2_cols(n-1, 1, dx0+1, n, dxnorm);
magma_zlarfg_gpu_kernel<<< blocks, threads,
0, magma_stream >>>(n, dx0, dx, dtau, dxnorm, dAkk);
}
|
aaab3af8b6841b54113f9ae7eff8f0334bdfe5ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <psc_cuda.h>
// FIXME, do this always?
#define NO_CHECKERBOARD
#define BLOCKSIZE_X 1
#define BLOCKSIZE_Y 4
#define BLOCKSIZE_Z 4
#define PFX(x) sort_##x
#include "constants.c"
// FIXME, use const mem for some params
#if 0
__global__ static void find_cell_indices_by_cell(int n_part, particles_cuda_dev_t h_dev,
int *d_cnis, int *d_ids, int ldims_y)
{
int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (i < n_part) {
particle_cuda_real_t xi[3] = {
h_dev.xi4[i].x * d_consts.dxi[0],
h_dev.xi4[i].y * d_consts.dxi[1],
h_dev.xi4[i].z * d_consts.dxi[2] };
int pos[3];
for (int d = 0; d < 3; d++) {
pos[d] = __float2int_rd(xi[d]);
}
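    // The packing below appears to put the 8x8 super-cell index in bits 6 and up and a
    // Morton-style interleave of the low 3 bits of pos[1] and pos[2] in bits 0-5
    // (a reading of the shift pattern; compare the host-side variant further down in this file).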
int idx = (((pos[2] / 8) * (ldims_y / 8) + (pos[1] / 8)) << 6);
idx |=
((pos[2] & 4) << 3) |
((pos[1] & 4) << 2);
idx |=
((pos[2] & 2) << 2) |
((pos[1] & 2) << 1) |
((pos[2] & 1) << 1) |
((pos[1] & 1) << 0);
d_cnis[i] = idx;
d_ids[i] = i;
}
}
static void
sort_find_cell_indices_by_cell_device(struct psc_particles *prts, struct psc_patch *patch,
int *d_cnis, int *d_ids)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
int dimBlock[2] = { THREADS_PER_BLOCK, 1 };
int dimGrid[2] = { (prts->n_part + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 };
RUN_KERNEL(dimGrid, dimBlock,
find_cell_indices_by_cell, (prts->n_part, *cuda->h_dev, d_cnis, d_ids,
patch->ldims[1]));
}
// FIXME, specific to 1x8x8, should be in ! .cu, so that cell_map works
static void __unused
sort_find_cell_indices_host(struct psc_particles *prts, struct psc_patch *patch,
int *d_cnis, int *d_ids)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
int n_part = prts->n_part;
particles_cuda_dev_t *h_dev = cuda->h_dev;
int *h_cnis = (int *) malloc(n_part * sizeof(*h_cnis));
int *h_ids = (int *) malloc(n_part * sizeof(*h_ids));
float4 *h_xi4 = (float4 *) malloc(n_part * sizeof(*h_xi4));
check(hipMemcpy(h_xi4, h_dev->xi4, n_part * sizeof(float4),
hipMemcpyDeviceToHost));
for (int i = 0; i < n_part; i++) {
particle_cuda_real_t dxi = 1.f / ppsc->dx[0];
particle_cuda_real_t dyi = 1.f / ppsc->dx[1];
particle_cuda_real_t dzi = 1.f / ppsc->dx[2];
particle_cuda_real_t xi[3] = {
(h_xi4[i].x - patch->xb[0]) * dxi,
(h_xi4[i].y - patch->xb[1]) * dyi,
(h_xi4[i].z - patch->xb[2]) * dzi };
int pos[3];
for (int d = 0; d < 3; d++) {
pos[d] = particle_cuda_real_fint(xi[d]);
}
int idx = (((pos[2] / 8) * (patch->ldims[1] / 8) + (pos[1] / 8)) << 6) |
((pos[2] & 4) << 3) |
((pos[2] & 2) << 2) |
((pos[2] & 1) << 1) |
((pos[1] & 4) << 2) |
((pos[1] & 2) << 1) |
((pos[1] & 1) << 0);
h_cnis[i] = idx;
h_ids[i] = i;
}
check(hipMemcpy(d_cnis, h_cnis, n_part * sizeof(*h_cnis),
hipMemcpyHostToDevice));
check(hipMemcpy(d_ids, h_ids, n_part * sizeof(*h_ids),
hipMemcpyHostToDevice));
free(h_xi4);
free(h_cnis);
free(h_ids);
}
static void __unused
sort_reorder_host(struct psc_particles *prts, int *d_ids)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
int n_part = prts->n_part;
particles_cuda_dev_t *h_dev = cuda->h_dev;
  int *h_ids = (int *) malloc(n_part * sizeof(*h_ids));
float4 *h_xi4 = (float4 *) malloc(n_part * sizeof(*h_xi4));
float4 *h_pxi4 = (float4 *) malloc(n_part * sizeof(*h_pxi4));
check(hipMemcpy(h_xi4, h_dev->xi4, n_part * sizeof(float4),
hipMemcpyDeviceToHost));
check(hipMemcpy(h_pxi4, h_dev->pxi4, n_part * sizeof(float4),
hipMemcpyDeviceToHost));
check(hipMemcpy(h_ids, d_ids, n_part * sizeof(*h_ids),
hipMemcpyDeviceToHost));
// move into new position
float4 *xi4 = (float4 *) malloc(n_part * sizeof(*xi4));
float4 *pxi4 = (float4 *) malloc(n_part * sizeof(*pxi4));
for (int i = 0; i < n_part; i++) {
xi4[i] = h_xi4[h_ids[i]];
pxi4[i] = h_pxi4[h_ids[i]];
}
check(hipMemcpy(h_dev->xi4, xi4, n_part * sizeof(float4),
hipMemcpyHostToDevice));
check(hipMemcpy(h_dev->pxi4, pxi4, n_part * sizeof(float4),
hipMemcpyHostToDevice));
free(xi4);
free(pxi4);
free(h_xi4);
free(h_pxi4);
free(h_ids);
}
__global__ static void
sort_reorder_by_cell(int n_part, particles_cuda_dev_t h_dev, float4 *xi4, float4 *pxi4,
int *d_cnis, int *d_ids)
{
int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (i < n_part) {
xi4[i] = h_dev.xi4[d_ids[i]];
pxi4[i] = h_dev.pxi4[d_ids[i]];
}
}
static void
sort_reorder_by_cell_device(struct psc_particles *prts, int *d_cnis, int *d_ids)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
assert(0); // need to use alt_*, can't alloc/free
float4 *xi4, *pxi4;
check(hipMalloc((void **) &xi4, prts->n_part * sizeof(*xi4)));
check(hipMalloc((void **) &pxi4, prts->n_part * sizeof(*pxi4)));
int dimBlock[2] = { THREADS_PER_BLOCK, 1 };
int dimGrid[2] = { (prts->n_part + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 };
RUN_KERNEL(dimGrid, dimBlock,
sort_reorder_by_cell, (prts->n_part, *cuda->h_dev, xi4, pxi4, d_cnis, d_ids));
check(hipFree(cuda->h_dev->xi4));
check(hipFree(cuda->h_dev->pxi4));
cuda->h_dev->xi4 = xi4;
cuda->h_dev->pxi4 = pxi4;
}
EXTERN_C void
sort_patch_prep(int p, struct psc_particles *prts, int **d_cnis, int **d_ids)
{
check(hipMalloc((void **) d_cnis, prts->n_part * sizeof(*d_cnis)));
check(hipMalloc((void **) d_ids, prts->n_part * sizeof(*d_ids)));
sort_set_constants(prts, NULL);
}
EXTERN_C void
sort_patch_done(int p, struct psc_particles *prts, int *d_cnis, int *d_ids)
{
check(hipFree(d_cnis));
check(hipFree(d_ids));
}
EXTERN_C void
sort_patch_by_cell(int p, struct psc_particles *prts)
{
struct psc_patch *patch = &ppsc->patch[p];
int *d_cnis, *d_ids;
check(hipMalloc((void **) &d_cnis, prts->n_part * sizeof(*d_cnis)));
check(hipMalloc((void **) &d_ids, prts->n_part * sizeof(*d_ids)));
sort_set_constants(prts, NULL);
sort_find_cell_indices_by_cell_device(prts, patch, d_cnis, d_ids);
sort_pairs_device((unsigned int *) d_cnis, (unsigned int *) d_ids, prts->n_part);
sort_reorder_by_cell_device(prts, d_cnis, d_ids);
check(hipFree(d_cnis));
check(hipFree(d_ids));
}
#endif
| aaab3af8b6841b54113f9ae7eff8f0334bdfe5ed.cu |
#include <psc_cuda.h>
// FIXME, do this always?
#define NO_CHECKERBOARD
#define BLOCKSIZE_X 1
#define BLOCKSIZE_Y 4
#define BLOCKSIZE_Z 4
#define PFX(x) sort_##x
#include "constants.c"
// FIXME, use const mem for some params
#if 0
__global__ static void find_cell_indices_by_cell(int n_part, particles_cuda_dev_t h_dev,
int *d_cnis, int *d_ids, int ldims_y)
{
int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (i < n_part) {
particle_cuda_real_t xi[3] = {
h_dev.xi4[i].x * d_consts.dxi[0],
h_dev.xi4[i].y * d_consts.dxi[1],
h_dev.xi4[i].z * d_consts.dxi[2] };
int pos[3];
for (int d = 0; d < 3; d++) {
pos[d] = __float2int_rd(xi[d]);
}
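    // The packing below appears to put the 8x8 super-cell index in bits 6 and up and a
    // Morton-style interleave of the low 3 bits of pos[1] and pos[2] in bits 0-5
    // (a reading of the shift pattern; compare the host-side variant further down in this file).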
int idx = (((pos[2] / 8) * (ldims_y / 8) + (pos[1] / 8)) << 6);
idx |=
((pos[2] & 4) << 3) |
((pos[1] & 4) << 2);
idx |=
((pos[2] & 2) << 2) |
((pos[1] & 2) << 1) |
((pos[2] & 1) << 1) |
((pos[1] & 1) << 0);
d_cnis[i] = idx;
d_ids[i] = i;
}
}
static void
sort_find_cell_indices_by_cell_device(struct psc_particles *prts, struct psc_patch *patch,
int *d_cnis, int *d_ids)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
int dimBlock[2] = { THREADS_PER_BLOCK, 1 };
int dimGrid[2] = { (prts->n_part + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 };
RUN_KERNEL(dimGrid, dimBlock,
find_cell_indices_by_cell, (prts->n_part, *cuda->h_dev, d_cnis, d_ids,
patch->ldims[1]));
}
// FIXME, specific to 1x8x8, should be in ! .cu, so that cell_map works
static void __unused
sort_find_cell_indices_host(struct psc_particles *prts, struct psc_patch *patch,
int *d_cnis, int *d_ids)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
int n_part = prts->n_part;
particles_cuda_dev_t *h_dev = cuda->h_dev;
int *h_cnis = (int *) malloc(n_part * sizeof(*h_cnis));
int *h_ids = (int *) malloc(n_part * sizeof(*h_ids));
float4 *h_xi4 = (float4 *) malloc(n_part * sizeof(*h_xi4));
check(cudaMemcpy(h_xi4, h_dev->xi4, n_part * sizeof(float4),
cudaMemcpyDeviceToHost));
for (int i = 0; i < n_part; i++) {
particle_cuda_real_t dxi = 1.f / ppsc->dx[0];
particle_cuda_real_t dyi = 1.f / ppsc->dx[1];
particle_cuda_real_t dzi = 1.f / ppsc->dx[2];
particle_cuda_real_t xi[3] = {
(h_xi4[i].x - patch->xb[0]) * dxi,
(h_xi4[i].y - patch->xb[1]) * dyi,
(h_xi4[i].z - patch->xb[2]) * dzi };
int pos[3];
for (int d = 0; d < 3; d++) {
pos[d] = particle_cuda_real_fint(xi[d]);
}
int idx = (((pos[2] / 8) * (patch->ldims[1] / 8) + (pos[1] / 8)) << 6) |
((pos[2] & 4) << 3) |
((pos[2] & 2) << 2) |
((pos[2] & 1) << 1) |
((pos[1] & 4) << 2) |
((pos[1] & 2) << 1) |
((pos[1] & 1) << 0);
h_cnis[i] = idx;
h_ids[i] = i;
}
check(cudaMemcpy(d_cnis, h_cnis, n_part * sizeof(*h_cnis),
cudaMemcpyHostToDevice));
check(cudaMemcpy(d_ids, h_ids, n_part * sizeof(*h_ids),
cudaMemcpyHostToDevice));
free(h_xi4);
free(h_cnis);
free(h_ids);
}
static void __unused
sort_reorder_host(struct psc_particles *prts, int *d_ids)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
int n_part = prts->n_part;
particles_cuda_dev_t *h_dev = cuda->h_dev;
  int *h_ids = (int *) malloc(n_part * sizeof(*h_ids));
float4 *h_xi4 = (float4 *) malloc(n_part * sizeof(*h_xi4));
float4 *h_pxi4 = (float4 *) malloc(n_part * sizeof(*h_pxi4));
check(cudaMemcpy(h_xi4, h_dev->xi4, n_part * sizeof(float4),
cudaMemcpyDeviceToHost));
check(cudaMemcpy(h_pxi4, h_dev->pxi4, n_part * sizeof(float4),
cudaMemcpyDeviceToHost));
check(cudaMemcpy(h_ids, d_ids, n_part * sizeof(*h_ids),
cudaMemcpyDeviceToHost));
// move into new position
float4 *xi4 = (float4 *) malloc(n_part * sizeof(*xi4));
float4 *pxi4 = (float4 *) malloc(n_part * sizeof(*pxi4));
for (int i = 0; i < n_part; i++) {
xi4[i] = h_xi4[h_ids[i]];
pxi4[i] = h_pxi4[h_ids[i]];
}
check(cudaMemcpy(h_dev->xi4, xi4, n_part * sizeof(float4),
cudaMemcpyHostToDevice));
check(cudaMemcpy(h_dev->pxi4, pxi4, n_part * sizeof(float4),
cudaMemcpyHostToDevice));
free(xi4);
free(pxi4);
free(h_xi4);
free(h_pxi4);
free(h_ids);
}
__global__ static void
sort_reorder_by_cell(int n_part, particles_cuda_dev_t h_dev, float4 *xi4, float4 *pxi4,
int *d_cnis, int *d_ids)
{
int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
if (i < n_part) {
xi4[i] = h_dev.xi4[d_ids[i]];
pxi4[i] = h_dev.pxi4[d_ids[i]];
}
}
static void
sort_reorder_by_cell_device(struct psc_particles *prts, int *d_cnis, int *d_ids)
{
struct psc_particles_cuda *cuda = psc_particles_cuda(prts);
assert(0); // need to use alt_*, can't alloc/free
float4 *xi4, *pxi4;
check(cudaMalloc((void **) &xi4, prts->n_part * sizeof(*xi4)));
check(cudaMalloc((void **) &pxi4, prts->n_part * sizeof(*pxi4)));
int dimBlock[2] = { THREADS_PER_BLOCK, 1 };
int dimGrid[2] = { (prts->n_part + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, 1 };
RUN_KERNEL(dimGrid, dimBlock,
sort_reorder_by_cell, (prts->n_part, *cuda->h_dev, xi4, pxi4, d_cnis, d_ids));
check(cudaFree(cuda->h_dev->xi4));
check(cudaFree(cuda->h_dev->pxi4));
cuda->h_dev->xi4 = xi4;
cuda->h_dev->pxi4 = pxi4;
}
EXTERN_C void
sort_patch_prep(int p, struct psc_particles *prts, int **d_cnis, int **d_ids)
{
check(cudaMalloc((void **) d_cnis, prts->n_part * sizeof(*d_cnis)));
check(cudaMalloc((void **) d_ids, prts->n_part * sizeof(*d_ids)));
sort_set_constants(prts, NULL);
}
EXTERN_C void
sort_patch_done(int p, struct psc_particles *prts, int *d_cnis, int *d_ids)
{
check(cudaFree(d_cnis));
check(cudaFree(d_ids));
}
EXTERN_C void
sort_patch_by_cell(int p, struct psc_particles *prts)
{
struct psc_patch *patch = &ppsc->patch[p];
int *d_cnis, *d_ids;
check(cudaMalloc((void **) &d_cnis, prts->n_part * sizeof(*d_cnis)));
check(cudaMalloc((void **) &d_ids, prts->n_part * sizeof(*d_ids)));
sort_set_constants(prts, NULL);
sort_find_cell_indices_by_cell_device(prts, patch, d_cnis, d_ids);
sort_pairs_device((unsigned int *) d_cnis, (unsigned int *) d_ids, prts->n_part);
sort_reorder_by_cell_device(prts, d_cnis, d_ids);
check(cudaFree(d_cnis));
check(cudaFree(d_ids));
}
#endif
|
a23cfa29aeed1c01213c5b6785d9907e7f065d29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include "file.h"
#include "common.h"
#include "cuerr.h"
//#include "kernels.hip"
__extern__shared__ float sh_A0[];
__global__ void block2D_hybrid_coarsen_x(float c0,float c1,float *A0,float *Anext, int nx, int ny, int nz)
{
//thread coarsening along x direction
const int i = blockIdx.x*blockDim.x*2+threadIdx.x;
const int i2= blockIdx.x*blockDim.x*2+threadIdx.x+blockDim.x;
const int j = blockIdx.y*blockDim.y+threadIdx.y;
const int sh_id=threadIdx.x + threadIdx.y*blockDim.x*2;
const int sh_id2=threadIdx.x +blockDim.x+ threadIdx.y*blockDim.x*2;
  //shared memory
//extern __shared__ float sh_A0[];
sh_A0[sh_id]=0.0f;
sh_A0[sh_id2]=0.0f;
__syncthreads();
//get available region for load and store
const bool w_region = i>0 && j>0 &&(i<(nx-1)) &&(j<(ny-1)) ;
const bool w_region2 = j>0 &&(i2<nx-1) &&(j<ny-1) ;
const bool x_l_bound = (threadIdx.x==0);
const bool x_h_bound = ((threadIdx.x+blockDim.x)==(blockDim.x*2-1));
const bool y_l_bound = (threadIdx.y==0);
const bool y_h_bound = (threadIdx.y==(blockDim.y-1));
//register for bottom and top planes
  //because of thread coarsening, we need to double the registers
float bottom=0.0f,bottom2=0.0f,top=0.0f,top2=0.0f;
//load data for bottom and current
if((i<nx) &&(j<ny))
{
bottom=A0[Index3D (nx, ny, i, j, 0)];
sh_A0[sh_id]=A0[Index3D (nx, ny, i, j, 1)];
}
if((i2<nx) &&(j<ny))
{
bottom2=A0[Index3D (nx, ny, i2, j, 0)];
sh_A0[sh_id2]=A0[Index3D (nx, ny, i2, j, 1)];
}
__syncthreads();
for(int k=1;k<nz-1;k++)
{
float a_left_right,a_up,a_down;
//load required data on xy planes
  //if it is in shared memory, load from shared memory
//if not, load from global memory
if((i<nx) &&(j<ny))
top=A0[Index3D (nx, ny, i, j, k+1)];
if(w_region)
{
a_up =y_h_bound?A0[Index3D (nx, ny, i, j+1, k )]:sh_A0[sh_id+2*blockDim.x];
a_down =y_l_bound?A0[Index3D (nx, ny, i, j-1, k )]:sh_A0[sh_id-2*blockDim.x];
a_left_right=x_l_bound?A0[Index3D (nx, ny, i-1, j, k )]:sh_A0[sh_id-1];
Anext[Index3D (nx, ny, i, j, k)] = (top + bottom + a_up + a_down + sh_A0[sh_id+1] +a_left_right)*c1
- sh_A0[sh_id]*c0;
}
//load another block
if((i2<nx) &&(j<ny))
top2=A0[Index3D (nx, ny, i2, j, k+1)];
if(w_region2)
{
a_up =y_h_bound?A0[Index3D (nx, ny, i2, j+1, k )]:sh_A0[sh_id2+2*blockDim.x];
a_down =y_l_bound?A0[Index3D (nx, ny, i2, j-1, k )]:sh_A0[sh_id2-2*blockDim.x];
a_left_right=x_h_bound?A0[Index3D (nx, ny, i2+1, j, k )]:sh_A0[sh_id2+1];
Anext[Index3D (nx, ny, i2, j, k)] = (top2 + bottom2 + a_up + a_down + a_left_right +sh_A0[sh_id2-1])*c1
- sh_A0[sh_id2]*c0;
}
//swap data
__syncthreads();
bottom=sh_A0[sh_id];
sh_A0[sh_id]=top;
bottom2=sh_A0[sh_id2];
sh_A0[sh_id2]=top2;
__syncthreads();
}
}
static int read_data(float *A0, int nx,int ny,int nz,FILE *fp)
{
int s=0;
for(int i=0;i<nz;i++)
{
for(int j=0;j<ny;j++)
{
for(int k=0;k<nx;k++)
{
fread(A0+s,sizeof(float),1,fp);
s++;
}
}
}
return 0;
}
int main(int argc, char** argv) {
//struct pb_TimerSet timers;
//struct pb_Parameters *parameters;
printf("CUDA accelerated 7 points stencil codes****\n");
printf("Original version by Li-Wen Chang <[email protected]> and I-Jui Sung<[email protected]>\n");
printf("This version maintained by Chris Rodrigues ***********\n");
//parameters = pb_ReadParameters(&argc, argv);
//pb_InitializeTimerSet(&timers);
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
//declaration
int nx,ny,nz;
int size;
int iteration;
float c0=1.0f/6.0f;
float c1=1.0f/6.0f/6.0f;
/*if (argc<5)
{
printf("Usage: probe nx ny nz tx ty t\n"
"nx: the grid size x\n"
"ny: the grid size y\n"
"nz: the grid size z\n"
"t: the iteration time\n");
return -1;
}*/
//nx = atoi(argv[1]);
nx = 128;
//if (nx<1)
// return -1;
ny = 128;
//if (ny<1)
// return -1;
nz = 32;
//if (nz<1)
// return -1;
//iteration = atoi(argv[4]);
//if(iteration<1)
// return -1;
//host data
float *h_A0;
float *h_Anext;
//device
float *d_A0;
float *d_Anext;
size=nx*ny*nz;
h_A0=(float*)malloc(sizeof(float)*size);
h_Anext=(float*)malloc(sizeof(float)*size);
//pb_SwitchToTimer(&timers, pb_TimerID_IO);
//FILE *fp = fopen(parameters->inpFiles[0], "rb");
FILE *fp = fopen("input/128x128x32.bin", "rb");
printf("before read_data \n");
//read_data(h_A0, nx,ny,nz,fp);
fclose(fp);
printf("after read_data \n");
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
//memory allocation
hipMalloc((void **)&d_A0, size*sizeof(float));
hipMalloc((void **)&d_Anext, size*sizeof(float));
hipMemset(d_Anext,0,size*sizeof(float));
hipMemset(h_A0,0,size*sizeof(float));
//memory copy
hipMemcpy(d_A0, h_A0, size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Anext, d_A0, size*sizeof(float), hipMemcpyDeviceToDevice);
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
//only use tx-by-ty threads
int tx=32;
int ty=4;
dim3 block (tx, ty, 1);
  //also change the thread-to-data mapping from tx by ty to 2tx x ty
dim3 grid ((nx+tx*2-1)/(tx*2), (ny+ty-1)/ty,1);
int sh_size = tx*2*ty*sizeof(float);
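  //with the hard-coded sizes above (nx=ny=128, nz=32, tx=32, ty=4) this works out to a
  //2 x 32 grid of 32x4 blocks, each using 32*2*4*sizeof(float) = 1024 bytes of dynamic
  //shared memory (illustrative arithmetic only)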
//main execution
//pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
iteration = 1;
for(int t=0;t<iteration;t++)
{
hipLaunchKernelGGL(( block2D_hybrid_coarsen_x), dim3(grid), dim3(block),sh_size, 0, c0,c1, d_A0, d_Anext, nx, ny, nz);
float *d_temp = d_A0;
d_A0 = d_Anext;
d_Anext = d_temp;
}
CUERR // check and clear any existing errors
float *d_temp = d_A0;
d_A0 = d_Anext;
d_Anext = d_temp;
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
hipMemcpy(h_Anext, d_Anext,size*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_A0);
hipFree(d_Anext);
/*
if (parameters->outFile) {
pb_SwitchToTimer(&timers, pb_TimerID_IO);
outputData(parameters->outFile,h_Anext,nx,ny,nz);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
*/
free (h_A0);
free (h_Anext);
//pb_SwitchToTimer(&timers, pb_TimerID_NONE);
//pb_PrintTimerSet(&timers);
//pb_FreeParameters(parameters);
return 0;
}
| a23cfa29aeed1c01213c5b6785d9907e7f065d29.cu |
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include "file.h"
#include "common.h"
#include "cuerr.h"
//#include "kernels.cu"
__extern__shared__ float sh_A0[];
__global__ void block2D_hybrid_coarsen_x(float c0,float c1,float *A0,float *Anext, int nx, int ny, int nz)
{
//thread coarsening along x direction
const int i = blockIdx.x*blockDim.x*2+threadIdx.x;
const int i2= blockIdx.x*blockDim.x*2+threadIdx.x+blockDim.x;
const int j = blockIdx.y*blockDim.y+threadIdx.y;
const int sh_id=threadIdx.x + threadIdx.y*blockDim.x*2;
const int sh_id2=threadIdx.x +blockDim.x+ threadIdx.y*blockDim.x*2;
  //shared memory
//extern __shared__ float sh_A0[];
sh_A0[sh_id]=0.0f;
sh_A0[sh_id2]=0.0f;
__syncthreads();
//get available region for load and store
const bool w_region = i>0 && j>0 &&(i<(nx-1)) &&(j<(ny-1)) ;
const bool w_region2 = j>0 &&(i2<nx-1) &&(j<ny-1) ;
const bool x_l_bound = (threadIdx.x==0);
const bool x_h_bound = ((threadIdx.x+blockDim.x)==(blockDim.x*2-1));
const bool y_l_bound = (threadIdx.y==0);
const bool y_h_bound = (threadIdx.y==(blockDim.y-1));
//register for bottom and top planes
  //because of thread coarsening, we need to double the registers
float bottom=0.0f,bottom2=0.0f,top=0.0f,top2=0.0f;
//load data for bottom and current
if((i<nx) &&(j<ny))
{
bottom=A0[Index3D (nx, ny, i, j, 0)];
sh_A0[sh_id]=A0[Index3D (nx, ny, i, j, 1)];
}
if((i2<nx) &&(j<ny))
{
bottom2=A0[Index3D (nx, ny, i2, j, 0)];
sh_A0[sh_id2]=A0[Index3D (nx, ny, i2, j, 1)];
}
__syncthreads();
for(int k=1;k<nz-1;k++)
{
float a_left_right,a_up,a_down;
//load required data on xy planes
//if it on shared memory, load from shared memory
//if not, load from global memory
if((i<nx) &&(j<ny))
top=A0[Index3D (nx, ny, i, j, k+1)];
if(w_region)
{
a_up =y_h_bound?A0[Index3D (nx, ny, i, j+1, k )]:sh_A0[sh_id+2*blockDim.x];
a_down =y_l_bound?A0[Index3D (nx, ny, i, j-1, k )]:sh_A0[sh_id-2*blockDim.x];
a_left_right=x_l_bound?A0[Index3D (nx, ny, i-1, j, k )]:sh_A0[sh_id-1];
Anext[Index3D (nx, ny, i, j, k)] = (top + bottom + a_up + a_down + sh_A0[sh_id+1] +a_left_right)*c1
- sh_A0[sh_id]*c0;
}
//load another block
if((i2<nx) &&(j<ny))
top2=A0[Index3D (nx, ny, i2, j, k+1)];
if(w_region2)
{
a_up =y_h_bound?A0[Index3D (nx, ny, i2, j+1, k )]:sh_A0[sh_id2+2*blockDim.x];
a_down =y_l_bound?A0[Index3D (nx, ny, i2, j-1, k )]:sh_A0[sh_id2-2*blockDim.x];
a_left_right=x_h_bound?A0[Index3D (nx, ny, i2+1, j, k )]:sh_A0[sh_id2+1];
Anext[Index3D (nx, ny, i2, j, k)] = (top2 + bottom2 + a_up + a_down + a_left_right +sh_A0[sh_id2-1])*c1
- sh_A0[sh_id2]*c0;
}
//swap data
__syncthreads();
bottom=sh_A0[sh_id];
sh_A0[sh_id]=top;
bottom2=sh_A0[sh_id2];
sh_A0[sh_id2]=top2;
__syncthreads();
}
}
static int read_data(float *A0, int nx,int ny,int nz,FILE *fp)
{
int s=0;
for(int i=0;i<nz;i++)
{
for(int j=0;j<ny;j++)
{
for(int k=0;k<nx;k++)
{
fread(A0+s,sizeof(float),1,fp);
s++;
}
}
}
return 0;
}
int main(int argc, char** argv) {
//struct pb_TimerSet timers;
//struct pb_Parameters *parameters;
printf("CUDA accelerated 7 points stencil codes****\n");
printf("Original version by Li-Wen Chang <[email protected]> and I-Jui Sung<[email protected]>\n");
printf("This version maintained by Chris Rodrigues ***********\n");
//parameters = pb_ReadParameters(&argc, argv);
//pb_InitializeTimerSet(&timers);
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
//declaration
int nx,ny,nz;
int size;
int iteration;
float c0=1.0f/6.0f;
float c1=1.0f/6.0f/6.0f;
/*if (argc<5)
{
printf("Usage: probe nx ny nz tx ty t\n"
"nx: the grid size x\n"
"ny: the grid size y\n"
"nz: the grid size z\n"
"t: the iteration time\n");
return -1;
}*/
//nx = atoi(argv[1]);
nx = 128;
//if (nx<1)
// return -1;
ny = 128;
//if (ny<1)
// return -1;
nz = 32;
//if (nz<1)
// return -1;
//iteration = atoi(argv[4]);
//if(iteration<1)
// return -1;
//host data
float *h_A0;
float *h_Anext;
//device
float *d_A0;
float *d_Anext;
size=nx*ny*nz;
h_A0=(float*)malloc(sizeof(float)*size);
h_Anext=(float*)malloc(sizeof(float)*size);
//pb_SwitchToTimer(&timers, pb_TimerID_IO);
//FILE *fp = fopen(parameters->inpFiles[0], "rb");
FILE *fp = fopen("input/128x128x32.bin", "rb");
printf("before read_data \n");
//read_data(h_A0, nx,ny,nz,fp);
fclose(fp);
printf("after read_data \n");
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
//memory allocation
cudaMalloc((void **)&d_A0, size*sizeof(float));
cudaMalloc((void **)&d_Anext, size*sizeof(float));
cudaMemset(d_Anext,0,size*sizeof(float));
cudaMemset(h_A0,0,size*sizeof(float));
//memory copy
cudaMemcpy(d_A0, h_A0, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Anext, d_A0, size*sizeof(float), cudaMemcpyDeviceToDevice);
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
//only use tx-by-ty threads
int tx=32;
int ty=4;
dim3 block (tx, ty, 1);
  //also change the thread-to-data mapping from tx by ty to 2tx x ty
dim3 grid ((nx+tx*2-1)/(tx*2), (ny+ty-1)/ty,1);
int sh_size = tx*2*ty*sizeof(float);
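  //with the hard-coded sizes above (nx=ny=128, nz=32, tx=32, ty=4) this works out to a
  //2 x 32 grid of 32x4 blocks, each using 32*2*4*sizeof(float) = 1024 bytes of dynamic
  //shared memory (illustrative arithmetic only)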
//main execution
//pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
iteration = 1;
for(int t=0;t<iteration;t++)
{
block2D_hybrid_coarsen_x<<<grid, block,sh_size>>>(c0,c1, d_A0, d_Anext, nx, ny, nz);
float *d_temp = d_A0;
d_A0 = d_Anext;
d_Anext = d_temp;
}
CUERR // check and clear any existing errors
float *d_temp = d_A0;
d_A0 = d_Anext;
d_Anext = d_temp;
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
cudaMemcpy(h_Anext, d_Anext,size*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_A0);
cudaFree(d_Anext);
/*
if (parameters->outFile) {
pb_SwitchToTimer(&timers, pb_TimerID_IO);
outputData(parameters->outFile,h_Anext,nx,ny,nz);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
*/
free (h_A0);
free (h_Anext);
//pb_SwitchToTimer(&timers, pb_TimerID_NONE);
//pb_PrintTimerSet(&timers);
//pb_FreeParameters(parameters);
return 0;
}
|
909fa33f5653fd1946fad77b0f48b5ba39cfe96b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void coalesced(float *A, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) C[i] = A[i];
} | 909fa33f5653fd1946fad77b0f48b5ba39cfe96b.cu | #include "includes.h"
__global__ void coalesced(float *A, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) C[i] = A[i];
} |
45296027b6ca65b8e5ae941f542df118d0fdf540.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// REQUIRES: nvptx-registered-target
// REQUIRES: amdgpu-registered-target
// Make sure we don't allow dynamic initialization for device
// variables, but accept empty constructors allowed by CUDA.
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 \
// RUN: -fno-threadsafe-statics -emit-llvm -o - %s | FileCheck -check-prefixes=DEVICE,NVPTX %s
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -std=c++11 \
// RUN: -fno-threadsafe-statics -emit-llvm -o - %s | FileCheck -check-prefixes=HOST %s
// RUN: %clang_cc1 -triple amdgcn -fcuda-is-device -std=c++11 \
// RUN: -fno-threadsafe-statics -emit-llvm -o - %s | FileCheck -check-prefixes=DEVICE,AMDGCN %s
#ifdef __clang__
#include "Inputs/cuda.h"
#endif
// Use the types we share with Sema tests.
#include "Inputs/cuda-initializers.h"
__device__ int d_v;
// DEVICE: @d_v = addrspace(1) externally_initialized global i32 0,
// HOST: @d_v = internal global i32 undef,
__shared__ int s_v;
// DEVICE: @s_v = addrspace(3) global i32 undef,
// HOST: @s_v = internal global i32 undef,
__constant__ int c_v;
// DEVICE: addrspace(4) externally_initialized global i32 0,
// HOST: @c_v = internal global i32 undef,
__device__ int d_v_i = 1;
// DEVICE: @d_v_i = addrspace(1) externally_initialized global i32 1,
// HOST: @d_v_i = internal global i32 undef,
// For `static` device variables, assume they won't be addressed from the host
// side.
static __device__ int d_s_v_i = 1;
// DEVICE: @_ZL7d_s_v_i = internal addrspace(1) global i32 1,
// Dummy function to keep static variables referenced.
__device__ int foo() {
return d_s_v_i;
}
// trivial constructor -- allowed
__device__ T d_t;
// DEVICE: @d_t = addrspace(1) externally_initialized global %struct.T zeroinitializer
// HOST: @d_t = internal global %struct.T undef,
__shared__ T s_t;
// DEVICE: @s_t = addrspace(3) global %struct.T undef,
// HOST: @s_t = internal global %struct.T undef,
__constant__ T c_t;
// DEVICE: @c_t = addrspace(4) externally_initialized global %struct.T zeroinitializer,
// HOST: @c_t = internal global %struct.T undef,
__device__ T d_t_i = {2};
// DEVICE: @d_t_i = addrspace(1) externally_initialized global %struct.T { i32 2 },
// HOST: @d_t_i = internal global %struct.T undef,
__constant__ T c_t_i = {2};
// DEVICE: @c_t_i = addrspace(4) externally_initialized global %struct.T { i32 2 },
// HOST: @c_t_i = internal global %struct.T undef,
// empty constructor
__device__ EC d_ec;
// DEVICE: @d_ec = addrspace(1) externally_initialized global %struct.EC zeroinitializer,
// HOST: @d_ec = internal global %struct.EC undef,
__shared__ EC s_ec;
// DEVICE: @s_ec = addrspace(3) global %struct.EC undef,
// HOST: @s_ec = internal global %struct.EC undef,
__constant__ EC c_ec;
// DEVICE: @c_ec = addrspace(4) externally_initialized global %struct.EC zeroinitializer,
// HOST: @c_ec = internal global %struct.EC undef
// empty destructor
__device__ ED d_ed;
// DEVICE: @d_ed = addrspace(1) externally_initialized global %struct.ED zeroinitializer,
// HOST: @d_ed = internal global %struct.ED undef,
__shared__ ED s_ed;
// DEVICE: @s_ed = addrspace(3) global %struct.ED undef,
// HOST: @s_ed = internal global %struct.ED undef,
__constant__ ED c_ed;
// DEVICE: @c_ed = addrspace(4) externally_initialized global %struct.ED zeroinitializer,
// HOST: @c_ed = internal global %struct.ED undef,
__device__ ECD d_ecd;
// DEVICE: @d_ecd = addrspace(1) externally_initialized global %struct.ECD zeroinitializer,
// HOST: @d_ecd = internal global %struct.ECD undef,
__shared__ ECD s_ecd;
// DEVICE: @s_ecd = addrspace(3) global %struct.ECD undef,
// HOST: @s_ecd = internal global %struct.ECD undef,
__constant__ ECD c_ecd;
// DEVICE: @c_ecd = addrspace(4) externally_initialized global %struct.ECD zeroinitializer,
// HOST: @c_ecd = internal global %struct.ECD undef,
// empty templated constructor -- allowed with no arguments
__device__ ETC d_etc;
// DEVICE: @d_etc = addrspace(1) externally_initialized global %struct.ETC zeroinitializer,
// HOST: @d_etc = internal global %struct.ETC undef,
__shared__ ETC s_etc;
// DEVICE: @s_etc = addrspace(3) global %struct.ETC undef,
// HOST: @s_etc = internal global %struct.ETC undef,
__constant__ ETC c_etc;
// DEVICE: @c_etc = addrspace(4) externally_initialized global %struct.ETC zeroinitializer,
// HOST: @c_etc = internal global %struct.ETC undef,
__device__ NCFS d_ncfs;
// DEVICE: @d_ncfs = addrspace(1) externally_initialized global %struct.NCFS { i32 3 }
// HOST: @d_ncfs = internal global %struct.NCFS undef,
__constant__ NCFS c_ncfs;
// DEVICE: @c_ncfs = addrspace(4) externally_initialized global %struct.NCFS { i32 3 }
// HOST: @c_ncfs = internal global %struct.NCFS undef,
// Regular base class -- allowed
__device__ T_B_T d_t_b_t;
// DEVICE: @d_t_b_t = addrspace(1) externally_initialized global %struct.T_B_T zeroinitializer,
// HOST: @d_t_b_t = internal global %struct.T_B_T undef,
__shared__ T_B_T s_t_b_t;
// DEVICE: @s_t_b_t = addrspace(3) global %struct.T_B_T undef,
// HOST: @s_t_b_t = internal global %struct.T_B_T undef,
__constant__ T_B_T c_t_b_t;
// DEVICE: @c_t_b_t = addrspace(4) externally_initialized global %struct.T_B_T zeroinitializer,
// HOST: @c_t_b_t = internal global %struct.T_B_T undef,
// Encapsulated object of allowed class -- allowed
__device__ T_F_T d_t_f_t;
// DEVICE: @d_t_f_t = addrspace(1) externally_initialized global %struct.T_F_T zeroinitializer,
// HOST: @d_t_f_t = internal global %struct.T_F_T undef,
__shared__ T_F_T s_t_f_t;
// DEVICE: @s_t_f_t = addrspace(3) global %struct.T_F_T undef,
// HOST: @s_t_f_t = internal global %struct.T_F_T undef,
__constant__ T_F_T c_t_f_t;
// DEVICE: @c_t_f_t = addrspace(4) externally_initialized global %struct.T_F_T zeroinitializer,
// HOST: @c_t_f_t = internal global %struct.T_F_T undef,
// array of allowed objects -- allowed
__device__ T_FA_T d_t_fa_t;
// DEVICE: @d_t_fa_t = addrspace(1) externally_initialized global %struct.T_FA_T zeroinitializer,
// HOST: @d_t_fa_t = internal global %struct.T_FA_T undef,
__shared__ T_FA_T s_t_fa_t;
// DEVICE: @s_t_fa_t = addrspace(3) global %struct.T_FA_T undef,
// HOST: @s_t_fa_t = internal global %struct.T_FA_T undef,
__constant__ T_FA_T c_t_fa_t;
// DEVICE: @c_t_fa_t = addrspace(4) externally_initialized global %struct.T_FA_T zeroinitializer,
// HOST: @c_t_fa_t = internal global %struct.T_FA_T undef,
// Calling empty base class initializer is OK
__device__ EC_I_EC d_ec_i_ec;
// DEVICE: @d_ec_i_ec = addrspace(1) externally_initialized global %struct.EC_I_EC zeroinitializer,
// HOST: @d_ec_i_ec = internal global %struct.EC_I_EC undef,
__shared__ EC_I_EC s_ec_i_ec;
// DEVICE: @s_ec_i_ec = addrspace(3) global %struct.EC_I_EC undef,
// HOST: @s_ec_i_ec = internal global %struct.EC_I_EC undef,
__constant__ EC_I_EC c_ec_i_ec;
// DEVICE: @c_ec_i_ec = addrspace(4) externally_initialized global %struct.EC_I_EC zeroinitializer,
// HOST: @c_ec_i_ec = internal global %struct.EC_I_EC undef,
// DEVICE: @_ZZ2dfvE4s_ec = internal addrspace(3) global %struct.EC undef
// DEVICE: @_ZZ2dfvE5s_etc = internal addrspace(3) global %struct.ETC undef
// DEVICE: @_ZZ2dfvE11const_array = internal addrspace(4) constant [5 x i32] [i32 1, i32 2, i32 3, i32 4, i32 5]
// DEVICE: @_ZZ2dfvE9const_int = internal addrspace(4) constant i32 123
// We should not emit global initializers for device-side variables.
// DEVICE-NOT: @__cxx_global_var_init
// Make sure that initialization restrictions do not apply to local
// variables.
__device__ void df() {
// NVPTX: %[[ec:.*]] = alloca %struct.EC
// NVPTX: %[[ed:.*]] = alloca %struct.ED
// NVPTX: %[[ecd:.*]] = alloca %struct.ECD
// NVPTX: %[[etc:.*]] = alloca %struct.ETC
// NVPTX: %[[uc:.*]] = alloca %struct.UC
// NVPTX: %[[ud:.*]] = alloca %struct.UD
// NVPTX: %[[eci:.*]] = alloca %struct.ECI
// NVPTX: %[[nec:.*]] = alloca %struct.NEC
// NVPTX: %[[ned:.*]] = alloca %struct.NED
// NVPTX: %[[ncv:.*]] = alloca %struct.NCV
// NVPTX: %[[vd:.*]] = alloca %struct.VD
// NVPTX: %[[ncf:.*]] = alloca %struct.NCF
// NVPTX: %[[ncfs:.*]] = alloca %struct.NCFS
// NVPTX: %[[utc:.*]] = alloca %struct.UTC
// NVPTX: %[[netc:.*]] = alloca %struct.NETC
// NVPTX: %[[ec_i_ec:.*]] = alloca %struct.EC_I_EC
// NVPTX: %[[ec_i_ec1:.*]] = alloca %struct.EC_I_EC1
// NVPTX: %[[t_v_t:.*]] = alloca %struct.T_V_T
// NVPTX: %[[t_b_nec:.*]] = alloca %struct.T_B_NEC
// NVPTX: %[[t_f_nec:.*]] = alloca %struct.T_F_NEC
// NVPTX: %[[t_fa_nec:.*]] = alloca %struct.T_FA_NEC
// NVPTX: %[[t_b_ned:.*]] = alloca %struct.T_B_NED
// NVPTX: %[[t_f_ned:.*]] = alloca %struct.T_F_NED
// NVPTX: %[[t_fa_ned:.*]] = alloca %struct.T_FA_NED
// AMDGCN: %[[ec:.*]] = addrspacecast %struct.EC addrspace(5)* %ec to %struct.EC*
// AMDGCN: %[[ed:.*]] = addrspacecast %struct.ED addrspace(5)* %ed to %struct.ED*
// AMDGCN: %[[ecd:.*]] = addrspacecast %struct.ECD addrspace(5)* %ecd to %struct.ECD*
// AMDGCN: %[[etc:.*]] = addrspacecast %struct.ETC addrspace(5)* %etc to %struct.ETC*
// AMDGCN: %[[uc:.*]] = addrspacecast %struct.UC addrspace(5)* %uc to %struct.UC*
// AMDGCN: %[[ud:.*]] = addrspacecast %struct.UD addrspace(5)* %ud to %struct.UD*
// AMDGCN: %[[eci:.*]] = addrspacecast %struct.ECI addrspace(5)* %eci to %struct.ECI*
// AMDGCN: %[[nec:.*]] = addrspacecast %struct.NEC addrspace(5)* %nec to %struct.NEC*
// AMDGCN: %[[ned:.*]] = addrspacecast %struct.NED addrspace(5)* %ned to %struct.NED*
// AMDGCN: %[[ncv:.*]] = addrspacecast %struct.NCV addrspace(5)* %ncv to %struct.NCV*
// AMDGCN: %[[vd:.*]] = addrspacecast %struct.VD addrspace(5)* %vd to %struct.VD*
// AMDGCN: %[[ncf:.*]] = addrspacecast %struct.NCF addrspace(5)* %ncf to %struct.NCF*
// AMDGCN: %[[ncfs:.*]] = addrspacecast %struct.NCFS addrspace(5)* %ncfs to %struct.NCFS*
// AMDGCN: %[[utc:.*]] = addrspacecast %struct.UTC addrspace(5)* %utc to %struct.UTC*
// AMDGCN: %[[netc:.*]] = addrspacecast %struct.NETC addrspace(5)* %netc to %struct.NETC*
// AMDGCN: %[[ec_i_ec:.*]] = addrspacecast %struct.EC_I_EC addrspace(5)* %ec_i_ec to %struct.EC_I_EC*
// AMDGCN: %[[ec_i_ec1:.*]] = addrspacecast %struct.EC_I_EC1 addrspace(5)* %ec_i_ec1 to %struct.EC_I_EC1*
// AMDGCN: %[[t_v_t:.*]] = addrspacecast %struct.T_V_T addrspace(5)* %t_v_t to %struct.T_V_T*
// AMDGCN: %[[t_b_nec:.*]] = addrspacecast %struct.T_B_NEC addrspace(5)* %t_b_nec to %struct.T_B_NEC*
// AMDGCN: %[[t_f_nec:.*]] = addrspacecast %struct.T_F_NEC addrspace(5)* %t_f_nec to %struct.T_F_NEC*
// AMDGCN: %[[t_fa_nec:.*]] = addrspacecast %struct.T_FA_NEC addrspace(5)* %t_fa_nec to %struct.T_FA_NEC*
// AMDGCN: %[[t_b_ned:.*]] = addrspacecast %struct.T_B_NED addrspace(5)* %t_b_ned to %struct.T_B_NED*
// AMDGCN: %[[t_f_ned:.*]] = addrspacecast %struct.T_F_NED addrspace(5)* %t_f_ned to %struct.T_F_NED*
// AMDGCN: %[[t_fa_ned:.*]] = addrspacecast %struct.T_FA_NED addrspace(5)* %t_fa_ned to %struct.T_FA_NED*
T t;
// DEVICE-NOT: call
EC ec;
// DEVICE: call void @_ZN2ECC1Ev(%struct.EC* {{[^,]*}} %[[ec]])
ED ed;
// DEVICE-NOT: call
ECD ecd;
// DEVICE: call void @_ZN3ECDC1Ev(%struct.ECD* {{[^,]*}} %[[ecd]])
ETC etc;
// DEVICE: call void @_ZN3ETCC1IJEEEDpT_(%struct.ETC* {{[^,]*}} %[[etc]])
UC uc;
// undefined constructor -- not allowed
// DEVICE: call void @_ZN2UCC1Ev(%struct.UC* {{[^,]*}} %[[uc]])
UD ud;
// undefined destructor -- not allowed
// DEVICE-NOT: call
ECI eci;
// empty constructor w/ initializer list -- not allowed
// DEVICE: call void @_ZN3ECIC1Ev(%struct.ECI* {{[^,]*}} %[[eci]])
NEC nec;
// non-empty constructor -- not allowed
// DEVICE: call void @_ZN3NECC1Ev(%struct.NEC* {{[^,]*}} %[[nec]])
// non-empty destructor -- not allowed
NED ned;
// no-constructor, virtual method -- not allowed
// DEVICE: call void @_ZN3NCVC1Ev(%struct.NCV* {{[^,]*}} %[[ncv]])
NCV ncv;
// DEVICE-NOT: call
VD vd;
// DEVICE: call void @_ZN2VDC1Ev(%struct.VD* {{[^,]*}} %[[vd]])
NCF ncf;
// DEVICE: call void @_ZN3NCFC1Ev(%struct.NCF* {{[^,]*}} %[[ncf]])
NCFS ncfs;
// DEVICE: call void @_ZN4NCFSC1Ev(%struct.NCFS* {{[^,]*}} %[[ncfs]])
UTC utc;
// DEVICE: call void @_ZN3UTCC1IJEEEDpT_(%struct.UTC* {{[^,]*}} %[[utc]])
NETC netc;
// DEVICE: call void @_ZN4NETCC1IJEEEDpT_(%struct.NETC* {{[^,]*}} %[[netc]])
T_B_T t_b_t;
// DEVICE-NOT: call
T_F_T t_f_t;
// DEVICE-NOT: call
T_FA_T t_fa_t;
// DEVICE-NOT: call
EC_I_EC ec_i_ec;
// DEVICE: call void @_ZN7EC_I_ECC1Ev(%struct.EC_I_EC* {{[^,]*}} %[[ec_i_ec]])
EC_I_EC1 ec_i_ec1;
// DEVICE: call void @_ZN8EC_I_EC1C1Ev(%struct.EC_I_EC1* {{[^,]*}} %[[ec_i_ec1]])
T_V_T t_v_t;
// DEVICE: call void @_ZN5T_V_TC1Ev(%struct.T_V_T* {{[^,]*}} %[[t_v_t]])
T_B_NEC t_b_nec;
// DEVICE: call void @_ZN7T_B_NECC1Ev(%struct.T_B_NEC* {{[^,]*}} %[[t_b_nec]])
T_F_NEC t_f_nec;
// DEVICE: call void @_ZN7T_F_NECC1Ev(%struct.T_F_NEC* {{[^,]*}} %[[t_f_nec]])
T_FA_NEC t_fa_nec;
// DEVICE: call void @_ZN8T_FA_NECC1Ev(%struct.T_FA_NEC* {{[^,]*}} %[[t_fa_nec]])
T_B_NED t_b_ned;
// DEVICE-NOT: call
T_F_NED t_f_ned;
// DEVICE-NOT: call
T_FA_NED t_fa_ned;
// DEVICE-NOT: call
static __shared__ EC s_ec;
// DEVICE-NOT: call void @_ZN2ECC1Ev(%struct.EC* addrspacecast (%struct.EC addrspace(3)* @_ZZ2dfvE4s_ec to %struct.EC*))
static __shared__ ETC s_etc;
// DEVICE-NOT: call void @_ZN3ETCC1IJEEEDpT_(%struct.ETC* addrspacecast (%struct.ETC addrspace(3)* @_ZZ2dfvE5s_etc to %struct.ETC*))
static const int const_array[] = {1, 2, 3, 4, 5};
static const int const_int = 123;
// anchor point separating constructors and destructors
df(); // DEVICE: call void @_Z2dfv()
// Verify that we only call non-empty destructors
// DEVICE-NEXT: call void @_ZN8T_FA_NEDD1Ev(%struct.T_FA_NED* {{[^,]*}} %[[t_fa_ned]])
// DEVICE-NEXT: call void @_ZN7T_F_NEDD1Ev(%struct.T_F_NED* {{[^,]*}} %[[t_f_ned]])
// DEVICE-NEXT: call void @_ZN7T_B_NEDD1Ev(%struct.T_B_NED* {{[^,]*}} %[[t_b_ned]])
// DEVICE-NEXT: call void @_ZN2VDD1Ev(%struct.VD* {{[^,]*}} %[[vd]])
// DEVICE-NEXT: call void @_ZN3NEDD1Ev(%struct.NED* {{[^,]*}} %[[ned]])
// DEVICE-NEXT: call void @_ZN2UDD1Ev(%struct.UD* {{[^,]*}} %[[ud]])
// DEVICE-NEXT: call void @_ZN3ECDD1Ev(%struct.ECD* {{[^,]*}} %[[ecd]])
// DEVICE-NEXT: call void @_ZN2EDD1Ev(%struct.ED* {{[^,]*}} %[[ed]])
// DEVICE-NEXT: ret void
}
// We should not emit global init function.
// DEVICE-NOT: @_GLOBAL__sub_I
| 45296027b6ca65b8e5ae941f542df118d0fdf540.cu | // REQUIRES: nvptx-registered-target
// REQUIRES: amdgpu-registered-target
// Make sure we don't allow dynamic initialization for device
// variables, but accept empty constructors allowed by CUDA.
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 \
// RUN: -fno-threadsafe-statics -emit-llvm -o - %s | FileCheck -check-prefixes=DEVICE,NVPTX %s
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -std=c++11 \
// RUN: -fno-threadsafe-statics -emit-llvm -o - %s | FileCheck -check-prefixes=HOST %s
// RUN: %clang_cc1 -triple amdgcn -fcuda-is-device -std=c++11 \
// RUN: -fno-threadsafe-statics -emit-llvm -o - %s | FileCheck -check-prefixes=DEVICE,AMDGCN %s
#ifdef __clang__
#include "Inputs/cuda.h"
#endif
// Use the types we share with Sema tests.
#include "Inputs/cuda-initializers.h"
__device__ int d_v;
// DEVICE: @d_v = addrspace(1) externally_initialized global i32 0,
// HOST: @d_v = internal global i32 undef,
__shared__ int s_v;
// DEVICE: @s_v = addrspace(3) global i32 undef,
// HOST: @s_v = internal global i32 undef,
__constant__ int c_v;
// DEVICE: addrspace(4) externally_initialized global i32 0,
// HOST: @c_v = internal global i32 undef,
__device__ int d_v_i = 1;
// DEVICE: @d_v_i = addrspace(1) externally_initialized global i32 1,
// HOST: @d_v_i = internal global i32 undef,
// For `static` device variables, assume they won't be addressed from the host
// side.
static __device__ int d_s_v_i = 1;
// DEVICE: @_ZL7d_s_v_i = internal addrspace(1) global i32 1,
// Dummy function to keep static variables referenced.
__device__ int foo() {
return d_s_v_i;
}
// trivial constructor -- allowed
__device__ T d_t;
// DEVICE: @d_t = addrspace(1) externally_initialized global %struct.T zeroinitializer
// HOST: @d_t = internal global %struct.T undef,
__shared__ T s_t;
// DEVICE: @s_t = addrspace(3) global %struct.T undef,
// HOST: @s_t = internal global %struct.T undef,
__constant__ T c_t;
// DEVICE: @c_t = addrspace(4) externally_initialized global %struct.T zeroinitializer,
// HOST: @c_t = internal global %struct.T undef,
__device__ T d_t_i = {2};
// DEVICE: @d_t_i = addrspace(1) externally_initialized global %struct.T { i32 2 },
// HOST: @d_t_i = internal global %struct.T undef,
__constant__ T c_t_i = {2};
// DEVICE: @c_t_i = addrspace(4) externally_initialized global %struct.T { i32 2 },
// HOST: @c_t_i = internal global %struct.T undef,
// empty constructor
__device__ EC d_ec;
// DEVICE: @d_ec = addrspace(1) externally_initialized global %struct.EC zeroinitializer,
// HOST: @d_ec = internal global %struct.EC undef,
__shared__ EC s_ec;
// DEVICE: @s_ec = addrspace(3) global %struct.EC undef,
// HOST: @s_ec = internal global %struct.EC undef,
__constant__ EC c_ec;
// DEVICE: @c_ec = addrspace(4) externally_initialized global %struct.EC zeroinitializer,
// HOST: @c_ec = internal global %struct.EC undef
// empty destructor
__device__ ED d_ed;
// DEVICE: @d_ed = addrspace(1) externally_initialized global %struct.ED zeroinitializer,
// HOST: @d_ed = internal global %struct.ED undef,
__shared__ ED s_ed;
// DEVICE: @s_ed = addrspace(3) global %struct.ED undef,
// HOST: @s_ed = internal global %struct.ED undef,
__constant__ ED c_ed;
// DEVICE: @c_ed = addrspace(4) externally_initialized global %struct.ED zeroinitializer,
// HOST: @c_ed = internal global %struct.ED undef,
__device__ ECD d_ecd;
// DEVICE: @d_ecd = addrspace(1) externally_initialized global %struct.ECD zeroinitializer,
// HOST: @d_ecd = internal global %struct.ECD undef,
__shared__ ECD s_ecd;
// DEVICE: @s_ecd = addrspace(3) global %struct.ECD undef,
// HOST: @s_ecd = internal global %struct.ECD undef,
__constant__ ECD c_ecd;
// DEVICE: @c_ecd = addrspace(4) externally_initialized global %struct.ECD zeroinitializer,
// HOST: @c_ecd = internal global %struct.ECD undef,
// empty templated constructor -- allowed with no arguments
__device__ ETC d_etc;
// DEVICE: @d_etc = addrspace(1) externally_initialized global %struct.ETC zeroinitializer,
// HOST: @d_etc = internal global %struct.ETC undef,
__shared__ ETC s_etc;
// DEVICE: @s_etc = addrspace(3) global %struct.ETC undef,
// HOST: @s_etc = internal global %struct.ETC undef,
__constant__ ETC c_etc;
// DEVICE: @c_etc = addrspace(4) externally_initialized global %struct.ETC zeroinitializer,
// HOST: @c_etc = internal global %struct.ETC undef,
__device__ NCFS d_ncfs;
// DEVICE: @d_ncfs = addrspace(1) externally_initialized global %struct.NCFS { i32 3 }
// HOST: @d_ncfs = internal global %struct.NCFS undef,
__constant__ NCFS c_ncfs;
// DEVICE: @c_ncfs = addrspace(4) externally_initialized global %struct.NCFS { i32 3 }
// HOST: @c_ncfs = internal global %struct.NCFS undef,
// Regular base class -- allowed
__device__ T_B_T d_t_b_t;
// DEVICE: @d_t_b_t = addrspace(1) externally_initialized global %struct.T_B_T zeroinitializer,
// HOST: @d_t_b_t = internal global %struct.T_B_T undef,
__shared__ T_B_T s_t_b_t;
// DEVICE: @s_t_b_t = addrspace(3) global %struct.T_B_T undef,
// HOST: @s_t_b_t = internal global %struct.T_B_T undef,
__constant__ T_B_T c_t_b_t;
// DEVICE: @c_t_b_t = addrspace(4) externally_initialized global %struct.T_B_T zeroinitializer,
// HOST: @c_t_b_t = internal global %struct.T_B_T undef,
// Encapsulated object of allowed class -- allowed
__device__ T_F_T d_t_f_t;
// DEVICE: @d_t_f_t = addrspace(1) externally_initialized global %struct.T_F_T zeroinitializer,
// HOST: @d_t_f_t = internal global %struct.T_F_T undef,
__shared__ T_F_T s_t_f_t;
// DEVICE: @s_t_f_t = addrspace(3) global %struct.T_F_T undef,
// HOST: @s_t_f_t = internal global %struct.T_F_T undef,
__constant__ T_F_T c_t_f_t;
// DEVICE: @c_t_f_t = addrspace(4) externally_initialized global %struct.T_F_T zeroinitializer,
// HOST: @c_t_f_t = internal global %struct.T_F_T undef,
// array of allowed objects -- allowed
__device__ T_FA_T d_t_fa_t;
// DEVICE: @d_t_fa_t = addrspace(1) externally_initialized global %struct.T_FA_T zeroinitializer,
// HOST: @d_t_fa_t = internal global %struct.T_FA_T undef,
__shared__ T_FA_T s_t_fa_t;
// DEVICE: @s_t_fa_t = addrspace(3) global %struct.T_FA_T undef,
// HOST: @s_t_fa_t = internal global %struct.T_FA_T undef,
__constant__ T_FA_T c_t_fa_t;
// DEVICE: @c_t_fa_t = addrspace(4) externally_initialized global %struct.T_FA_T zeroinitializer,
// HOST: @c_t_fa_t = internal global %struct.T_FA_T undef,
// Calling empty base class initializer is OK
__device__ EC_I_EC d_ec_i_ec;
// DEVICE: @d_ec_i_ec = addrspace(1) externally_initialized global %struct.EC_I_EC zeroinitializer,
// HOST: @d_ec_i_ec = internal global %struct.EC_I_EC undef,
__shared__ EC_I_EC s_ec_i_ec;
// DEVICE: @s_ec_i_ec = addrspace(3) global %struct.EC_I_EC undef,
// HOST: @s_ec_i_ec = internal global %struct.EC_I_EC undef,
__constant__ EC_I_EC c_ec_i_ec;
// DEVICE: @c_ec_i_ec = addrspace(4) externally_initialized global %struct.EC_I_EC zeroinitializer,
// HOST: @c_ec_i_ec = internal global %struct.EC_I_EC undef,
// DEVICE: @_ZZ2dfvE4s_ec = internal addrspace(3) global %struct.EC undef
// DEVICE: @_ZZ2dfvE5s_etc = internal addrspace(3) global %struct.ETC undef
// DEVICE: @_ZZ2dfvE11const_array = internal addrspace(4) constant [5 x i32] [i32 1, i32 2, i32 3, i32 4, i32 5]
// DEVICE: @_ZZ2dfvE9const_int = internal addrspace(4) constant i32 123
// We should not emit global initializers for device-side variables.
// DEVICE-NOT: @__cxx_global_var_init
// Make sure that initialization restrictions do not apply to local
// variables.
__device__ void df() {
// NVPTX: %[[ec:.*]] = alloca %struct.EC
// NVPTX: %[[ed:.*]] = alloca %struct.ED
// NVPTX: %[[ecd:.*]] = alloca %struct.ECD
// NVPTX: %[[etc:.*]] = alloca %struct.ETC
// NVPTX: %[[uc:.*]] = alloca %struct.UC
// NVPTX: %[[ud:.*]] = alloca %struct.UD
// NVPTX: %[[eci:.*]] = alloca %struct.ECI
// NVPTX: %[[nec:.*]] = alloca %struct.NEC
// NVPTX: %[[ned:.*]] = alloca %struct.NED
// NVPTX: %[[ncv:.*]] = alloca %struct.NCV
// NVPTX: %[[vd:.*]] = alloca %struct.VD
// NVPTX: %[[ncf:.*]] = alloca %struct.NCF
// NVPTX: %[[ncfs:.*]] = alloca %struct.NCFS
// NVPTX: %[[utc:.*]] = alloca %struct.UTC
// NVPTX: %[[netc:.*]] = alloca %struct.NETC
// NVPTX: %[[ec_i_ec:.*]] = alloca %struct.EC_I_EC
// NVPTX: %[[ec_i_ec1:.*]] = alloca %struct.EC_I_EC1
// NVPTX: %[[t_v_t:.*]] = alloca %struct.T_V_T
// NVPTX: %[[t_b_nec:.*]] = alloca %struct.T_B_NEC
// NVPTX: %[[t_f_nec:.*]] = alloca %struct.T_F_NEC
// NVPTX: %[[t_fa_nec:.*]] = alloca %struct.T_FA_NEC
// NVPTX: %[[t_b_ned:.*]] = alloca %struct.T_B_NED
// NVPTX: %[[t_f_ned:.*]] = alloca %struct.T_F_NED
// NVPTX: %[[t_fa_ned:.*]] = alloca %struct.T_FA_NED
// AMDGCN: %[[ec:.*]] = addrspacecast %struct.EC addrspace(5)* %ec to %struct.EC*
// AMDGCN: %[[ed:.*]] = addrspacecast %struct.ED addrspace(5)* %ed to %struct.ED*
// AMDGCN: %[[ecd:.*]] = addrspacecast %struct.ECD addrspace(5)* %ecd to %struct.ECD*
// AMDGCN: %[[etc:.*]] = addrspacecast %struct.ETC addrspace(5)* %etc to %struct.ETC*
// AMDGCN: %[[uc:.*]] = addrspacecast %struct.UC addrspace(5)* %uc to %struct.UC*
// AMDGCN: %[[ud:.*]] = addrspacecast %struct.UD addrspace(5)* %ud to %struct.UD*
// AMDGCN: %[[eci:.*]] = addrspacecast %struct.ECI addrspace(5)* %eci to %struct.ECI*
// AMDGCN: %[[nec:.*]] = addrspacecast %struct.NEC addrspace(5)* %nec to %struct.NEC*
// AMDGCN: %[[ned:.*]] = addrspacecast %struct.NED addrspace(5)* %ned to %struct.NED*
// AMDGCN: %[[ncv:.*]] = addrspacecast %struct.NCV addrspace(5)* %ncv to %struct.NCV*
// AMDGCN: %[[vd:.*]] = addrspacecast %struct.VD addrspace(5)* %vd to %struct.VD*
// AMDGCN: %[[ncf:.*]] = addrspacecast %struct.NCF addrspace(5)* %ncf to %struct.NCF*
// AMDGCN: %[[ncfs:.*]] = addrspacecast %struct.NCFS addrspace(5)* %ncfs to %struct.NCFS*
// AMDGCN: %[[utc:.*]] = addrspacecast %struct.UTC addrspace(5)* %utc to %struct.UTC*
// AMDGCN: %[[netc:.*]] = addrspacecast %struct.NETC addrspace(5)* %netc to %struct.NETC*
// AMDGCN: %[[ec_i_ec:.*]] = addrspacecast %struct.EC_I_EC addrspace(5)* %ec_i_ec to %struct.EC_I_EC*
// AMDGCN: %[[ec_i_ec1:.*]] = addrspacecast %struct.EC_I_EC1 addrspace(5)* %ec_i_ec1 to %struct.EC_I_EC1*
// AMDGCN: %[[t_v_t:.*]] = addrspacecast %struct.T_V_T addrspace(5)* %t_v_t to %struct.T_V_T*
// AMDGCN: %[[t_b_nec:.*]] = addrspacecast %struct.T_B_NEC addrspace(5)* %t_b_nec to %struct.T_B_NEC*
// AMDGCN: %[[t_f_nec:.*]] = addrspacecast %struct.T_F_NEC addrspace(5)* %t_f_nec to %struct.T_F_NEC*
// AMDGCN: %[[t_fa_nec:.*]] = addrspacecast %struct.T_FA_NEC addrspace(5)* %t_fa_nec to %struct.T_FA_NEC*
// AMDGCN: %[[t_b_ned:.*]] = addrspacecast %struct.T_B_NED addrspace(5)* %t_b_ned to %struct.T_B_NED*
// AMDGCN: %[[t_f_ned:.*]] = addrspacecast %struct.T_F_NED addrspace(5)* %t_f_ned to %struct.T_F_NED*
// AMDGCN: %[[t_fa_ned:.*]] = addrspacecast %struct.T_FA_NED addrspace(5)* %t_fa_ned to %struct.T_FA_NED*
T t;
// DEVICE-NOT: call
EC ec;
// DEVICE: call void @_ZN2ECC1Ev(%struct.EC* {{[^,]*}} %[[ec]])
ED ed;
// DEVICE-NOT: call
ECD ecd;
// DEVICE: call void @_ZN3ECDC1Ev(%struct.ECD* {{[^,]*}} %[[ecd]])
ETC etc;
// DEVICE: call void @_ZN3ETCC1IJEEEDpT_(%struct.ETC* {{[^,]*}} %[[etc]])
UC uc;
// undefined constructor -- not allowed
// DEVICE: call void @_ZN2UCC1Ev(%struct.UC* {{[^,]*}} %[[uc]])
UD ud;
// undefined destructor -- not allowed
// DEVICE-NOT: call
ECI eci;
// empty constructor w/ initializer list -- not allowed
// DEVICE: call void @_ZN3ECIC1Ev(%struct.ECI* {{[^,]*}} %[[eci]])
NEC nec;
// non-empty constructor -- not allowed
// DEVICE: call void @_ZN3NECC1Ev(%struct.NEC* {{[^,]*}} %[[nec]])
// non-empty destructor -- not allowed
NED ned;
// no-constructor, virtual method -- not allowed
// DEVICE: call void @_ZN3NCVC1Ev(%struct.NCV* {{[^,]*}} %[[ncv]])
NCV ncv;
// DEVICE-NOT: call
VD vd;
// DEVICE: call void @_ZN2VDC1Ev(%struct.VD* {{[^,]*}} %[[vd]])
NCF ncf;
// DEVICE: call void @_ZN3NCFC1Ev(%struct.NCF* {{[^,]*}} %[[ncf]])
NCFS ncfs;
// DEVICE: call void @_ZN4NCFSC1Ev(%struct.NCFS* {{[^,]*}} %[[ncfs]])
UTC utc;
// DEVICE: call void @_ZN3UTCC1IJEEEDpT_(%struct.UTC* {{[^,]*}} %[[utc]])
NETC netc;
// DEVICE: call void @_ZN4NETCC1IJEEEDpT_(%struct.NETC* {{[^,]*}} %[[netc]])
T_B_T t_b_t;
// DEVICE-NOT: call
T_F_T t_f_t;
// DEVICE-NOT: call
T_FA_T t_fa_t;
// DEVICE-NOT: call
EC_I_EC ec_i_ec;
// DEVICE: call void @_ZN7EC_I_ECC1Ev(%struct.EC_I_EC* {{[^,]*}} %[[ec_i_ec]])
EC_I_EC1 ec_i_ec1;
// DEVICE: call void @_ZN8EC_I_EC1C1Ev(%struct.EC_I_EC1* {{[^,]*}} %[[ec_i_ec1]])
T_V_T t_v_t;
// DEVICE: call void @_ZN5T_V_TC1Ev(%struct.T_V_T* {{[^,]*}} %[[t_v_t]])
T_B_NEC t_b_nec;
// DEVICE: call void @_ZN7T_B_NECC1Ev(%struct.T_B_NEC* {{[^,]*}} %[[t_b_nec]])
T_F_NEC t_f_nec;
// DEVICE: call void @_ZN7T_F_NECC1Ev(%struct.T_F_NEC* {{[^,]*}} %[[t_f_nec]])
T_FA_NEC t_fa_nec;
// DEVICE: call void @_ZN8T_FA_NECC1Ev(%struct.T_FA_NEC* {{[^,]*}} %[[t_fa_nec]])
T_B_NED t_b_ned;
// DEVICE-NOT: call
T_F_NED t_f_ned;
// DEVICE-NOT: call
T_FA_NED t_fa_ned;
// DEVICE-NOT: call
static __shared__ EC s_ec;
// DEVICE-NOT: call void @_ZN2ECC1Ev(%struct.EC* addrspacecast (%struct.EC addrspace(3)* @_ZZ2dfvE4s_ec to %struct.EC*))
static __shared__ ETC s_etc;
// DEVICE-NOT: call void @_ZN3ETCC1IJEEEDpT_(%struct.ETC* addrspacecast (%struct.ETC addrspace(3)* @_ZZ2dfvE5s_etc to %struct.ETC*))
static const int const_array[] = {1, 2, 3, 4, 5};
static const int const_int = 123;
// anchor point separating constructors and destructors
df(); // DEVICE: call void @_Z2dfv()
// Verify that we only call non-empty destructors
// DEVICE-NEXT: call void @_ZN8T_FA_NEDD1Ev(%struct.T_FA_NED* {{[^,]*}} %[[t_fa_ned]])
// DEVICE-NEXT: call void @_ZN7T_F_NEDD1Ev(%struct.T_F_NED* {{[^,]*}} %[[t_f_ned]])
// DEVICE-NEXT: call void @_ZN7T_B_NEDD1Ev(%struct.T_B_NED* {{[^,]*}} %[[t_b_ned]])
// DEVICE-NEXT: call void @_ZN2VDD1Ev(%struct.VD* {{[^,]*}} %[[vd]])
// DEVICE-NEXT: call void @_ZN3NEDD1Ev(%struct.NED* {{[^,]*}} %[[ned]])
// DEVICE-NEXT: call void @_ZN2UDD1Ev(%struct.UD* {{[^,]*}} %[[ud]])
// DEVICE-NEXT: call void @_ZN3ECDD1Ev(%struct.ECD* {{[^,]*}} %[[ecd]])
// DEVICE-NEXT: call void @_ZN2EDD1Ev(%struct.ED* {{[^,]*}} %[[ed]])
// DEVICE-NEXT: ret void
}
// We should not emit global init function.
// DEVICE-NOT: @_GLOBAL__sub_I
|
5743adb3de86e08f4465d2cce77229a01a05cfcc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_HETERPS
#include <algorithm>
#include <ctime>
#include <memory>
#include <numeric>
#include "paddle/fluid/framework/fleet/heter_ps/optimizer_conf.h"
#include "paddle/fluid/framework/fleet/ps_gpu_wrapper.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/platform/gpu_info.h"
namespace paddle {
namespace framework {
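// PullCopy scatters the pulled FeatureValue records (show, clk, lr plus 8
// embedding floats) from the flat per-batch buffer `src` into the per-slot
// destination tensors in `dest`. `len` holds the cumulative slot lengths, so a
// binary search maps the global index i to its slot x and row y; rows whose
// key is 0 (or whose mf_size is 0, for the embedding part) are written as zeros.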
__global__ void PullCopy(float** dest, const FeatureValue* src,
const int64_t* len, int hidden, int slot_num,
int total_len, uint64_t** keys) {
CUDA_KERNEL_LOOP(i, total_len) {
int low = 0;
int high = slot_num - 1;
while (low < high) {
int mid = (low + high) / 2;
if (i < len[mid])
high = mid;
else
low = mid + 1;
}
int x = low;
int y = i - (x ? len[x - 1] : 0);
if (*(keys[x] + y) == 0) {
*(dest[x] + y * hidden) = 0;
*(dest[x] + y * hidden + 1) = 0;
*(dest[x] + y * hidden + 2) = 0;
} else {
*(dest[x] + y * hidden) = (src + i)->show;
*(dest[x] + y * hidden + 1) = (src + i)->clk;
*(dest[x] + y * hidden + 2) = (src + i)->lr;
}
if ((src + i)->mf_size == 0 || *(keys[x] + y) == 0) {
for (int j = 0; j < 8; j++) {
*(dest[x] + y * hidden + 3 + j) = 0;
}
} else {
for (int j = 0; j < 8; j++) {
*(dest[x] + y * hidden + 3 + j) = (src + i)->mf[1 + j];
}
}
}
}
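// CopyKeysKernel flattens the per-slot key arrays into a single contiguous key
// buffer, using the same binary search over cumulative lengths to recover the
// (slot, row) pair for each global index.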
__global__ void CopyKeysKernel(uint64_t** src_keys, uint64_t* dest_total_keys,
const int64_t* len, int slot_num,
int total_len) {
CUDA_KERNEL_LOOP(i, total_len) {
int low = 0;
int high = slot_num - 1;
while (low < high) {
int mid = (low + high) / 2;
if (i < len[mid])
high = mid;
else
low = mid + 1;
}
int x = low;
int y = i - (x ? len[x - 1] : 0);
dest_total_keys[i] = src_keys[x][y];
}
}
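// PushCopy gathers gradients from the per-slot gradient tensors back into flat
// FeaturePushValue records; lr_g and mf_g are negated and scaled by the batch
// size, and the owning slot id is taken from slot_vector.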
__global__ void PushCopy(FeaturePushValue* dest, float** src, int64_t* len,
int hidden, int slot_num, int total_len, int bs,
int* slot_vector) {
CUDA_KERNEL_LOOP(i, total_len) {
int low = 0;
int high = slot_num - 1;
while (low < high) {
int mid = (low + high) / 2;
if (i < len[mid])
high = mid;
else
low = mid + 1;
}
int x = low;
int y = i - (x ? len[low - 1] : 0);
(dest + i)->slot = slot_vector[x];
(dest + i)->show = *(src[x] + y * hidden);
(dest + i)->clk = *(src[x] + y * hidden + 1);
(dest + i)->lr_g = *(src[x] + y * hidden + 2) * -1. * bs;
for (int j = 0; j < 8; j++) {
(dest + i)->mf_g[j] = *(src[x] + y * hidden + 3 + j) * -1. * bs;
}
}
}
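// CopyForPull stages the host array of per-slot destination pointers onto the
// device, then launches PullCopy with 512 threads per block on the stream of
// the target place and waits for it to finish.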
void PSGPUWrapper::CopyForPull(const paddle::platform::Place& place,
uint64_t** gpu_keys,
const std::vector<float*>& values,
const FeatureValue* total_values_gpu,
const int64_t* gpu_len, const int slot_num,
const int hidden_size,
const int64_t total_length) {
auto stream = dynamic_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(
BOOST_GET_CONST(platform::CUDAPlace, place)))
->stream();
auto buf_value = memory::AllocShared(place, values.size() * sizeof(float*));
float** gpu_values = reinterpret_cast<float**>(buf_value->ptr());
hipMemcpy(gpu_values, values.data(), values.size() * sizeof(float*),
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( PullCopy), dim3((total_length + 512 - 1) / 512), dim3(512), 0, stream,
gpu_values, total_values_gpu, gpu_len, hidden_size, slot_num,
total_length, gpu_keys);
hipStreamSynchronize(stream);
}
void PSGPUWrapper::CopyKeys(const paddle::platform::Place& place,
uint64_t** origin_keys, uint64_t* total_keys,
const int64_t* gpu_len, int slot_num,
int total_len) {
auto stream = dynamic_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(
BOOST_GET_CONST(platform::CUDAPlace, place)))
->stream();
hipLaunchKernelGGL(( CopyKeysKernel), dim3((total_len + 512 - 1) / 512), dim3(512), 0, stream,
origin_keys, total_keys, gpu_len, slot_num, total_len);
hipStreamSynchronize(stream);
}
void PSGPUWrapper::CopyForPush(const paddle::platform::Place& place,
const std::vector<const float*>& grad_values,
FeaturePushValue* total_grad_values_gpu,
const std::vector<int64_t>& slot_lengths,
const int hidden_size,
const int64_t total_length,
const int batch_size) {
auto stream = dynamic_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(
BOOST_GET_CONST(platform::CUDAPlace, place)))
->stream();
auto slot_lengths_lod = slot_lengths;
for (int i = 1; i < slot_lengths_lod.size(); i++) {
slot_lengths_lod[i] += slot_lengths_lod[i - 1];
}
auto buf_grad_value =
memory::AllocShared(place, grad_values.size() * sizeof(float*));
auto buf_length =
memory::AllocShared(place, slot_lengths.size() * sizeof(int64_t));
auto buf_slot_vector =
memory::AllocShared(place, slot_lengths_lod.size() * sizeof(int));
float** gpu_values = reinterpret_cast<float**>(buf_grad_value->ptr());
int64_t* gpu_len = reinterpret_cast<int64_t*>(buf_length->ptr());
int* d_slot_vector = reinterpret_cast<int*>(buf_slot_vector->ptr());
hipMemcpy(gpu_values, grad_values.data(),
grad_values.size() * sizeof(float*), hipMemcpyHostToDevice);
hipMemcpy(gpu_len, slot_lengths_lod.data(),
slot_lengths.size() * sizeof(int64_t), hipMemcpyHostToDevice);
hipMemcpy(d_slot_vector, slot_vector_.data(),
slot_lengths_lod.size() * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( PushCopy), dim3((total_length + 512 - 1) / 512), dim3(512), 0, stream,
total_grad_values_gpu, gpu_values, gpu_len, hidden_size,
slot_lengths.size(), total_length, batch_size, d_slot_vector);
hipStreamSynchronize(stream);
}
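// SetSparseSGD and SetEmbedxSGD copy the optimizer hyper-parameters into the
// device-side symbols declared in optimizer_conf.h so the GPU optimizer can
// read them directly.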
void PSGPUWrapper::SetSparseSGD(float nonclk_coeff, float clk_coeff,
float min_bound, float max_bound,
float learning_rate, float initial_g2sum,
float initial_range) {
hipMemcpyToSymbol(optimizer_config::nonclk_coeff, &nonclk_coeff,
sizeof(float));
hipMemcpyToSymbol(optimizer_config::clk_coeff, &clk_coeff, sizeof(float));
hipMemcpyToSymbol(optimizer_config::min_bound, &min_bound, sizeof(float));
hipMemcpyToSymbol(optimizer_config::max_bound, &max_bound, sizeof(float));
hipMemcpyToSymbol(optimizer_config::learning_rate, &learning_rate,
sizeof(float));
hipMemcpyToSymbol(optimizer_config::initial_g2sum, &initial_g2sum,
sizeof(float));
hipMemcpyToSymbol(optimizer_config::initial_range, &initial_range,
sizeof(float));
}
void PSGPUWrapper::SetEmbedxSGD(float mf_create_thresholds,
float mf_learning_rate, float mf_initial_g2sum,
float mf_initial_range, float mf_min_bound,
float mf_max_bound) {
hipMemcpyToSymbol(optimizer_config::mf_create_thresholds,
&mf_create_thresholds, sizeof(float));
hipMemcpyToSymbol(optimizer_config::mf_learning_rate, &mf_learning_rate,
sizeof(float));
hipMemcpyToSymbol(optimizer_config::mf_initial_g2sum, &mf_initial_g2sum,
sizeof(float));
hipMemcpyToSymbol(optimizer_config::mf_initial_range, &mf_initial_range,
sizeof(float));
hipMemcpyToSymbol(optimizer_config::mf_min_bound, &mf_min_bound,
sizeof(float));
hipMemcpyToSymbol(optimizer_config::mf_max_bound, &mf_max_bound,
sizeof(float));
}
} // end namespace framework
} // end namespace paddle
#endif
| 5743adb3de86e08f4465d2cce77229a01a05cfcc.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_HETERPS
#include <algorithm>
#include <ctime>
#include <memory>
#include <numeric>
#include "paddle/fluid/framework/fleet/heter_ps/optimizer_conf.h"
#include "paddle/fluid/framework/fleet/ps_gpu_wrapper.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/platform/gpu_info.h"
namespace paddle {
namespace framework {
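// PullCopy scatters the pulled FeatureValue records (show, clk, lr plus 8
// embedding floats) from the flat per-batch buffer `src` into the per-slot
// destination tensors in `dest`. `len` holds the cumulative slot lengths, so a
// binary search maps the global index i to its slot x and row y; rows whose
// key is 0 (or whose mf_size is 0, for the embedding part) are written as zeros.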
__global__ void PullCopy(float** dest, const FeatureValue* src,
const int64_t* len, int hidden, int slot_num,
int total_len, uint64_t** keys) {
CUDA_KERNEL_LOOP(i, total_len) {
int low = 0;
int high = slot_num - 1;
while (low < high) {
int mid = (low + high) / 2;
if (i < len[mid])
high = mid;
else
low = mid + 1;
}
int x = low;
int y = i - (x ? len[x - 1] : 0);
if (*(keys[x] + y) == 0) {
*(dest[x] + y * hidden) = 0;
*(dest[x] + y * hidden + 1) = 0;
*(dest[x] + y * hidden + 2) = 0;
} else {
*(dest[x] + y * hidden) = (src + i)->show;
*(dest[x] + y * hidden + 1) = (src + i)->clk;
*(dest[x] + y * hidden + 2) = (src + i)->lr;
}
if ((src + i)->mf_size == 0 || *(keys[x] + y) == 0) {
for (int j = 0; j < 8; j++) {
*(dest[x] + y * hidden + 3 + j) = 0;
}
} else {
for (int j = 0; j < 8; j++) {
*(dest[x] + y * hidden + 3 + j) = (src + i)->mf[1 + j];
}
}
}
}
__global__ void CopyKeysKernel(uint64_t** src_keys, uint64_t* dest_total_keys,
const int64_t* len, int slot_num,
int total_len) {
CUDA_KERNEL_LOOP(i, total_len) {
int low = 0;
int high = slot_num - 1;
while (low < high) {
int mid = (low + high) / 2;
if (i < len[mid])
high = mid;
else
low = mid + 1;
}
int x = low;
int y = i - (x ? len[x - 1] : 0);
dest_total_keys[i] = src_keys[x][y];
}
}
__global__ void PushCopy(FeaturePushValue* dest, float** src, int64_t* len,
int hidden, int slot_num, int total_len, int bs,
int* slot_vector) {
CUDA_KERNEL_LOOP(i, total_len) {
int low = 0;
int high = slot_num - 1;
while (low < high) {
int mid = (low + high) / 2;
if (i < len[mid])
high = mid;
else
low = mid + 1;
}
int x = low;
int y = i - (x ? len[low - 1] : 0);
(dest + i)->slot = slot_vector[x];
(dest + i)->show = *(src[x] + y * hidden);
(dest + i)->clk = *(src[x] + y * hidden + 1);
(dest + i)->lr_g = *(src[x] + y * hidden + 2) * -1. * bs;
for (int j = 0; j < 8; j++) {
(dest + i)->mf_g[j] = *(src[x] + y * hidden + 3 + j) * -1. * bs;
}
}
}
void PSGPUWrapper::CopyForPull(const paddle::platform::Place& place,
uint64_t** gpu_keys,
const std::vector<float*>& values,
const FeatureValue* total_values_gpu,
const int64_t* gpu_len, const int slot_num,
const int hidden_size,
const int64_t total_length) {
auto stream = dynamic_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(
BOOST_GET_CONST(platform::CUDAPlace, place)))
->stream();
auto buf_value = memory::AllocShared(place, values.size() * sizeof(float*));
float** gpu_values = reinterpret_cast<float**>(buf_value->ptr());
cudaMemcpy(gpu_values, values.data(), values.size() * sizeof(float*),
cudaMemcpyHostToDevice);
PullCopy<<<(total_length + 512 - 1) / 512, 512, 0, stream>>>(
gpu_values, total_values_gpu, gpu_len, hidden_size, slot_num,
total_length, gpu_keys);
cudaStreamSynchronize(stream);
}
void PSGPUWrapper::CopyKeys(const paddle::platform::Place& place,
uint64_t** origin_keys, uint64_t* total_keys,
const int64_t* gpu_len, int slot_num,
int total_len) {
auto stream = dynamic_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(
BOOST_GET_CONST(platform::CUDAPlace, place)))
->stream();
CopyKeysKernel<<<(total_len + 512 - 1) / 512, 512, 0, stream>>>(
origin_keys, total_keys, gpu_len, slot_num, total_len);
cudaStreamSynchronize(stream);
}
void PSGPUWrapper::CopyForPush(const paddle::platform::Place& place,
const std::vector<const float*>& grad_values,
FeaturePushValue* total_grad_values_gpu,
const std::vector<int64_t>& slot_lengths,
const int hidden_size,
const int64_t total_length,
const int batch_size) {
auto stream = dynamic_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(
BOOST_GET_CONST(platform::CUDAPlace, place)))
->stream();
auto slot_lengths_lod = slot_lengths;
for (int i = 1; i < slot_lengths_lod.size(); i++) {
slot_lengths_lod[i] += slot_lengths_lod[i - 1];
}
auto buf_grad_value =
memory::AllocShared(place, grad_values.size() * sizeof(float*));
auto buf_length =
memory::AllocShared(place, slot_lengths.size() * sizeof(int64_t));
auto buf_slot_vector =
memory::AllocShared(place, slot_lengths_lod.size() * sizeof(int));
float** gpu_values = reinterpret_cast<float**>(buf_grad_value->ptr());
int64_t* gpu_len = reinterpret_cast<int64_t*>(buf_length->ptr());
int* d_slot_vector = reinterpret_cast<int*>(buf_slot_vector->ptr());
cudaMemcpy(gpu_values, grad_values.data(),
grad_values.size() * sizeof(float*), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_len, slot_lengths_lod.data(),
slot_lengths.size() * sizeof(int64_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_slot_vector, slot_vector_.data(),
slot_lengths_lod.size() * sizeof(int), cudaMemcpyHostToDevice);
PushCopy<<<(total_length + 512 - 1) / 512, 512, 0, stream>>>(
total_grad_values_gpu, gpu_values, gpu_len, hidden_size,
slot_lengths.size(), total_length, batch_size, d_slot_vector);
cudaStreamSynchronize(stream);
}
void PSGPUWrapper::SetSparseSGD(float nonclk_coeff, float clk_coeff,
float min_bound, float max_bound,
float learning_rate, float initial_g2sum,
float initial_range) {
cudaMemcpyToSymbol(optimizer_config::nonclk_coeff, &nonclk_coeff,
sizeof(float));
cudaMemcpyToSymbol(optimizer_config::clk_coeff, &clk_coeff, sizeof(float));
cudaMemcpyToSymbol(optimizer_config::min_bound, &min_bound, sizeof(float));
cudaMemcpyToSymbol(optimizer_config::max_bound, &max_bound, sizeof(float));
cudaMemcpyToSymbol(optimizer_config::learning_rate, &learning_rate,
sizeof(float));
cudaMemcpyToSymbol(optimizer_config::initial_g2sum, &initial_g2sum,
sizeof(float));
cudaMemcpyToSymbol(optimizer_config::initial_range, &initial_range,
sizeof(float));
}
void PSGPUWrapper::SetEmbedxSGD(float mf_create_thresholds,
float mf_learning_rate, float mf_initial_g2sum,
float mf_initial_range, float mf_min_bound,
float mf_max_bound) {
cudaMemcpyToSymbol(optimizer_config::mf_create_thresholds,
&mf_create_thresholds, sizeof(float));
cudaMemcpyToSymbol(optimizer_config::mf_learning_rate, &mf_learning_rate,
sizeof(float));
cudaMemcpyToSymbol(optimizer_config::mf_initial_g2sum, &mf_initial_g2sum,
sizeof(float));
cudaMemcpyToSymbol(optimizer_config::mf_initial_range, &mf_initial_range,
sizeof(float));
cudaMemcpyToSymbol(optimizer_config::mf_min_bound, &mf_min_bound,
sizeof(float));
cudaMemcpyToSymbol(optimizer_config::mf_max_bound, &mf_max_bound,
sizeof(float));
}
} // end namespace framework
} // end namespace paddle
#endif
|
91cce98de42976fb79a0a1f1b4883d6b534472dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/sg_cb.h>
#include <exceptions/cuda_exception.h>
#include <array/NDArrayFactory.h>
#define HS_MAX_EXP 6.0f
namespace sd {
namespace ops {
namespace helpers {
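// hSoftmaxKernel performs one hierarchical-softmax update for a (syn0, syn1)
// pair: dot product, sigmoid lookup in the precomputed expTable, gradient
// g = (1 - code - f) * alpha, then g * syn1 is accumulated into neu1e and
// (outside inference) g * syn0 is applied to syn1. The wrapper below launches
// it with a single thread.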
template <typename T>
__global__ void hSoftmaxKernel(void *vsyn0, void *vsyn1, void *vexpTable, void *vneu1e, double alpha, int vectorLength, int code, int expLength, bool isInference) {
auto syn0 = reinterpret_cast<T*>(vsyn0);
auto syn1 = reinterpret_cast<T*>(vsyn1);
auto expTable = reinterpret_cast<T*>(vexpTable);
auto neu1e = reinterpret_cast<T*>(vneu1e);
T dot(0.0f);
T g(0.0f);
T f(0.0f);
// dot
for (int e = 0; e < vectorLength; e++) {
dot += syn0[e] * syn1[e];
}
// gradient
if (dot < (T) - HS_MAX_EXP || dot >= (T) HS_MAX_EXP)
return;
int idx = static_cast<int>((dot + HS_MAX_EXP) * ((float) expLength / HS_MAX_EXP / 2.0f));
if (idx >= expLength || idx < 0)
return;
f = expTable[idx];
g = (static_cast<T>(1.0f) - static_cast<T>(code) - f) * (T) alpha;
// axpy1
for (int e = 0; e < vectorLength; e++) {
neu1e[e] = g * syn1[e] + neu1e[e];
}
// axpy2
if (!isInference) {
for (int e = 0; e < vectorLength; e++) {
syn1[e] = g * syn0[e] + syn1[e];
}
}
}
template <typename T>
void hSoftmax_(void *vsyn0, void *vsyn1, void *vexpTable, void *vneu1e, double alpha, int vectorLength, int code, int expLength, bool isInference, hipStream_t* stream) {
hipLaunchKernelGGL(( hSoftmaxKernel<T>), dim3(1),dim3(1),128, *stream, vsyn0, vsyn1, vexpTable, vneu1e, alpha, vectorLength, code, expLength, isInference);
}
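// nSamplingKernel is the negative-sampling counterpart: `code` is 1 for the
// positive pair and 0 for sampled negatives, the sigmoid saturates outside
// +/-HS_MAX_EXP, and the same pair of axpy updates is applied to neu1e and
// (outside inference) to syn1Neg.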
template <typename T>
__global__ void nSamplingKernel(void *vsyn0, void *vsyn1Neg, void *vexpTable, void *vneu1e, double alpha, int vectorLength, int code, int expLength, bool isInference) {
auto syn0 = reinterpret_cast<T*>(vsyn0);
auto syn1Neg = reinterpret_cast<T*>(vsyn1Neg);
auto expTable = reinterpret_cast<T*>(vexpTable);
auto neu1e = reinterpret_cast<T*>(vneu1e);
T dot = (T) 0.0f;
T g = (T) 0.0f;
for (int e = 0; e < vectorLength; e++) {
dot += syn0[e] * syn1Neg[e];
}
if (dot > HS_MAX_EXP)
g = (code - 1) * alpha;
else if (dot < (T) - HS_MAX_EXP)
g = (code - 0) * alpha;
else {
int idx = (int) ((dot + (T) HS_MAX_EXP) * ((T) expLength / HS_MAX_EXP / 2.0));
if (idx >= expLength)
return;
if (idx < 0)
return;
g = ((T) code - expTable[idx]) * alpha;
}
// axpy1
for (int e = 0; e < vectorLength; e++) {
neu1e[e] = g * syn1Neg[e] + neu1e[e];
}
// axpy2
if (!isInference) {
for (int e = 0; e < vectorLength; e++) {
syn1Neg[e] = g * syn0[e] + syn1Neg[e];
}
}
}
template <typename T>
void nSampling_(void *vsyn0, void *vsyn1Neg, void *vexpTable, void *vneu1e, double alpha, int vectorLength, int code, int expLength, bool isInference, hipStream_t* stream) {
hipLaunchKernelGGL(( nSamplingKernel<T>), dim3(1),dim3(1),128, *stream, vsyn0, vsyn1Neg, vexpTable, vneu1e, alpha, vectorLength, code, expLength, isInference);
}
/*
* binarySearch - find element in haystack buffer (haystack - sorted device memory)
* */
int binarySearch(const int *haystack, const int needle, const int totalElements) {
int firstIndex = 0;
int lastIndex = totalElements - 1;
int halfIndex = sd::math::nd4j_floor<float, int>((lastIndex + firstIndex) / (float) 2);
while(haystack[halfIndex] != needle && firstIndex < lastIndex) {
if (needle < haystack[halfIndex]) {
lastIndex = halfIndex - 1;
} else if (needle > haystack[halfIndex]) {
firstIndex = halfIndex + 1;
}
halfIndex = sd::math::nd4j_floor<float, int>((lastIndex + firstIndex) / (float) 2);
}
return (haystack[halfIndex] == needle) ? halfIndex : -1;
}
template <typename T>
__global__ void addInfVectorKernel(T* neu1, T* infVector, int vectorLength) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < vectorLength; i += step) {
neu1[i] += infVector[i];
}
}
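// skipgram_ runs one skip-gram step for a single target word: hierarchical
// softmax over the supplied (index, code) pairs, then negative sampling driven
// by the linear-congruential randomValue, with the accumulated error neu1e
// finally added to the target's syn0 row (or to the inference vector when one
// is provided).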
template <typename T>
void skipgram_(NDArray& s0, NDArray& s1, NDArray& s1n, NDArray& expTableV, NDArray& negTableV, NDArray& infV, int target, int ngStarter, NDArray& indices, NDArray& codes, double alpha, Nd4jLong randomValue, const int hsRounds, const int nsRounds) {
// void *vsyn0, void *vsyn1, void *vsyn1Neg, void *vexpTable, void *vnegTable, void *vinfVector, int target, int ngStarter, int *indices, int8_t *codes, double alpha, Nd4jLong randomValue, const int hsRounds, const int nsRounds, const int vocabSize, const int vectorLength, const int expLength, const int negLength) {
auto syn0 = reinterpret_cast<T*>(s0.specialBuffer());
auto syn1 = reinterpret_cast<T*>(s1.specialBuffer());
auto syn1Neg = reinterpret_cast<T*>(s1n.specialBuffer());
auto expTable = reinterpret_cast<T*>(expTableV.specialBuffer());
auto negTable = reinterpret_cast<T*>(negTableV.specialBuffer());
auto infVector = reinterpret_cast<T*>(infV.specialBuffer());
const int vocabSize = s0.sizeAt(0);
const int vectorLength = s0.sizeAt(1);
const int expLength = expTableV.lengthOf();
const int negLength = negTableV.lengthOf();
indices.tickReadDevice();
indices.syncToHost();
codes.tickReadDevice();
codes.syncToHost();
auto stream = s0.getContext()->getCudaStream();
T* neu1e; // = new T[vectorLength];
//memset(neu1e, 0, vectorLength * sizeof(T));
auto err = hipMalloc(&neu1e, sizeof(T) * vectorLength);
err = hipMemset(neu1e, 0, sizeof(T) * vectorLength);
// hierarchic softmax goes first (if enabled)
auto syn0row = infVector != nullptr ? infVector : syn0 + (target * vectorLength);
auto irow = 0;
if (hsRounds > 0) {
for (int r = 0; r < hsRounds; r++) {
irow = indices.t<int>(r);
if (irow < 0 || irow >= vocabSize)
break;
hSoftmax_<T>(syn0row, syn1 + (irow * vectorLength), expTable, neu1e, alpha, vectorLength, codes.t<int8_t>(r), expLength, infVector != nullptr, stream);
}
}
// negative sampling goes second (if enabled)
auto nsStarter = ngStarter;
irow = nsStarter;
if (nsRounds > 0) {
for (int r = 0; r < nsRounds + 1; r++) {
if (r == 0) {
// target is known in advance
} else {
randomValue = randomValue * (unsigned long long) 25214903917 + 11;
auto idx = sd::math::nd4j_abs<Nd4jLong >((randomValue >> 16) % negLength);
irow = idx >= negLength ? -1 : negTableV.e<int>(idx);
if (irow < 0 || irow >= vocabSize) irow = randomValue % (vocabSize - 1) + 1;
if (irow == nsStarter)
continue;
}
nSampling_<T>(syn0row, syn1Neg + (irow * vectorLength), expTable, neu1e, alpha, vectorLength, r == 0 ? 1 : 0, expLength, infVector != nullptr, stream);
}
}
if (infVector == nullptr) {
hipLaunchKernelGGL(( addInfVectorKernel<T>), dim3(128), dim3(256), 256, *stream, syn0row, neu1e, vectorLength);
} else {
hipLaunchKernelGGL(( addInfVectorKernel<T>), dim3(128), dim3(256), 256, *stream, infVector, neu1e, vectorLength);
}
err = hipStreamSynchronize(*stream);
if (0 != err) {
throw cuda_exception::build("helpers::skipgram_: Cannot synchronize stream after addInfVectorKernel", err);
}
err = hipFree(neu1e);
if (0 != err) {
throw cuda_exception::build("helpers::skipgram_: Cannot deallocate temp memory for lingual net", err);
}
}
BUILD_SINGLE_TEMPLATE(template void skipgram_, (NDArray& syn0, NDArray& syn1, NDArray& syn1Neg, NDArray& expTable, NDArray& negTable, NDArray& infVector, int target, int ngStarter, NDArray& indices, NDArray& codes, double alpha, Nd4jLong randomValue, const int hsRounds, const int nsRounds), FLOAT_TYPES);
/*
* batched version of skipgram routine
* */
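// Each target row gets its own device-side neu1e scratch buffer; the per-target
// learning rate and RNG seed come from the lr / nextRandom arrays, and all
// kernels are launched on the context's CUDA stream.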
template <typename T>
void skipgramBatchExec_(NDArray &s0, NDArray &s1, NDArray &s1n, NDArray& expTableV, NDArray& negTableV, NDArray &targets, NDArray &negStarters, NDArray &indices, NDArray &codes, NDArray &lr, NDArray &nextRandom, const int nsRounds, const bool preciseMode, const int numThreads) {
// (NDArray &s0, NDArray &s1, NDArray &s1n, NDArray& expTable, NDArray& negTable, NDArray& infVector, NDArray& targets, NDArray& negStarters, NDArray& indices, NDArray& codes, NDArray& lr, NDArray& nextRandom, const int nsRounds, const bool preciseMode, const int numThreads) {
//auto syn0 = reinterpret_cast<T*>(vsyn0);
//auto syn1 = reinterpret_cast<T*>(vsyn1);
//auto syn1Neg = reinterpret_cast<T*>(vsyn1Neg);
auto stream = s0.getContext()->getCudaStream();
negTableV.tickReadDevice();
negTableV.syncToHost();
const auto expTable = reinterpret_cast<T*>(expTableV.specialBuffer());
const auto negTable = reinterpret_cast<T*>(negTableV.buffer());
const auto infVector = (T*)nullptr; //reinterpret_cast<T*>(infVector.specialBuffer());
const int vocabSize = s0.sizeAt(0);
const int vectorLength = s0.sizeAt(1);
const int expLength = expTableV.lengthOf();
const int negLength = negTableV.lengthOf();
//T sneu1e[600];
//const auto numThreads = omp_get_max_threads();
const auto idxShift = indices.isEmpty() ? 0 : indices.sizeAt(1);
const auto hsRounds = codes.isEmpty() ? 0 : codes.sizeAt(1);
// regular mode provides 0 guarantees for reproducibility
auto numTargets = targets.lengthOf();
targets.syncToHost();
indices.syncToHost();
codes.syncToHost();
lr.syncToHost();
nextRandom.syncToHost();
negStarters.tickReadDevice();
negStarters.syncToHost();
auto bTarget = reinterpret_cast<int*>(targets.buffer()); //targets.bufferAsT<int>();
auto bIndices = reinterpret_cast<int*>(indices.buffer()); //indices.bufferAsT<int>();
auto bCodes = reinterpret_cast<int8_t*>(codes.buffer()); //codes.bufferAsT<int8_t>();
// PRAGMA_OMP_PARALLEL_FOR_ARGS(num_threads(numThreads))
for (int t = 0; t < numTargets; t++) {
T* neu1e;//lvectorLength <= 600 ? sneu1e : new T[vectorLength];
auto err = hipMalloc(&neu1e, vectorLength * sizeof(T));
err = hipMemset(neu1e, 0, vectorLength * sizeof(T));
//memset(neu1e, 0, vectorLength * sizeof(T));
auto target = bTarget[t];
auto alpha = lr.e<double>(t);
unsigned long long randomValue = nextRandom.e<Nd4jLong>(t);
auto syn0row = reinterpret_cast<T*>(s0.specialBuffer()) + (target * vectorLength);
if (hsRounds > 0) {
int irow = 0;
auto cShift = t * idxShift;
for (int e = 0; e < hsRounds; e++) {
irow = bIndices[e + cShift];
if (irow < 0 || irow >= vocabSize)
continue;
auto syn1row = reinterpret_cast<T*>(s1.specialBuffer()) + (irow * vectorLength);
auto code = bCodes[e + cShift];
//nd4j_printf("syn0: [%i]; syn1: [%i]; code: [%i]\n", target, irow, code);
hSoftmax_<T>(syn0row, syn1row, expTable, neu1e, alpha, vectorLength, code, expLength, false, stream);
}
}
if (nsRounds > 0) {
int irow = negStarters.e<int>(t);
int nsStarter = irow;
for (int r = 0; r < nsRounds + 1; r++) {
if (r == 0) {
// target is known in advance
} else {
randomValue = randomValue * (unsigned long long) 25214903917 + 11;
auto idx = sd::math::nd4j_abs<Nd4jLong >((randomValue >> 16) % negLength);
irow = idx >= negLength ? -1 : static_cast<int>(negTable[idx]);
if (irow < 0 || irow >= vocabSize)
irow = randomValue % (vocabSize - 1) + 1;
if (irow == nsStarter)
continue;
}
auto syn1row = reinterpret_cast<T*>(s1n.specialBuffer()) + (irow * vectorLength);
nSampling_<T>(syn0row, syn1row, expTable, neu1e, alpha, vectorLength, r == 0 ? 1 : 0, expLength, false, stream);
}
}
hipLaunchKernelGGL(( addInfVectorKernel<T>), dim3(128), dim3(256), 256, *stream, syn0row, neu1e, vectorLength);
err = hipStreamSynchronize(*stream);
if (0 != err) {
throw cuda_exception::build("helpers::skipgramBatchExec_: Cannot synchronize stream after addInfVectorKernel", err);
}
// optionally release temp arrays
err = hipFree(neu1e);
if (err != 0) {
throw cuda_exception::build("helpers::skipgramBatchExec_: Cannot deallocate memory with stage", err);
break;
}
// if (vectorLength > 600)
// delete[] neu1e;
}
}
BUILD_SINGLE_TEMPLATE(template void skipgramBatchExec_, (NDArray &s0, NDArray &s1, NDArray &s1n, NDArray& expTable, NDArray& negTable, NDArray &targets, NDArray &negStarters, NDArray &indices, NDArray &codes, NDArray &lr, NDArray &nextRandom, const int nsRounds, const bool preciseMode, const int numThreads), FLOAT_TYPES);
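// skipgram dispatches on the shape of the inputs: a scalar target/ngStarter
// goes through the single-round path, vector inputs go through the batched
// path, with the concrete float type selected from syn0 at runtime.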
void skipgram(NDArray &syn0, NDArray &syn1, NDArray &syn1Neg, NDArray &expTable, NDArray &negTable,
NDArray &target, NDArray &ngStarter, int nsRounds, NDArray &indices, NDArray &codes, NDArray &alpha, NDArray &randomValue, NDArray &inferenceVector, const bool preciseMode, const int numWorkers) {
auto xType = syn0.dataType();
// single round case
if ((ngStarter.isScalar() && !ngStarter.isEmpty())|| (target.isScalar() && !target.isEmpty())) {
auto hsRounds = codes.lengthOf();
target.syncToHost();
ngStarter.syncToHost();
alpha.syncToHost();
randomValue.syncToHost();
auto targetV = target.isEmpty() ? -1 : target.e<int>(0);
auto starterV = ngStarter.isEmpty() ? -1 : ngStarter.e<int>(0);
auto alphaV = alpha.e<double>(0);
auto randomV = randomValue.e<Nd4jLong>(0);
BUILD_SINGLE_SELECTOR(xType, skipgram_, (syn0, syn1, syn1Neg, expTable, negTable, inferenceVector, targetV, starterV, indices, codes, alphaV, randomV, hsRounds, nsRounds), FLOAT_TYPES);
} else if (ngStarter.isVector() || target.isVector()){
// batch mode
// NDArray& infVector, NDArray &targets, NDArray &negStarters, NDArray &indices, NDArray &codes, NDArray &lr, NDArray &nextRandom, const int nsRounds, const bool preciseMode, const int numThreads)
BUILD_SINGLE_SELECTOR(xType, skipgramBatchExec_, (syn0, syn1, syn1Neg, expTable, negTable, target, ngStarter, indices, codes, alpha, randomValue, nsRounds, preciseMode, numWorkers), FLOAT_TYPES);
} else
throw std::runtime_error("SkipGram: target must have rank 0 or 1");
}
template <typename T>
static __global__ void checkContextKernel(int* context, T* syn0, T* neu1, int contextWidth, int vectorLength, int vocabSize) {
__shared__ bool hasError;
if (0 == threadIdx.x) {
hasError = false;
}
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int c = start; c < contextWidth; c += step) {
if (context[c] >= vocabSize)
hasError = true; //throw std::runtime_error("Bad context 4");
if (!hasError) {
T *syn0word = syn0 + (context[c] * vectorLength);
for (int i = 0; i < vectorLength; i++) {
neu1[i] += syn0word[i];
}
}
}
if (threadIdx.x == 0) {
if (hasError)
neu1[0] = DataTypeUtils::infOrMax<T>();
}
__syncthreads();
}
template <typename T>
__global__ void shiftKernel(T* neu1, T* infVector, int contextWidth, int vectorLength) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i < vectorLength; i += step) {
neu1[i] /= contextWidth + int(infVector != nullptr); // add one when an inference vector joins the window
}
}
template <typename T>
__global__ void fillUpSynonymsKernel(int starter, int contextWidth, int vectorLength, int* lockedWords, int* context, T* neu1e, T* syn0) {
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int c = starter + start; c < contextWidth; c += step) {
if (lockedWords[c] == 1)
continue;
T *syn0word = syn0 + (context[c] * vectorLength);
for (int i = 0; i < vectorLength; i++) {
syn0word[i] += neu1e[i];
}
}
}
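// cbow_ runs one CBOW step: context word vectors are summed into neu1
// (checkContextKernel also validates the context ids), averaged over the
// window, pushed through hierarchical softmax and/or negative sampling against
// neu1, and the resulting error neu1e is scattered back to the unlocked
// context rows of syn0 (or accumulated into the inference vector).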
template <typename T>
void cbow_(LaunchContext* lc, void *vsyn0, void *vsyn1, void *vsyn1Neg, void *vexpTable, void *vnegTable, void *vinfVector, int target, int ngStarter, int *context, int *lockedWords, int *indices, int8_t *codes, double alpha, Nd4jLong randomValue, const int contextWidth, const int hsRounds, const int nsRounds, const int vocabSize, const int vectorLength, const int expLength, const int negLength, const int numLabels, const bool trainWords) {
auto syn0 = reinterpret_cast<T *>(vsyn0);
auto syn1 = reinterpret_cast<T *>(vsyn1);
auto syn1Neg = reinterpret_cast<T *>(vsyn1Neg);
auto expTable = reinterpret_cast<T *>(vexpTable);
auto negTable = reinterpret_cast<T *>(vnegTable);
auto infVector = reinterpret_cast<T *>(vinfVector);
auto stream = lc->getCudaStream();
T* neu1; // = new T[vectorLength];
T* neu1e; // = new T[vectorLength];
size_t buffSize = sizeof(T) * vectorLength;
auto err = hipMalloc(&neu1, buffSize);
err = hipMalloc(&neu1e, buffSize);
err = hipMemset(neu1, 0, buffSize);
err = hipMemset(neu1e, 0, buffSize);
// building neu1 for current window
hipLaunchKernelGGL(( checkContextKernel<T>), dim3(1),dim3(1),128,*stream, context, syn0, neu1, contextWidth, vectorLength, vocabSize);
T checkVal;
err = hipMemcpy(&checkVal, neu1, sizeof(T), hipMemcpyDeviceToHost);
if (DataTypeUtils::infOrMax<T>() == checkVal)
throw std::runtime_error("Bad context 4");
// for inference we add additional inference vector
if (infVector != nullptr) {
hipLaunchKernelGGL(( addInfVectorKernel<T>), dim3(128), dim3(256), 128, *stream, neu1, infVector, vectorLength);
}
// average neu1
if (contextWidth > 0) {
hipLaunchKernelGGL(( shiftKernel<T>), dim3(128), dim3(256), 128, *stream, neu1, infVector, contextWidth, vectorLength);
}
// softmax round
if (hsRounds > 0) {
for (int i = 0; i < hsRounds; i++) {
if (indices[i] < 0 || indices[i] >= vocabSize)
throw std::runtime_error("Bad context 5");
T* syn1Shifted = syn1 + (indices[i] * vectorLength);
hSoftmax_<T>(neu1, syn1Shifted, expTable, neu1e, alpha, vectorLength, codes[i], expLength, infVector != nullptr, stream);
}
}
auto nsStarter = ngStarter;
auto irow = nsStarter;
if (nsRounds > 0) {
for (int r = 0; r < nsRounds + 1; r++) {
if (r == 0) {
// target is known in advance
} else {
randomValue = randomValue * (unsigned long long) 25214903917 + 11;
auto idx = sd::math::nd4j_abs<Nd4jLong >((randomValue >> 16) % negLength);
irow = idx >= negLength ? -1 : static_cast<int>(negTable[idx]);
if (irow < 0 || irow >= vocabSize) irow = randomValue % (vocabSize - 1) + 1;
if (irow == nsStarter)
continue;
}
nSampling_<T>(neu1, syn1Neg + (irow * vectorLength), expTable, neu1e, alpha, vectorLength, r == 0 ? 1 : 0, expLength, infVector != nullptr, stream);
}
}
// if we don't train words - we skip start of idxSyn0
int starter = trainWords == 1 ? 0 : contextWidth - numLabels;
// propagate neu1e -> syn0
if (infVector == nullptr) {
hipLaunchKernelGGL(( fillUpSynonymsKernel<T>), dim3(1),dim3(1),128, *stream, starter, contextWidth, vectorLength, lockedWords, context, neu1e, syn0);
} else {
// infVector and neu1e are both device buffers at this point, so accumulate on
// the device (as the skipgram path does) rather than dereferencing device
// pointers from host code.
hipLaunchKernelGGL(( addInfVectorKernel<T>), dim3(128), dim3(256), 128, *stream, infVector, neu1e, vectorLength);
}
err = hipStreamSynchronize(*stream);
if (0 != err) {
throw cuda_exception::build(
"helpers::cbow_: Cannot synchronize stream after kernel executing", err);
}
err = hipFree(neu1);
if (0 != err) {
throw cuda_exception::build(
"helpers::cbow_: Cannot deallocate memory for synonims table", err);
}
err = hipFree(neu1e);
if (0 != err) {
throw cuda_exception::build(
"helpers::cbow_: Cannot deallocate memory for antonims table", err);
}
}
BUILD_SINGLE_TEMPLATE(template void cbow_, (LaunchContext* lc, void *syn0, void *syn1, void *syn1Neg, void *expTable, void *vnegTable, void *vinfVector, int target, int ngStarter, int *context, int *lockedWords, int *indices, int8_t *codes, double alpha, Nd4jLong randomValue, const int contextWidth, const int hsRounds, const int nsRounds, const int vocabSize, const int vectorLength, const int expLength, const int negLength, const int numLabels, const bool trainWords), FLOAT_TYPES);
template <typename T>
static __global__ void buildCurrentWindowKernel(int vocabSize, int contextWidth, int vectorLength, int* bContext, T* syn0, T* neu1, int* actualContext, int e) {
// building neu1 for current window
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int c = start; c < contextWidth; c += step) {
// getting next context word
auto cContext = bContext[c + (e * contextWidth)];
// skipping padded values
if (cContext < 0)
continue;
// if (cContext >= vocabSize)
// throw std::runtime_error("ContextID can't be >= vocab size");
T *syn0word = syn0 + (cContext * vectorLength);
for (int i = 0; i < vectorLength; i++)
neu1[i] += syn0word[i];
atomicAdd(actualContext, 1);
}
}
template <typename T>
__global__ void arrangeNeuKernel(int vectorLength, T* neu1, T* infVector, int* actualContext) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i < vectorLength && *actualContext > 0; i += step)
neu1[i] /= (*actualContext + int(infVector != nullptr));
}
template <typename T>
__global__ void applyShiftKernel(int* bContext, int* bLocker, T* syn0, T* neu1e, int contextWidth, int vectorLength, int e, int starter) {
auto step = blockDim.x * gridDim.x;
auto start = blockDim.x * blockIdx.x + threadIdx.x;
for (int c = starter + start; c < contextWidth; c += step) {
// getting context
auto cContext = bContext[c + (e * contextWidth)];
auto cLock = bLocker[c + (e * contextWidth)];
// skipping padded values
if (cContext < 0 || cLock == 1)
continue;
// if (cContext >= vocabSize)
// throw std::runtime_error("ContextID can't be > vocab size");
// one word from context
T *syn0word = syn0 + (cContext * vectorLength);
for (int i = 0; i < vectorLength; i++)
syn0word[i] += neu1e[i];
}
}
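// cbowBatchExec_ is the batched CBOW path: for each row of the context matrix
// it rebuilds neu1 on the device (buildCurrentWindowKernel + arrangeNeuKernel),
// applies the hierarchical-softmax and negative-sampling updates, and scatters
// the error back with applyShiftKernel; neu1, neu1e and actualContext are
// allocated once and reused across iterations.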
template <typename T>
void cbowBatchExec_(LaunchContext* lc, NDArray &s0, NDArray &s1, NDArray &s1n, void *vexpTable, void *vnegTable, void *vinfVector, NDArray &context, NDArray &lockedWords, NDArray &targets, NDArray &negStarters, NDArray &indices, NDArray &codes, NDArray &lr, NDArray &nextRandom, NDArray &nLabels, const int nsRounds, const int vocabSize, const int vectorLength, const int expLength, const int negLength, const bool trainWords, const int numThreads) {
const auto syn0 = reinterpret_cast<T*>(s0.specialBuffer()); //bufferAsT<T>();
const auto syn1 = reinterpret_cast<T*>(s1.specialBuffer()); //bufferAsT<T>();
const auto syn1Neg = reinterpret_cast<T*>(s1n.specialBuffer()); //bufferAsT<T>();
const auto expTable = reinterpret_cast<T*>(vexpTable);
const auto negTable = reinterpret_cast<T*>(vnegTable);
const auto infVector = reinterpret_cast<T*>(vinfVector);
auto stream = lc->getCudaStream();
indices.syncToHost();
codes.syncToHost();
negStarters.syncToHost();
context.syncToHost();
//const auto numThreads = omp_get_max_threads();
const auto idxShift = indices.isEmpty() ? 0 : indices.sizeAt(1);
const auto hsRounds = codes.isEmpty() ? 0 : codes.sizeAt(1);
const auto numTargets = context.sizeAt(0);
const int contextWidth = context.sizeAt(1);
//const auto bContext = reinterpret_cast<int*>(context.buffer()); //bufferAsT<int>();
const auto dContext = context.dataBuffer()->specialAsT<int>(); //bufferAsT<int>();
// const auto bLocker = reinterpret_cast<int*>(lockedWords.buffer()); //lockedWords.bufferAsT<int>();
const auto dLocker = lockedWords.dataBuffer()->specialAsT<int>(); //.specialBuffer()); //lockedWords.bufferAsT<int>();
const auto bIndices = indices.dataBuffer()->primaryAsT<int>(); //buffer());//AsT<int>();
const auto bCodes = codes.dataBuffer()->primaryAsT<int8_t>(); //reinterpret_cast<int8_t*>(codes.buffer()); //bufferAsT<int8_t>();
const auto bStarters = negStarters.dataBuffer()->primaryAsT<int>(); //reinterpret_cast<int*>(negStarters.buffer()); //AsT<int>();
const auto numIndices = indices.isEmpty() ? 0 : indices.sizeAt(1);
lr.syncToHost();
nLabels.syncToHost();
//PRAGMA_OMP_PARALLEL_FOR_ARGS(num_threads(numThreads) private(sneu1, sneu1e))
//NDArray neuVector('c', {vectorLength}, DataTypeUtils::fromT<T>());
// auto neuEVector = neuVector; //NDArrayFactory::create<T>('c', {vectorLength});
T* neu1; // = reinterpret_cast<T*>(neuVector.specialBuffer());// = vectorLength <= 600 ? sneu1 : new T[vectorLength];
T* neu1e; // = reinterpret_cast<T*>(neuVector.specialBuffer()); // = vectorLength <= 600 ? sneu1e : new T[vectorLength];
auto cerr = hipMalloc(&neu1, sizeof(T) * vectorLength);
if (cerr) {
throw cuda_exception::build("Cannot allocate temp vector buffer", cerr);
}
cerr = hipMalloc(&neu1e, sizeof(T) * vectorLength);
if (cerr) {
throw cuda_exception::build("Cannot allocate temp vector buffer", cerr);
}
int* actualContext;
cerr = hipMalloc(&actualContext, sizeof(int));
if (cerr) {
throw cuda_exception::build("Cannot allocate counter buffer", cerr);
}
for (int e = 0; e < numTargets; e++) {
// auto err = hipMalloc(&neu1, sizeof(T)* vectorLength);
// q err = hipMalloc(&neu1e, sizeof(T)*vectorLength);
//
// // optionally we nullify temp arrays after successful (and on first) cycle
// memset(neu1, 0, sizeof(T) * vectorLength);
// memset(neu1e, 0, sizeof(T) * vectorLength);
auto alpha = lr.e<double>(e);
auto numLabels = nLabels.isEmpty() ? 0 : nLabels.e<int>(e);
// auto err = hipMemset(actualContext, 0, sizeof(int));
// if (err) {
// printf("Cuda error %d\n", err); break;
// }
hipLaunchKernelGGL(( buildCurrentWindowKernel<T>), dim3(1),dim3(1),128, *stream, vocabSize, contextWidth, vectorLength, dContext, syn0, neu1, actualContext, e);
hipLaunchKernelGGL(( arrangeNeuKernel<T>), dim3(1),dim3(1),128, *stream, vectorLength, neu1, infVector, actualContext);
// hierarchic softmax step
if (!indices.isEmpty()) {
for (int i = 0; i < numIndices; i++) {
const int cIndex = bIndices[(e * numIndices) + i];
const int cCode = bCodes[(e * numIndices) + i];
// we're skipping padded values
if (cIndex < 0)
continue;
if (cIndex >= vocabSize)
throw std::runtime_error("Index can't be > vocab size");
hSoftmax_<T>(neu1, syn1 + (cIndex * vectorLength), expTable, neu1e, alpha, vectorLength, cCode, expLength, false, stream);
}
}
// negative sampling step
if (!negStarters.isEmpty() && nsRounds > 0) {
int irow = bStarters[e];
const int nsStarter = irow;
unsigned long long randomValue = nextRandom.e<Nd4jLong>(e);
for (int r = 0; r < nsRounds + 1; r++) {
// we're skipping rng on 0 step
if (r != 0) {
randomValue = randomValue * (unsigned long long) 25214903917 + 11;
auto idx = sd::math::nd4j_abs<Nd4jLong>((randomValue >> 16) % negLength);
irow = idx >= negLength ? -1 : static_cast<int>(negTable[idx]);
if (irow < 0 || irow >= vocabSize) irow = randomValue % (vocabSize - 1) + 1;
if (irow == nsStarter)
continue;
nSampling_<T>(neu1, s1n.bufferWithOffset(irow * vectorLength), expTable, neu1e, alpha, vectorLength, r == 0 ? 1 : 0, expLength, infVector != nullptr, stream);
} else {
nSampling_<T>(neu1, s1n.bufferWithOffset(irow * vectorLength), expTable, neu1e, alpha, vectorLength, r == 0 ? 1 : 0, expLength, infVector != nullptr, stream);
}
//nd4j_printf("Thread <%i>: syn0: [%i]; s1n: [%i];\n", omp_get_thread_num(), 0, irow);
}
}
// if we're skipping labels
int starter = trainWords == 1 ? 0 : contextWidth - numLabels;
// applying previously averaged results
hipLaunchKernelGGL(( applyShiftKernel<T>), dim3(1),dim3(1),128, *stream, dContext, dLocker, syn0, neu1e, contextWidth, vectorLength, e, starter);
// optionally release temp arrays
// if (vectorLength > 600) {
// }
}
cerr = hipStreamSynchronize(*stream);
if (cerr) {
throw cuda_exception::build("Cannot syncronize stream before memory deallocation", cerr);
}
cerr = hipFree(neu1);
if (cerr) {
throw cuda_exception::build("Cannot deallocate temp buffer1", cerr);
}
cerr = hipFree(neu1e);
if (cerr) {
throw cuda_exception::build("Cannot deallocate temp buffer1 E", cerr);
}
cerr = hipFree(actualContext);
if (cerr) {
throw cuda_exception::build("Cannot deallocate temp buffer1", cerr);
}
}
BUILD_SINGLE_TEMPLATE(template void cbowBatchExec_, (LaunchContext* lc, NDArray &s0, NDArray &s1, NDArray &s1n, void *vexpTable, void *vnegTable, void *vinfVector, NDArray &context, NDArray &lockedWords, NDArray &targets, NDArray &negStarters, NDArray &indices, NDArray &codes, NDArray &lr, NDArray &nextRandom, NDArray &nLabels, const int nsRounds, const int vocabSize, const int vectorLength, const int expLength, const int negLength, const bool trainWords, const int numThreads), FLOAT_TYPES);
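// cbow dispatches on the rank of `context`: rank 0/1 uses the single-row path,
// rank 2 uses the batched path; the host-side copies that the CPU code reads
// are synchronized first.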
void cbow(NDArray &syn0, NDArray &syn1, NDArray &syn1Neg, NDArray &expTable, NDArray &negTable, NDArray &target, NDArray &ngStarter, int nsRounds, NDArray &context, NDArray &lockedWords, NDArray &indices, NDArray &codes, NDArray &alpha, NDArray &randomValue, NDArray &numLabels, NDArray &inferenceVector, const bool trainWords, int numWorkers) {
auto xType = syn0.dataType();
auto lc = context.getContext();
indices.syncToHost();
NDArray::prepareSpecialUse({&syn0, &syn1, &syn1Neg, &expTable, &negTable, &target, &ngStarter}, {&context, &lockedWords, &indices, &codes, &alpha, &randomValue, &numLabels, &inferenceVector});
//auto stream = lc->getCudaStream();
if ((context.rankOf() == 0 || context.rankOf() == 1) && (indices.rankOf() == 1 || indices.rankOf() == 0)) {
// single round case
/*nd4j_printf("Row exec; ContextWidth: %i; LockedWords: %i; numLabels: %i; Train words: %i\n", (int) context.lengthOf(), (int) lockedWords.lengthOf(), numLabels.isEmpty() ? 0 : numLabels.e<int>(0), (int) trainWords);
if (context.lengthOf() == 2) {
context.printBuffer("context");
lockedWords.printBuffer("locked");
codes.printBuffer("codes");
indices.printBuffer("indices");
}*/
auto hsRounds = codes.lengthOf();
target.syncToHost();
numLabels.syncToHost();
target.syncToHost();
alpha.syncToHost();
numLabels.syncToHost();
codes.syncToHost();
negTable.syncToHost();
BUILD_SINGLE_SELECTOR(xType, cbow_, (lc, syn0.specialBuffer(), syn1.specialBuffer(), syn1Neg.specialBuffer(), expTable.specialBuffer(), negTable.buffer(), inferenceVector.specialBuffer(), target.isEmpty() ? -1 : target.e<int>(0), ngStarter.isEmpty() ? -1 : ngStarter.e<int>(0), reinterpret_cast<int *>(context.specialBuffer()), reinterpret_cast<int *>(lockedWords.specialBuffer()),reinterpret_cast<int *>(indices.buffer()), reinterpret_cast<int8_t *>(codes.buffer()), alpha.e<double>( 0), randomValue.e<Nd4jLong>(0), (int) context.lengthOf(), hsRounds, nsRounds, (int) syn0.sizeAt(0), (int) syn0.sizeAt(1), (int) expTable.lengthOf(), (int) negTable.lengthOf(), numLabels.isEmpty() ? 0 : numLabels.e<int>(0), trainWords), FLOAT_TYPES);
} else if (context.rankOf() == 2 && indices.rankOf() == 2) {
// batch mode
//nd4j_printf("Batch exec\n","");
BUILD_SINGLE_SELECTOR(xType, cbowBatchExec_, (lc, syn0, syn1, syn1Neg, expTable.specialBuffer(), negTable.specialBuffer(), nullptr, context, lockedWords, target, ngStarter, indices, codes, alpha, randomValue, numLabels, nsRounds, syn0.sizeAt(0), syn0.sizeAt(1), expTable.lengthOf(), negTable.isEmpty() ? 0 : negTable.lengthOf(), trainWords, numWorkers), FLOAT_TYPES);
} else
throw std::runtime_error("CBOW: context must have rank 0/1 or 2");
NDArray::registerSpecialUse({&syn0, &syn1, &syn1Neg, &expTable, &negTable, &target, &ngStarter}, {&context, &lockedWords, &indices, &codes, &alpha, &randomValue, &numLabels, &inferenceVector});
}
}
}
} | 91cce98de42976fb79a0a1f1b4883d6b534472dc.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/sg_cb.h>
#include <exceptions/cuda_exception.h>
#include <array/NDArrayFactory.h>
#define HS_MAX_EXP 6.0f
namespace sd {
namespace ops {
namespace helpers {
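// hSoftmaxKernel performs one hierarchical-softmax update for a (syn0, syn1)
// pair: dot product, sigmoid lookup in the precomputed expTable, gradient
// g = (1 - code - f) * alpha, then g * syn1 is accumulated into neu1e and
// (outside inference) g * syn0 is applied to syn1.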
template <typename T>
__global__ void hSoftmaxKernel(void *vsyn0, void *vsyn1, void *vexpTable, void *vneu1e, double alpha, int vectorLength, int code, int expLength, bool isInference) {
auto syn0 = reinterpret_cast<T*>(vsyn0);
auto syn1 = reinterpret_cast<T*>(vsyn1);
auto expTable = reinterpret_cast<T*>(vexpTable);
auto neu1e = reinterpret_cast<T*>(vneu1e);
T dot(0.0f);
T g(0.0f);
T f(0.0f);
// dot
for (int e = 0; e < vectorLength; e++) {
dot += syn0[e] * syn1[e];
}
// gradient
if (dot < (T) - HS_MAX_EXP || dot >= (T) HS_MAX_EXP)
return;
int idx = static_cast<int>((dot + HS_MAX_EXP) * ((float) expLength / HS_MAX_EXP / 2.0f));
if (idx >= expLength || idx < 0)
return;
f = expTable[idx];
g = (static_cast<T>(1.0f) - static_cast<T>(code) - f) * (T) alpha;
// axpy1
for (int e = 0; e < vectorLength; e++) {
neu1e[e] = g * syn1[e] + neu1e[e];
}
// axpy2
if (!isInference) {
for (int e = 0; e < vectorLength; e++) {
syn1[e] = g * syn0[e] + syn1[e];
}
}
}
template <typename T>
void hSoftmax_(void *vsyn0, void *vsyn1, void *vexpTable, void *vneu1e, double alpha, int vectorLength, int code, int expLength, bool isInference, cudaStream_t* stream) {
hSoftmaxKernel<T><<<1,1,128, *stream>>>(vsyn0, vsyn1, vexpTable, vneu1e, alpha, vectorLength, code, expLength, isInference);
}
template <typename T>
__global__ void nSamplingKernel(void *vsyn0, void *vsyn1Neg, void *vexpTable, void *vneu1e, double alpha, int vectorLength, int code, int expLength, bool isInference) {
auto syn0 = reinterpret_cast<T*>(vsyn0);
auto syn1Neg = reinterpret_cast<T*>(vsyn1Neg);
auto expTable = reinterpret_cast<T*>(vexpTable);
auto neu1e = reinterpret_cast<T*>(vneu1e);
T dot = (T) 0.0f;
T g = (T) 0.0f;
for (int e = 0; e < vectorLength; e++) {
dot += syn0[e] * syn1Neg[e];
}
if (dot > HS_MAX_EXP)
g = (code - 1) * alpha;
else if (dot < (T) - HS_MAX_EXP)
g = (code - 0) * alpha;
else {
int idx = (int) ((dot + (T) HS_MAX_EXP) * ((T) expLength / HS_MAX_EXP / 2.0));
if (idx >= expLength)
return;
if (idx < 0)
return;
g = ((T) code - expTable[idx]) * alpha;
}
// axpy1
for (int e = 0; e < vectorLength; e++) {
neu1e[e] = g * syn1Neg[e] + neu1e[e];
}
// axpy2
if (!isInference) {
for (int e = 0; e < vectorLength; e++) {
syn1Neg[e] = g * syn0[e] + syn1Neg[e];
}
}
}
template <typename T>
void nSampling_(void *vsyn0, void *vsyn1Neg, void *vexpTable, void *vneu1e, double alpha, int vectorLength, int code, int expLength, bool isInference, cudaStream_t* stream) {
nSamplingKernel<T><<<1,1,128, *stream>>>(vsyn0, vsyn1Neg, vexpTable, vneu1e, alpha, vectorLength, code, expLength, isInference);
}
/*
* binarySearch - find element in haystack buffer (haystack - sorted device memory)
* */
int binarySearch(const int *haystack, const int needle, const int totalElements) {
int firstIndex = 0;
int lastIndex = totalElements - 1;
int halfIndex = sd::math::nd4j_floor<float, int>((lastIndex + firstIndex) / (float) 2);
while(haystack[halfIndex] != needle && firstIndex < lastIndex) {
if (needle < haystack[halfIndex]) {
lastIndex = halfIndex - 1;
} else if (needle > haystack[halfIndex]) {
firstIndex = halfIndex + 1;
}
halfIndex = sd::math::nd4j_floor<float, int>((lastIndex + firstIndex) / (float) 2);
}
return (haystack[halfIndex] == needle) ? halfIndex : -1;
}
template <typename T>
__global__ void addInfVectorKernel(T* neu1, T* infVector, int vectorLength) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < vectorLength; i += step) {
neu1[i] += infVector[i];
}
}
template <typename T>
void skipgram_(NDArray& s0, NDArray& s1, NDArray& s1n, NDArray& expTableV, NDArray& negTableV, NDArray& infV, int target, int ngStarter, NDArray& indices, NDArray& codes, double alpha, Nd4jLong randomValue, const int hsRounds, const int nsRounds) {
// void *vsyn0, void *vsyn1, void *vsyn1Neg, void *vexpTable, void *vnegTable, void *vinfVector, int target, int ngStarter, int *indices, int8_t *codes, double alpha, Nd4jLong randomValue, const int hsRounds, const int nsRounds, const int vocabSize, const int vectorLength, const int expLength, const int negLength) {
auto syn0 = reinterpret_cast<T*>(s0.specialBuffer());
auto syn1 = reinterpret_cast<T*>(s1.specialBuffer());
auto syn1Neg = reinterpret_cast<T*>(s1n.specialBuffer());
auto expTable = reinterpret_cast<T*>(expTableV.specialBuffer());
auto negTable = reinterpret_cast<T*>(negTableV.specialBuffer());
auto infVector = reinterpret_cast<T*>(infV.specialBuffer());
const int vocabSize = s0.sizeAt(0);
const int vectorLength = s0.sizeAt(1);
const int expLength = expTableV.lengthOf();
const int negLength = negTableV.lengthOf();
indices.tickReadDevice();
indices.syncToHost();
codes.tickReadDevice();
codes.syncToHost();
auto stream = s0.getContext()->getCudaStream();
T* neu1e; // = new T[vectorLength];
//memset(neu1e, 0, vectorLength * sizeof(T));
auto err = cudaMalloc(&neu1e, sizeof(T) * vectorLength);
err = cudaMemset(neu1e, 0, sizeof(T) * vectorLength);
// hierarchical softmax goes first (if enabled)
auto syn0row = infVector != nullptr ? infVector : syn0 + (target * vectorLength);
auto irow = 0;
if (hsRounds > 0) {
for (int r = 0; r < hsRounds; r++) {
irow = indices.t<int>(r);
if (irow < 0 || irow >= vocabSize)
break;
hSoftmax_<T>(syn0row, syn1 + (irow * vectorLength), expTable, neu1e, alpha, vectorLength, codes.t<int8_t>(r), expLength, infVector != nullptr, stream);
}
}
// negative sampling goes second (if enabled)
auto nsStarter = ngStarter;
irow = nsStarter;
if (nsRounds > 0) {
for (int r = 0; r < nsRounds + 1; r++) {
if (r == 0) {
// target is known in advance
} else {
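// same linear congruential generator as the reference word2vec implementation
// (multiplier 25214903917, increment 11); the upper bits index the negative table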
randomValue = randomValue * (unsigned long long) 25214903917 + 11;
auto idx = sd::math::nd4j_abs<Nd4jLong >((randomValue >> 16) % negLength);
irow = idx >= negLength ? -1 : negTableV.e<int>(idx);
if (irow < 0 || irow >= vocabSize) irow = randomValue % (vocabSize - 1) + 1;
if (irow == nsStarter)
continue;
}
nSampling_<T>(syn0row, syn1Neg + (irow * vectorLength), expTable, neu1e, alpha, vectorLength, r == 0 ? 1 : 0, expLength, infVector != nullptr, stream);
}
}
if (infVector == nullptr) {
addInfVectorKernel<T><<<128, 256, 256, *stream>>>(syn0row, neu1e, vectorLength);
} else {
addInfVectorKernel<T><<<128, 256, 256, *stream>>>(infVector, neu1e, vectorLength);
}
err = cudaStreamSynchronize(*stream);
if (0 != err) {
throw cuda_exception::build("helpers::skipgram_: Cannot synchronize stream after addInfVectorKernel", err);
}
err = cudaFree(neu1e);
if (0 != err) {
throw cuda_exception::build("helpers::skipgram_: Cannot deallocate temp memory for lingual net", err);
}
}
BUILD_SINGLE_TEMPLATE(template void skipgram_, (NDArray& syn0, NDArray& syn1, NDArray& syn1Neg, NDArray& expTable, NDArray& negTable, NDArray& infVector, int target, int ngStarter, NDArray& indices, NDArray& codes, double alpha, Nd4jLong randomValue, const int hsRounds, const int nsRounds), FLOAT_TYPES);
/*
* batched version of skipgram routine
* */
template <typename T>
void skipgramBatchExec_(NDArray &s0, NDArray &s1, NDArray &s1n, NDArray& expTableV, NDArray& negTableV, NDArray &targets, NDArray &negStarters, NDArray &indices, NDArray &codes, NDArray &lr, NDArray &nextRandom, const int nsRounds, const bool preciseMode, const int numThreads) {
// (NDArray &s0, NDArray &s1, NDArray &s1n, NDArray& expTable, NDArray& negTable, NDArray& infVector, NDArray& targets, NDArray& negStarters, NDArray& indices, NDArray& codes, NDArray& lr, NDArray& nextRandom, const int nsRounds, const bool preciseMode, const int numThreads) {
//auto syn0 = reinterpret_cast<T*>(vsyn0);
//auto syn1 = reinterpret_cast<T*>(vsyn1);
//auto syn1Neg = reinterpret_cast<T*>(vsyn1Neg);
auto stream = s0.getContext()->getCudaStream();
negTableV.tickReadDevice();
negTableV.syncToHost();
const auto expTable = reinterpret_cast<T*>(expTableV.specialBuffer());
const auto negTable = reinterpret_cast<T*>(negTableV.buffer());
const auto infVector = (T*)nullptr; //reinterpret_cast<T*>(infVector.specialBuffer());
const int vocabSize = s0.sizeAt(0);
const int vectorLength = s0.sizeAt(1);
const int expLength = expTableV.lengthOf();
const int negLength = negTableV.lengthOf();
//T sneu1e[600];
//const auto numThreads = omp_get_max_threads();
const auto idxShift = indices.isEmpty() ? 0 : indices.sizeAt(1);
const auto hsRounds = codes.isEmpty() ? 0 : codes.sizeAt(1);
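// indices and codes are stored row-major as [numTargets x idxShift], so the
// entries for target t start at offset t * idxShift (cShift below)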
// regular (non-precise) mode provides no reproducibility guarantees
auto numTargets = targets.lengthOf();
targets.syncToHost();
indices.syncToHost();
codes.syncToHost();
lr.syncToHost();
nextRandom.syncToHost();
negStarters.tickReadDevice();
negStarters.syncToHost();
auto bTarget = reinterpret_cast<int*>(targets.buffer()); //targets.bufferAsT<int>();
auto bIndices = reinterpret_cast<int*>(indices.buffer()); //indices.bufferAsT<int>();
auto bCodes = reinterpret_cast<int8_t*>(codes.buffer()); //codes.bufferAsT<int8_t>();
// PRAGMA_OMP_PARALLEL_FOR_ARGS(num_threads(numThreads))
for (int t = 0; t < numTargets; t++) {
T* neu1e;//lvectorLength <= 600 ? sneu1e : new T[vectorLength];
auto err = cudaMalloc(&neu1e, vectorLength * sizeof(T));
err = cudaMemset(neu1e, 0, vectorLength * sizeof(T));
//memset(neu1e, 0, vectorLength * sizeof(T));
auto target = bTarget[t];
auto alpha = lr.e<double>(t);
unsigned long long randomValue = nextRandom.e<Nd4jLong>(t);
auto syn0row = reinterpret_cast<T*>(s0.specialBuffer()) + (target * vectorLength);
if (hsRounds > 0) {
int irow = 0;
auto cShift = t * idxShift;
for (int e = 0; e < hsRounds; e++) {
irow = bIndices[e + cShift];
if (irow < 0 || irow >= vocabSize)
continue;
auto syn1row = reinterpret_cast<T*>(s1.specialBuffer()) + (irow * vectorLength);
auto code = bCodes[e + cShift];
//nd4j_printf("syn0: [%i]; syn1: [%i]; code: [%i]\n", target, irow, code);
hSoftmax_<T>(syn0row, syn1row, expTable, neu1e, alpha, vectorLength, code, expLength, false, stream);
}
}
if (nsRounds > 0) {
int irow = negStarters.e<int>(t);
int nsStarter = irow;
for (int r = 0; r < nsRounds + 1; r++) {
if (r == 0) {
// target is known in advance
} else {
randomValue = randomValue * (unsigned long long) 25214903917 + 11;
auto idx = sd::math::nd4j_abs<Nd4jLong >((randomValue >> 16) % negLength);
irow = idx >= negLength ? -1 : static_cast<int>(negTable[idx]);
if (irow < 0 || irow >= vocabSize)
irow = randomValue % (vocabSize - 1) + 1;
if (irow == nsStarter)
continue;
}
auto syn1row = reinterpret_cast<T*>(s1n.specialBuffer()) + (irow * vectorLength);
nSampling_<T>(syn0row, syn1row, expTable, neu1e, alpha, vectorLength, r == 0 ? 1 : 0, expLength, false, stream);
}
}
addInfVectorKernel<T><<<128, 256, 256, *stream>>>(syn0row, neu1e, vectorLength);
err = cudaStreamSynchronize(*stream);
if (0 != err) {
throw cuda_exception::build("helpers::skipgramBatchExec_: Cannot synchronize stream after addInfVectorKernel", err);
}
// release the temporary gradient buffer
err = cudaFree(neu1e);
if (err != 0) {
throw cuda_exception::build("helpers::skipgramBatchExec_: Cannot deallocate memory with stage", err);
break;
}
// if (vectorLength > 600)
// delete[] neu1e;
}
}
BUILD_SINGLE_TEMPLATE(template void skipgramBatchExec_, (NDArray &s0, NDArray &s1, NDArray &s1n, NDArray& expTable, NDArray& negTable, NDArray &targets, NDArray &negStarters, NDArray &indices, NDArray &codes, NDArray &lr, NDArray &nextRandom, const int nsRounds, const bool preciseMode, const int numThreads), FLOAT_TYPES);
void skipgram(NDArray &syn0, NDArray &syn1, NDArray &syn1Neg, NDArray &expTable, NDArray &negTable,
NDArray &target, NDArray &ngStarter, int nsRounds, NDArray &indices, NDArray &codes, NDArray &alpha, NDArray &randomValue, NDArray &inferenceVector, const bool preciseMode, const int numWorkers) {
auto xType = syn0.dataType();
// single round case
if ((ngStarter.isScalar() && !ngStarter.isEmpty())|| (target.isScalar() && !target.isEmpty())) {
auto hsRounds = codes.lengthOf();
target.syncToHost();
ngStarter.syncToHost();
alpha.syncToHost();
randomValue.syncToHost();
auto targetV = target.isEmpty() ? -1 : target.e<int>(0);
auto starterV = ngStarter.isEmpty() ? -1 : ngStarter.e<int>(0);
auto alphaV = alpha.e<double>(0);
auto randomV = randomValue.e<Nd4jLong>(0);
BUILD_SINGLE_SELECTOR(xType, skipgram_, (syn0, syn1, syn1Neg, expTable, negTable, inferenceVector, targetV, starterV, indices, codes, alphaV, randomV, hsRounds, nsRounds), FLOAT_TYPES);
} else if (ngStarter.isVector() || target.isVector()){
// batch mode
// NDArray& infVector, NDArray &targets, NDArray &negStarters, NDArray &indices, NDArray &codes, NDArray &lr, NDArray &nextRandom, const int nsRounds, const bool preciseMode, const int numThreads)
BUILD_SINGLE_SELECTOR(xType, skipgramBatchExec_, (syn0, syn1, syn1Neg, expTable, negTable, target, ngStarter, indices, codes, alpha, randomValue, nsRounds, preciseMode, numWorkers), FLOAT_TYPES);
} else
throw std::runtime_error("SkipGram: target must have rank 0 or 1");
}
template <typename T>
static __global__ void checkContextKernel(int* context, T* syn0, T* neu1, int contextWidth, int vectorLength, int vocabSize) {
__shared__ bool hasError;
if (0 == threadIdx.x) {
hasError = false;
}
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int c = start; c < contextWidth; c += step) {
if (context[c] >= vocabSize)
hasError = true; //throw std::runtime_error("Bad context 4");
if (!hasError) {
T *syn0word = syn0 + (context[c] * vectorLength);
for (int i = 0; i < vectorLength; i++) {
neu1[i] += syn0word[i];
}
}
}
if (threadIdx.x == 0) {
if (hasError)
neu1[0] = DataTypeUtils::infOrMax<T>();
}
__syncthreads();
}
template <typename T>
__global__ void shiftKernel(T* neu1, T* infVector, int contextWidth, int vectorLength) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i < vectorLength; i += step) {
neu1[i] /= contextWidth + int(infVector != nullptr); // +1 when the inference vector was added to the sum
}
}
template <typename T>
__global__ void fillUpSynonymsKernel(int starter, int contextWidth, int vectorLength, int* lockedWords, int* context, T* neu1e, T* syn0) {
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int c = starter + start; c < contextWidth; c += step) {
if (lockedWords[c] == 1)
continue;
T *syn0word = syn0 + (context[c] * vectorLength);
for (int i = 0; i < vectorLength; i++) {
syn0word[i] += neu1e[i];
}
}
}
template <typename T>
void cbow_(LaunchContext* lc, void *vsyn0, void *vsyn1, void *vsyn1Neg, void *vexpTable, void *vnegTable, void *vinfVector, int target, int ngStarter, int *context, int *lockedWords, int *indices, int8_t *codes, double alpha, Nd4jLong randomValue, const int contextWidth, const int hsRounds, const int nsRounds, const int vocabSize, const int vectorLength, const int expLength, const int negLength, const int numLabels, const bool trainWords) {
auto syn0 = reinterpret_cast<T *>(vsyn0);
auto syn1 = reinterpret_cast<T *>(vsyn1);
auto syn1Neg = reinterpret_cast<T *>(vsyn1Neg);
auto expTable = reinterpret_cast<T *>(vexpTable);
auto negTable = reinterpret_cast<T *>(vnegTable);
auto infVector = reinterpret_cast<T *>(vinfVector);
auto stream = lc->getCudaStream();
T* neu1; // = new T[vectorLength];
T* neu1e; // = new T[vectorLength];
size_t buffSize = sizeof(T) * vectorLength;
auto err = cudaMalloc(&neu1, buffSize);
err = cudaMalloc(&neu1e, buffSize);
err = cudaMemset(neu1, 0, buffSize);
err = cudaMemset(neu1e, 0, buffSize);
// building neu1 for current window
checkContextKernel<T><<<1,1,128,*stream>>>(context, syn0, neu1, contextWidth, vectorLength, vocabSize);
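// checkContextKernel flags an out-of-range context id by writing the type's max value
// into neu1[0]; that single element is copied back below and triggers the throw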
T checkVal;
err = cudaMemcpy(&checkVal, neu1, sizeof(T), cudaMemcpyDeviceToHost);
if (DataTypeUtils::infOrMax<T>() == checkVal)
throw std::runtime_error("Bad context 4");
// for inference we add additional inference vector
if (infVector != nullptr) {
addInfVectorKernel<T><<<128, 256, 128, *stream>>>(neu1, infVector, vectorLength);
}
// average neu1
if (contextWidth > 0) {
shiftKernel<T><<<128, 256, 128, *stream>>>(neu1, infVector, contextWidth, vectorLength);
}
// softmax round
if (hsRounds > 0) {
for (int i = 0; i < hsRounds; i++) {
if (indices[i] < 0 || indices[i] >= vocabSize)
throw std::runtime_error("Bad context 5");
T* syn1Shifted = syn1 + (indices[i] * vectorLength);
hSoftmax_<T>(neu1, syn1Shifted, expTable, neu1e, alpha, vectorLength, codes[i], expLength, infVector != nullptr, stream);
}
}
auto nsStarter = ngStarter;
auto irow = nsStarter;
if (nsRounds > 0) {
for (int r = 0; r < nsRounds + 1; r++) {
if (r == 0) {
// target is known in advance
} else {
randomValue = randomValue * (unsigned long long) 25214903917 + 11;
auto idx = sd::math::nd4j_abs<Nd4jLong >((randomValue >> 16) % negLength);
irow = idx >= negLength ? -1 : static_cast<int>(negTable[idx]);
if (irow < 0 || irow >= vocabSize) irow = randomValue % (vocabSize - 1) + 1;
if (irow == nsStarter)
continue;
}
nSampling_<T>(neu1, syn1Neg + (irow * vectorLength), expTable, neu1e, alpha, vectorLength, r == 0 ? 1 : 0, expLength, infVector != nullptr, stream);
}
}
// when we are not training word vectors, skip the context words and update only the trailing numLabels entries
int starter = trainWords == 1 ? 0 : contextWidth - numLabels;
// propagate neu1e -> syn0
if (infVector == nullptr) {
fillUpSynonymsKernel<T><<<1,1,128, *stream>>>(starter, contextWidth, vectorLength, lockedWords, context, neu1e, syn0);
} else {
for (int i = 0; i < vectorLength; i++) {
infVector[i] += neu1e[i];
}
}
err = cudaStreamSynchronize(*stream);
if (0 != err) {
throw cuda_exception::build(
"helpers::cbow_: Cannot synchronize stream after kernel executing", err);
}
err = cudaFree(neu1);
if (0 != err) {
throw cuda_exception::build(
"helpers::cbow_: Cannot deallocate memory for synonims table", err);
}
err = cudaFree(neu1e);
if (0 != err) {
throw cuda_exception::build(
"helpers::cbow_: Cannot deallocate memory for antonims table", err);
}
}
BUILD_SINGLE_TEMPLATE(template void cbow_, (LaunchContext* lc, void *syn0, void *syn1, void *syn1Neg, void *expTable, void *vnegTable, void *vinfVector, int target, int ngStarter, int *context, int *lockedWords, int *indices, int8_t *codes, double alpha, Nd4jLong randomValue, const int contextWidth, const int hsRounds, const int nsRounds, const int vocabSize, const int vectorLength, const int expLength, const int negLength, const int numLabels, const bool trainWords), FLOAT_TYPES);
template <typename T>
static __global__ void buildCurrentWindowKernel(int vocabSize, int contextWidth, int vectorLength, int* bContext, T* syn0, T* neu1, int* actualContext, int e) {
// building neu1 for current window
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int c = start; c < contextWidth; c += step) {
// getting next context word
auto cContext = bContext[c + (e * contextWidth)];
// skipping padded values
if (cContext < 0)
continue;
// if (cContext >= vocabSize)
// throw std::runtime_error("ContextID can't be >= vocab size");
T *syn0word = syn0 + (cContext * vectorLength);
for (int i = 0; i < vectorLength; i++)
neu1[i] += syn0word[i];
atomicAdd(actualContext, 1);
}
}
template <typename T>
__global__ void arrangeNeuKernel(int vectorLength, T* neu1, T* infVector, int* actualContext) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i < vectorLength && *actualContext > 0; i += step)
neu1[i] /= (*actualContext + int(infVector != nullptr));
}
template <typename T>
__global__ void applyShiftKernel(int* bContext, int* bLocker, T* syn0, T* neu1e, int contextWidth, int vectorLength, int e, int starter) {
auto step = blockDim.x * gridDim.x;
auto start = blockDim.x * blockIdx.x + threadIdx.x;
for (int c = starter + start; c < contextWidth; c += step) {
// getting context
auto cContext = bContext[c + (e * contextWidth)];
auto cLock = bLocker[c + (e * contextWidth)];
// skipping padded values
if (cContext < 0 || cLock == 1)
continue;
// if (cContext >= vocabSize)
// throw std::runtime_error("ContextID can't be > vocab size");
// one word from context
T *syn0word = syn0 + (cContext * vectorLength);
for (int i = 0; i < vectorLength; i++)
syn0word[i] += neu1e[i];
}
}
template <typename T>
void cbowBatchExec_(LaunchContext* lc, NDArray &s0, NDArray &s1, NDArray &s1n, void *vexpTable, void *vnegTable, void *vinfVector, NDArray &context, NDArray &lockedWords, NDArray &targets, NDArray &negStarters, NDArray &indices, NDArray &codes, NDArray &lr, NDArray &nextRandom, NDArray &nLabels, const int nsRounds, const int vocabSize, const int vectorLength, const int expLength, const int negLength, const bool trainWords, const int numThreads) {
const auto syn0 = reinterpret_cast<T*>(s0.specialBuffer()); //bufferAsT<T>();
const auto syn1 = reinterpret_cast<T*>(s1.specialBuffer()); //bufferAsT<T>();
const auto syn1Neg = reinterpret_cast<T*>(s1n.specialBuffer()); //bufferAsT<T>();
const auto expTable = reinterpret_cast<T*>(vexpTable);
const auto negTable = reinterpret_cast<T*>(vnegTable);
const auto infVector = reinterpret_cast<T*>(vinfVector);
auto stream = lc->getCudaStream();
indices.syncToHost();
codes.syncToHost();
negStarters.syncToHost();
context.syncToHost();
//const auto numThreads = omp_get_max_threads();
const auto idxShift = indices.isEmpty() ? 0 : indices.sizeAt(1);
const auto hsRounds = codes.isEmpty() ? 0 : codes.sizeAt(1);
const auto numTargets = context.sizeAt(0);
const int contextWidth = context.sizeAt(1);
//const auto bContext = reinterpret_cast<int*>(context.buffer()); //bufferAsT<int>();
const auto dContext = context.dataBuffer()->specialAsT<int>(); //bufferAsT<int>();
// const auto bLocker = reinterpret_cast<int*>(lockedWords.buffer()); //lockedWords.bufferAsT<int>();
const auto dLocker = lockedWords.dataBuffer()->specialAsT<int>(); //.specialBuffer()); //lockedWords.bufferAsT<int>();
const auto bIndices = indices.dataBuffer()->primaryAsT<int>(); //buffer());//AsT<int>();
const auto bCodes = codes.dataBuffer()->primaryAsT<int8_t>(); //reinterpret_cast<int8_t*>(codes.buffer()); //bufferAsT<int8_t>();
const auto bStarters = negStarters.dataBuffer()->primaryAsT<int>(); //reinterpret_cast<int*>(negStarters.buffer()); //AsT<int>();
const auto numIndices = indices.isEmpty() ? 0 : indices.sizeAt(1);
lr.syncToHost();
nLabels.syncToHost();
//PRAGMA_OMP_PARALLEL_FOR_ARGS(num_threads(numThreads) private(sneu1, sneu1e))
//NDArray neuVector('c', {vectorLength}, DataTypeUtils::fromT<T>());
// auto neuEVector = neuVector; //NDArrayFactory::create<T>('c', {vectorLength});
T* neu1; // = reinterpret_cast<T*>(neuVector.specialBuffer());// = vectorLength <= 600 ? sneu1 : new T[vectorLength];
T* neu1e; // = reinterpret_cast<T*>(neuVector.specialBuffer()); // = vectorLength <= 600 ? sneu1e : new T[vectorLength];
auto cerr = cudaMalloc(&neu1, sizeof(T) * vectorLength);
if (cerr) {
throw cuda_exception::build("Cannot allocate temp vector buffer", cerr);
}
cerr = cudaMalloc(&neu1e, sizeof(T) * vectorLength);
if (cerr) {
throw cuda_exception::build("Cannot allocate temp vector buffer", cerr);
}
int* actualContext;
cerr = cudaMalloc(&actualContext, sizeof(int));
if (cerr) {
throw cuda_exception::build("Cannot allocate counter buffer", cerr);
}
for (int e = 0; e < numTargets; e++) {
// auto err = cudaMalloc(&neu1, sizeof(T)* vectorLength);
// q err = cudaMalloc(&neu1e, sizeof(T)*vectorLength);
//
// // optionally we nullify temp arrays after successful (and on first) cycle
// memset(neu1, 0, sizeof(T) * vectorLength);
// memset(neu1e, 0, sizeof(T) * vectorLength);
auto alpha = lr.e<double>(e);
auto numLabels = nLabels.isEmpty() ? 0 : nLabels.e<int>(e);
// auto err = cudaMemset(actualContext, 0, sizeof(int));
// if (err) {
// printf("Cuda error %d\n", err); break;
// }
buildCurrentWindowKernel<T><<<1,1,128, *stream>>>(vocabSize, contextWidth, vectorLength, dContext, syn0, neu1, actualContext, e);
arrangeNeuKernel<T><<<1,1,128, *stream>>>(vectorLength, neu1, infVector, actualContext);
// hierarchical softmax step
if (!indices.isEmpty()) {
for (int i = 0; i < numIndices; i++) {
const int cIndex = bIndices[(e * numIndices) + i];
const int cCode = bCodes[(e * numIndices) + i];
// we're skipping padded values
if (cIndex < 0)
continue;
if (cIndex >= vocabSize)
throw std::runtime_error("Index can't be > vocab size");
hSoftmax_<T>(neu1, syn1 + (cIndex * vectorLength), expTable, neu1e, alpha, vectorLength, cCode, expLength, false, stream);
}
}
// negative sampling step
if (!negStarters.isEmpty() && nsRounds > 0) {
int irow = bStarters[e];
const int nsStarter = irow;
unsigned long long randomValue = nextRandom.e<Nd4jLong>(e);
for (int r = 0; r < nsRounds + 1; r++) {
// we're skipping rng on 0 step
if (r != 0) {
randomValue = randomValue * (unsigned long long) 25214903917 + 11;
auto idx = sd::math::nd4j_abs<Nd4jLong>((randomValue >> 16) % negLength);
irow = idx >= negLength ? -1 : static_cast<int>(negTable[idx]);
if (irow < 0 || irow >= vocabSize) irow = randomValue % (vocabSize - 1) + 1;
if (irow == nsStarter)
continue;
nSampling_<T>(neu1, s1n.bufferWithOffset(irow * vectorLength), expTable, neu1e, alpha, vectorLength, r == 0 ? 1 : 0, expLength, infVector != nullptr, stream);
} else {
nSampling_<T>(neu1, s1n.bufferWithOffset(irow * vectorLength), expTable, neu1e, alpha, vectorLength, r == 0 ? 1 : 0, expLength, infVector != nullptr, stream);
}
//nd4j_printf("Thread <%i>: syn0: [%i]; s1n: [%i];\n", omp_get_thread_num(), 0, irow);
}
}
// when not training word vectors, only the trailing numLabels context entries are updated
int starter = trainWords == 1 ? 0 : contextWidth - numLabels;
// applying previously averaged results
applyShiftKernel<T><<<1,1,128, *stream>>>(dContext, dLocker, syn0, neu1e, contextWidth, vectorLength, e, starter);
// optionally release temp arrays
// if (vectorLength > 600) {
// }
}
cerr = cudaStreamSynchronize(*stream);
if (cerr) {
throw cuda_exception::build("Cannot syncronize stream before memory deallocation", cerr);
}
cerr = cudaFree(neu1);
if (cerr) {
throw cuda_exception::build("Cannot deallocate temp buffer1", cerr);
}
cerr = cudaFree(neu1e);
if (cerr) {
throw cuda_exception::build("Cannot deallocate temp buffer1 E", cerr);
}
cerr = cudaFree(actualContext);
if (cerr) {
throw cuda_exception::build("Cannot deallocate temp buffer1", cerr);
}
}
BUILD_SINGLE_TEMPLATE(template void cbowBatchExec_, (LaunchContext* lc, NDArray &s0, NDArray &s1, NDArray &s1n, void *vexpTable, void *vnegTable, void *vinfVector, NDArray &context, NDArray &lockedWords, NDArray &targets, NDArray &negStarters, NDArray &indices, NDArray &codes, NDArray &lr, NDArray &nextRandom, NDArray &nLabels, const int nsRounds, const int vocabSize, const int vectorLength, const int expLength, const int negLength, const bool trainWords, const int numThreads), FLOAT_TYPES);
void cbow(NDArray &syn0, NDArray &syn1, NDArray &syn1Neg, NDArray &expTable, NDArray &negTable, NDArray &target, NDArray &ngStarter, int nsRounds, NDArray &context, NDArray &lockedWords, NDArray &indices, NDArray &codes, NDArray &alpha, NDArray &randomValue, NDArray &numLabels, NDArray &inferenceVector, const bool trainWords, int numWorkers) {
auto xType = syn0.dataType();
auto lc = context.getContext();
indices.syncToHost();
NDArray::prepareSpecialUse({&syn0, &syn1, &syn1Neg, &expTable, &negTable, &target, &ngStarter}, {&context, &lockedWords, &indices, &codes, &alpha, &randomValue, &numLabels, &inferenceVector});
//auto stream = lc->getCudaStream();
if ((context.rankOf() == 0 || context.rankOf() == 1) && (indices.rankOf() == 1 || indices.rankOf() == 0)) {
// single round case
/*nd4j_printf("Row exec; ContextWidth: %i; LockedWords: %i; numLabels: %i; Train words: %i\n", (int) context.lengthOf(), (int) lockedWords.lengthOf(), numLabels.isEmpty() ? 0 : numLabels.e<int>(0), (int) trainWords);
if (context.lengthOf() == 2) {
context.printBuffer("context");
lockedWords.printBuffer("locked");
codes.printBuffer("codes");
indices.printBuffer("indices");
}*/
auto hsRounds = codes.lengthOf();
target.syncToHost();
numLabels.syncToHost();
alpha.syncToHost();
codes.syncToHost();
negTable.syncToHost();
BUILD_SINGLE_SELECTOR(xType, cbow_, (lc, syn0.specialBuffer(), syn1.specialBuffer(), syn1Neg.specialBuffer(), expTable.specialBuffer(), negTable.buffer(), inferenceVector.specialBuffer(), target.isEmpty() ? -1 : target.e<int>(0), ngStarter.isEmpty() ? -1 : ngStarter.e<int>(0), reinterpret_cast<int *>(context.specialBuffer()), reinterpret_cast<int *>(lockedWords.specialBuffer()),reinterpret_cast<int *>(indices.buffer()), reinterpret_cast<int8_t *>(codes.buffer()), alpha.e<double>( 0), randomValue.e<Nd4jLong>(0), (int) context.lengthOf(), hsRounds, nsRounds, (int) syn0.sizeAt(0), (int) syn0.sizeAt(1), (int) expTable.lengthOf(), (int) negTable.lengthOf(), numLabels.isEmpty() ? 0 : numLabels.e<int>(0), trainWords), FLOAT_TYPES);
} else if (context.rankOf() == 2 && indices.rankOf() == 2) {
// batch mode
//nd4j_printf("Batch exec\n","");
BUILD_SINGLE_SELECTOR(xType, cbowBatchExec_, (lc, syn0, syn1, syn1Neg, expTable.specialBuffer(), negTable.specialBuffer(), nullptr, context, lockedWords, target, ngStarter, indices, codes, alpha, randomValue, numLabels, nsRounds, syn0.sizeAt(0), syn0.sizeAt(1), expTable.lengthOf(), negTable.isEmpty() ? 0 : negTable.lengthOf(), trainWords, numWorkers), FLOAT_TYPES);
} else
throw std::runtime_error("CBOW: context must have rank 0/1 or 2");
NDArray::registerSpecialUse({&syn0, &syn1, &syn1Neg, &expTable, &negTable, &target, &ngStarter}, {&context, &lockedWords, &indices, &codes, &alpha, &randomValue, &numLabels, &inferenceVector});
}
}
}
} |
34a44cfeedd21e3ccaa375658dbd42d23b223ead.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2018 University of Maryland, College Park
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file multi_proposal_target.cc
* \brief Proposal target layer
* \author Bharat Singh
*/
#include "./multi_proposal_target_mask-inl.h"
#include <set>
#include <math.h>
#include "unistd.h"
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include "./operator_common.h"
#include "./mshadow_op.h"
#include <time.h>
#include <stdlib.h>
//============================
// Bounding Box Transform Utils
//============================
#define NUM_THREADS_NMS 1024
namespace mxnet {
namespace op {
namespace utils {
// filter boxes: when height or width < min_size, pad the box by min_size/2
// and mark it as suppressed by setting its score to -1
inline void FilterBox(float *dets,
int num_dets, float min_size) {
#pragma omp parallel for num_threads(8)
for (int i = 0; i < num_dets; ++i) {
float iw = dets[5*i + 2] - dets[5*i] + 1.0f;
float ih = dets[5*i + 3] - dets[5*i + 1] + 1.0f;
if (iw < min_size || ih < min_size) {
dets[5*i+0] -= min_size / 2;
dets[5*i+1] -= min_size / 2;
dets[5*i+2] += min_size / 2;
dets[5*i+3] += min_size / 2;
dets[5*i+4] = -1.0f;
}
}
}
inline void _MakeAnchor(float w,
float h,
float x_ctr,
float y_ctr,
std::vector<float> *out_anchors) {
out_anchors->push_back(x_ctr - 0.5f * (w - 1.0f));
out_anchors->push_back(y_ctr - 0.5f * (h - 1.0f));
out_anchors->push_back(x_ctr + 0.5f * (w - 1.0f));
out_anchors->push_back(y_ctr + 0.5f * (h - 1.0f));
}
inline void _Transform(float scale,
float ratio,
const std::vector<float>& base_anchor,
std::vector<float> *out_anchors) {
float w = base_anchor[2] - base_anchor[0] + 1.0f;
float h = base_anchor[3] - base_anchor[1] + 1.0f;
float x_ctr = base_anchor[0] + 0.5 * (w - 1.0f);
float y_ctr = base_anchor[1] + 0.5 * (h - 1.0f);
float size = w * h;
float size_ratios = ::floor(size / ratio);
float new_w = ::floor(std::sqrt(size_ratios) + 0.5f) * scale;
float new_h = ::floor((new_w / scale * ratio) + 0.5f) * scale;
_MakeAnchor(new_w, new_h, x_ctr,
y_ctr, out_anchors);
}
// out_anchors receives 4 values (x1, y1, x2, y2) per anchor, for n = ratios.size() * scales.size() anchors
inline void GenerateAnchors(const std::vector<float>& base_anchor,
const nnvm::Tuple<float>& ratios,
const nnvm::Tuple<float>& scales,
std::vector<float> *out_anchors) {
for (size_t j = 0; j < ratios.ndim(); ++j) {
for (size_t k = 0; k < scales.ndim(); ++k) {
_Transform(scales[k], ratios[j], base_anchor, out_anchors);
}
}
}
// greedy NMS: keep the highest-scoring detections and suppress boxes with IoU > 0.7 against a kept box
__global__ void NonMaximumSuppressionCu(float* idets, int post_nms_top_n, int num_images,
int num_anchors, int width, int height, int max_gts, float* propsout,
float* valid_ranges, float* gt_boxes, float* ids, float* dets) {
int pre_nms_top_n = 6000;
int i = blockIdx.x;
int t = threadIdx.x;
int chip_anchors = height*width*num_anchors;
int multiplier = pre_nms_top_n;
int num_threads = blockDim.x;
int chip_index = i*chip_anchors;
for (int j = t; j < pre_nms_top_n; j = j + num_threads) {
dets[6*i*multiplier + 6*j] = idets[chip_index*6 + 6*(int)ids[chip_index + j]];
dets[6*i*multiplier + 6*j+1] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+1];
dets[6*i*multiplier + 6*j+2] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+2];
dets[6*i*multiplier + 6*j+3] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+3];
dets[6*i*multiplier + 6*j+4] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+4];
dets[6*i*multiplier + 6*j+5] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+5];
}
__syncthreads();
int vct = 0;
__shared__ int keeps[300]; // hard coded to the maximum supported post_nms_top_n
chip_index = i*multiplier;
for (int j = chip_index; j < chip_index + pre_nms_top_n && vct < post_nms_top_n; j++) {
if (dets[6*j+4] == -1) {
continue;
}
float ix1 = dets[6*j];
float iy1 = dets[6*j+1];
float ix2 = dets[6*j+2];
float iy2 = dets[6*j+3];
float iarea = dets[6*j+5];
if (t == 0) {
keeps[vct] = j;
}
vct = vct + 1;
float xx1, xx2, yy1, yy2, w, h, inter, ovr;
for (int pind = j + 1 + t; pind < chip_index + pre_nms_top_n; pind = pind + num_threads) {
if (dets[6*pind + 4] == -1) {
continue;
}
xx1 = fmaxf(ix1, dets[6*pind]);
yy1 = fmaxf(iy1, dets[6*pind + 1]);
xx2 = fminf(ix2, dets[6*pind + 2]);
yy2 = fminf(iy2, dets[6*pind + 3]);
w = fmaxf(0.0f, xx2 - xx1 + 1.0f);
h = fmaxf(0.0f, yy2 - yy1 + 1.0f);
inter = w * h;
ovr = inter / (iarea + dets[6*pind+5] - inter);
if (ovr > 0.7) {
dets[6*pind + 4] = -1;
}
}
__syncthreads();
}
//set default values and assign gt boxes
if (t < post_nms_top_n) {
if (t < vct) {
propsout[5*(i*post_nms_top_n + t)] = i;
propsout[5*(i*post_nms_top_n + t) + 1] = dets[6*keeps[t]];
propsout[5*(i*post_nms_top_n + t) + 2] = dets[6*keeps[t]+1];
propsout[5*(i*post_nms_top_n + t) + 3] = dets[6*keeps[t]+2];
propsout[5*(i*post_nms_top_n + t) + 4] = dets[6*keeps[t]+3];
} else {
propsout[5*(i*post_nms_top_n + t)] = i;
propsout[5*(i*post_nms_top_n + t) + 1] = t % 100;
propsout[5*(i*post_nms_top_n + t) + 2] = t % 100;
propsout[5*(i*post_nms_top_n + t) + 3] = (t % 100) + 200;
propsout[5*(i*post_nms_top_n + t) + 4] = (t % 100) + 200;
}
if (t < max_gts && gt_boxes[5*(i*max_gts + t) + 4] != -1) {
float x1 = gt_boxes[5*(i*max_gts + t)];
float y1 = gt_boxes[5*(i*max_gts + t)+1];
float x2 = gt_boxes[5*(i*max_gts + t)+2];
float y2 = gt_boxes[5*(i*max_gts + t)+3];
float area = (x2 - x1) * (y2 - y1);
if (area < valid_ranges[2*i + 1]*valid_ranges[2*i + 1] && area >= valid_ranges[2*i]*valid_ranges[2*i]) {
propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 1] = x1;
propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 2] = y1;
propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 3] = x2;
propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 4] = y2;
}
}
}
__syncthreads();
}
__global__ void getPropsCu(float* boxes,
float* deltas,
float* im_info,
float* anchorbuf,
float* scores,
float* valid_ranges,
int num_images,
int anchors,
int heights,
int widths,
int stride,
float* scorebuf,
float* scoreids) {
int num_anchors = anchors * heights * widths;
int t = blockDim.x * blockIdx.x + threadIdx.x;
if (t < num_images * num_anchors) {
int b = t / num_anchors;
int index = t % num_anchors;
int a = index / (heights*widths);
int mat = index % (heights*widths);
int w = mat % widths; //width index
int h = mat / widths; //height index
boxes[6*t] = anchorbuf[4*a] + w * stride;
boxes[6*t + 1] = anchorbuf[4*a+1] + h * stride;
boxes[6*t + 2] = anchorbuf[4*a+2] + w * stride;
boxes[6*t + 3] = anchorbuf[4*a+3] + h * stride;
boxes[6*t + 4] = scores[b*num_anchors*2 + ((anchors + a)*heights + h)*widths + w];
float width = boxes[6*t + 2] - boxes[6*t] + 1.0;
float height = boxes[6*t + 3] - boxes[6*t + 1] + 1.0;
float ctr_x = boxes[6*t + 0] + 0.5 * (width - 1.0);
float ctr_y = boxes[6*t + 1] + 0.5 * (height - 1.0);
float dx = deltas[b*num_anchors*4 + a*4*widths*heights + h*widths + w];
float dy = deltas[b*num_anchors*4 + (a*4 + 1)*widths*heights + h*widths + w];
float dw = deltas[b*num_anchors*4 + (a*4 + 2)*widths*heights + h*widths + w];
float dh = deltas[b*num_anchors*4 + (a*4 + 3)*widths*heights + h*widths + w];
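// standard R-CNN box decoding: apply (dx, dy, dw, dh) to the anchor center and size,
// then clip the resulting corners to the image bounds from im_info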
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5 * (pred_w - 1.0);
float pred_y1 = pred_ctr_y - 0.5 * (pred_h - 1.0);
float pred_x2 = pred_ctr_x + 0.5 * (pred_w - 1.0);
float pred_y2 = pred_ctr_y + 0.5 * (pred_h - 1.0);
pred_x1 = fmaxf(fminf(pred_x1, im_info[3*b+1] - 1.0f), 0.0f);
pred_y1 = fmaxf(fminf(pred_y1, im_info[3*b] - 1.0f), 0.0f);
pred_x2 = fmaxf(fminf(pred_x2, im_info[3*b+1] - 1.0f), 0.0f);
pred_y2 = fmaxf(fminf(pred_y2, im_info[3*b] - 1.0f), 0.0f);
boxes[6*t] = pred_x1;
boxes[6*t + 1] = pred_y1;
boxes[6*t + 2] = pred_x2;
boxes[6*t + 3] = pred_y2;
int min_size = 3;
if ((pred_y2 - pred_y1) < min_size && (pred_x2 - pred_x1) < min_size) {
boxes[6*t] -= min_size/2;
boxes[6*t + 1] -= min_size/2;
boxes[6*t + 2] += min_size/2;
boxes[6*t + 3] += min_size/2;
boxes[6*t + 4] = -1;
}
float area = (boxes[6*t + 2] - boxes[6*t]) * (boxes[6*t + 3] - boxes[6*t + 1]);
if (area >= valid_ranges[2*b+1] * valid_ranges[2*b+1] || area < valid_ranges[2*b]*valid_ranges[2*b]) {
boxes[6*t + 4] = -1;
}
boxes[6*t + 5] = area;
scorebuf[t] = boxes[6*t + 4];
scoreids[t] = index;
}
}
} // namespace utils
template<typename xpu>
class MultiProposalTargetMaskGPUOp : public Operator{
public:
float *labels;
float *bbox_targets;
float *bbox_weights;
float *crois;
float *gt_boxes;
float *out_pos_boxes;
float *out_pos_ids;
explicit MultiProposalTargetMaskGPUOp(MultiProposalTargetMaskParam param) {
this->param_ = param;
this->param_.workspace = (param_.workspace << 20) / sizeof(float);
this->crois = new float[300*param.batch_size*5];
this->labels = new float[300*param.batch_size];
this->gt_boxes = new float[param.max_gts*param.batch_size*5];
this->bbox_targets = new float[300*param.batch_size*4];
this->bbox_weights = new float[300*param.batch_size*4];
this->out_pos_boxes = new float[param.max_masks*param.batch_size*5];
this->out_pos_ids = new float[param.max_masks*param.batch_size];
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
CHECK_EQ(in_data.size(), 5);
CHECK_EQ(out_data.size(), 6);
using namespace mshadow;
using namespace mshadow::expr;
//clock_t t;
//t = clock();
Stream<gpu> *s = ctx.get_stream<gpu>();
Tensor<gpu, 4> tscores = in_data[proposal::kClsProb].get<gpu, 4, real_t>(s);
Tensor<gpu, 4> tbbox_deltas = in_data[proposal::kBBoxPred].get<gpu, 4, real_t>(s);
Tensor<gpu, 2> tim_info = in_data[proposal::kImInfo].get<gpu, 2, real_t>(s);
Tensor<gpu, 3> tgt_boxes = in_data[proposal::kGTBoxes].get<gpu, 3, real_t>(s);
Tensor<gpu, 2> tvalid_ranges = in_data[proposal::kValidRanges].get<gpu, 2, real_t>(s);
int max_gts = param_.max_gts;
Tensor<gpu, 2> rois = out_data[proposal::kRoIs].get<gpu, 2, real_t>(s);
int rpn_post_nms_top_n = param_.rpn_post_nms_top_n;
int num_images = tbbox_deltas.size(0);
int num_anchors = tbbox_deltas.size(1) / 4;
int height = tbbox_deltas.size(2);
int width = tbbox_deltas.size(3);
int count_anchors = num_anchors*height*width;
int total_anchors = count_anchors * num_images;
int bufsize = (total_anchors*8 + num_images*rpn_post_nms_top_n*5 + num_anchors*4)*sizeof(float);
Tensor<gpu, 1> workspace = ctx.requested[proposal::kTempSpace].get_space_typed<gpu, 1, float>(Shape1(bufsize), s);
int pre_nms_top_n = 6000;
float* propbuf = workspace.dptr_;
float* scorebuf = workspace.dptr_ + total_anchors*6;
float* idbuf = workspace.dptr_ + total_anchors*7;
float* detbuf = workspace.dptr_ + total_anchors*8;
float* anchorbuf = workspace.dptr_ + total_anchors*8 + num_images * 6 * pre_nms_top_n;
std::vector<float> base_anchor(4);
//usleep(20000000);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
std::vector<float> anchors;
utils::GenerateAnchors(base_anchor,
param_.ratios,
param_.scales,
&anchors);
unsigned int size = num_anchors*4*sizeof(float);
hipMemcpy(anchorbuf, &anchors[0], size, hipMemcpyHostToDevice);
//call cuda kernel
int threadsPerBlock = NUM_THREADS_NMS;
int numblocks = (total_anchors/threadsPerBlock) + 1;
hipLaunchKernelGGL(( utils::getPropsCu), dim3(numblocks), dim3(threadsPerBlock), 0, 0, propbuf, tbbox_deltas.dptr_, tim_info.dptr_, anchorbuf, tscores.dptr_,
tvalid_ranges.dptr_, num_images, num_anchors, height, width, param_.feature_stride,
scorebuf, idbuf);
std::vector <float> tmp(total_anchors);
std::vector<float> ids(total_anchors);
hipDeviceSynchronize();
hipMemcpy(&tmp[0], scorebuf, sizeof(float) * num_images * count_anchors, hipMemcpyDeviceToHost);
#pragma omp parallel for num_threads(8)
for (int i = 0; i < total_anchors; i++) {
ids[i] = (float)(i % count_anchors);
}
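// sort proposal indices per image on the host, descending by objectness score;
// the sorted order is copied back to the device and consumed by the NMS kernel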
#pragma omp parallel for num_threads(8)
for (int i = 0; i < num_images; i++) {
float basep = count_anchors*i;
std::sort(ids.begin() + i*count_anchors, ids.begin() + (i+1)*count_anchors,
[&tmp, basep](float i1, float i2) {
return tmp[(int)i1 + basep] > tmp[(int)i2 + basep];
});
}
hipMemcpy(idbuf, &ids[0], sizeof(float) * num_images * count_anchors, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( utils::NonMaximumSuppressionCu), dim3(num_images), dim3(threadsPerBlock), 0, 0, propbuf, rpn_post_nms_top_n, num_images,
num_anchors, width, height, max_gts, rois.dptr_,
tvalid_ranges.dptr_, tgt_boxes.dptr_, idbuf, detbuf);
hipDeviceSynchronize();
hipError_t error;
error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
hipMemcpy(crois, rois.dptr_, 5*rpn_post_nms_top_n*num_images*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(gt_boxes, tgt_boxes.dptr_, 5*max_gts*num_images*sizeof(float), hipMemcpyDeviceToHost);
std::vector <int> numgts_per_image(num_images);
for (int i = 0; i < num_images; i++) {
numgts_per_image[i] = 0;
for (int j = 0; j < max_gts; j++) {
if (gt_boxes[i*max_gts*5 + j*5 + 4] != -1) {
numgts_per_image[i]++;
}
}
}
#pragma omp parallel for num_threads(8)
for (int i = 0; i < num_images; i++) {
for (int j = 0; j < rpn_post_nms_top_n; j++) {
int basepos = rpn_post_nms_top_n*i + j;
labels[basepos] = 0;
bbox_targets[4*basepos] = 1.0;
bbox_targets[4*basepos + 1] = 1.0;
bbox_targets[4*basepos + 2] = 1.0;
bbox_targets[4*basepos + 3] = 1.0;
bbox_weights[4*basepos] = 0.0;
bbox_weights[4*basepos + 1] = 0.0;
bbox_weights[4*basepos + 2] = 0.0;
bbox_weights[4*basepos + 3] = 0.0;
}
}
float *maxids = new float[num_images*rpn_post_nms_top_n];
for (int i = 0; i < num_images*rpn_post_nms_top_n; i++) {
maxids[i] = -1;
}
#pragma omp parallel for num_threads(8)
for (int imid = 0; imid < num_images; imid++) {
int tpct = 0;
int num_gts_this_image = numgts_per_image[imid];
//std::cout << "gtc " << num_gts_this_image << std::endl;
int props_this_batch = rpn_post_nms_top_n;
if (num_gts_this_image > 0) {
float *overlaps = new float[props_this_batch * num_gts_this_image];
float *max_overlaps = new float[props_this_batch];
for (int i = 0; i < props_this_batch; i++) {
max_overlaps[i] = 0;
}
float *max_overlap_ids = new float[props_this_batch];
std::set <int> positive_label_ids;
for (int i = 0; i < props_this_batch; i++) {
max_overlap_ids[i] = 0;
}
for (int i = props_this_batch; i < rpn_post_nms_top_n; i++) {
labels[imid*rpn_post_nms_top_n + i] = -1;
}
//get overlaps, maximum overlaps and gt labels
for (int i = 0; i < numgts_per_image[imid]; i++) {
float x1 = gt_boxes[imid*5*max_gts + i*5];
float y1 = gt_boxes[imid*5*max_gts + i*5 + 1];
float x2 = gt_boxes[imid*5*max_gts + i*5 + 2];
float y2 = gt_boxes[imid*5*max_gts + i*5 + 3];
int pbase;
float a1 = (x2 - x1) * (y2 - y1);
float xx1, yy1, xx2, yy2, w, h, inter, ovr, a2;
for (int j = 0; j < props_this_batch; j++) {
pbase = rpn_post_nms_top_n*imid + j;
xx1 = ::max(x1, crois[pbase*5 + 1]);
yy1 = ::max(y1, crois[pbase*5 + 2]);
xx2 = ::min(x2, crois[pbase*5 + 3]);
yy2 = ::min(y2, crois[pbase*5 + 4]);
w = ::max(0.0f, xx2 - xx1 + 1.0f);
h = ::max(0.0f, yy2 - yy1 + 1.0f);
a2 = (crois[pbase*5 + 3] - crois[pbase*5 + 1]) * (crois[pbase*5 + 4] - crois[pbase*5 + 2]);
inter = w * h;
ovr = inter / (a1 + a2 - inter);
overlaps[i*num_gts_this_image + j] = ovr;
if (overlaps[i*num_gts_this_image + j] > max_overlaps[j] && overlaps[i*num_gts_this_image + j] > 0.5) {
max_overlaps[j] = overlaps[i*num_gts_this_image + j];
max_overlap_ids[j] = i;
//set labels for positive proposals
labels[imid*rpn_post_nms_top_n + j] = gt_boxes[imid*5*max_gts + i*5 + 4];
positive_label_ids.insert(j);
tpct = tpct + 1;
}
}
}
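// a proposal receives a ground-truth class label only when its best IoU against some
// gt box exceeds 0.5; all other proposals keep the background label (0) set above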
//p is for proposal and g is for gt, cx is x center and w,h is width and height
int pid, gtid;
float gx1, gx2, gy1, gy2, px1, px2, py1, py2;
float gcx, gcy, gw, gh, pcx, pcy, pw, ph;
//generate bbox targets for the positive labels
for (auto it = positive_label_ids.begin(); it !=positive_label_ids.end(); it++) {
pid = *it;
int baseid = (imid*rpn_post_nms_top_n + pid);
bbox_weights[baseid*4] = 1;
bbox_weights[baseid*4+1] = 1;
bbox_weights[baseid*4+2] = 1;
bbox_weights[baseid*4+3] = 1;
gtid = max_overlap_ids[pid];
maxids[baseid] = gtid;
gx1 = gt_boxes[imid*5*max_gts + gtid*5];
gy1 = gt_boxes[imid*5*max_gts + gtid*5 + 1];
gx2 = gt_boxes[imid*5*max_gts + gtid*5 + 2];
gy2 = gt_boxes[imid*5*max_gts + gtid*5 + 3];
gw = gx2 - gx1 + 1;
gh = gy2 - gy1 + 1;
gcx = gx1 + gw*0.5;
gcy = gy1 + gh*0.5;
px1 = crois[baseid*5 + 1];
py1 = crois[baseid*5 + 2];
px2 = crois[baseid*5 + 3];
py2 = crois[baseid*5 + 4];
pw = px2 - px1 + 1;
ph = py2 - py1 + 1;
pcx = px1 + (pw-1)*0.5;
pcy = py1 + (ph-1)*0.5;
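// the 10x (centers) and 5x (log sizes) scaling is equivalent to dividing the targets
// by stds of (0.1, 0.1, 0.2, 0.2); presumably this matches the normalization expected
// by the downstream bbox regression head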
bbox_targets[4*baseid] = 10 * (gcx - pcx) / (pw + 1e-7);
bbox_targets[4*baseid+1] = 10 * (gcy - pcy) / (ph + 1e-7);
bbox_targets[4*baseid+2] = 5 * log(gw/(pw + 1e-7));
bbox_targets[4*baseid+3] = 5 * log(gh/(ph + 1e-7));
}
delete [] max_overlap_ids;
delete [] overlaps;
delete [] max_overlaps;
}
}
int mask_ct = 0;
for (int i = 0; i < num_images*300; i++) {
if (labels[i] > 0 && mask_ct < num_images * param_.max_masks) {
out_pos_boxes[5*mask_ct] = crois[5*i];
out_pos_boxes[5*mask_ct+1] = crois[5*i+1];
out_pos_boxes[5*mask_ct+2] = crois[5*i+2];
out_pos_boxes[5*mask_ct+3] = crois[5*i+3];
out_pos_boxes[5*mask_ct+4] = crois[5*i+4];
out_pos_ids[mask_ct] = maxids[i];
mask_ct++;
}
}
for (int i = mask_ct; i < num_images*param_.max_masks; i++) {
out_pos_boxes[5*i] = i % num_images;
out_pos_boxes[5*i+1] = i % 200;
out_pos_boxes[5*i+2] = i % 200;
out_pos_boxes[5*i+3] = i % 200 + 100;
out_pos_boxes[5*i+4] = i % 200 + 100;
out_pos_ids[i] = -1;
}
delete [] maxids;
Stream<gpu> *so = ctx.get_stream<gpu>();
Tensor<gpu, 2> olabels = out_data[proposal::kLabels].get<gpu, 2, real_t>(so);
Tensor<gpu, 2> obbox_targets = out_data[proposal::kBboxTarget].get<gpu, 2, real_t>(so);
Tensor<gpu, 2> obbox_weights = out_data[proposal::kBboxWeight].get<gpu, 2, real_t>(so);
Tensor<gpu, 2> omaskrois = out_data[proposal::kMaskRoIs].get<gpu, 2, real_t>(s);
Tensor<gpu, 2> omaskids = out_data[proposal::kMaskIds].get<gpu, 2, real_t>(s);
hipMemcpy(omaskrois.dptr_, out_pos_boxes, sizeof(float) * num_images * param_.max_masks * 5, hipMemcpyHostToDevice);
hipMemcpy(omaskids.dptr_, out_pos_ids, sizeof(float) * num_images * param_.max_masks, hipMemcpyHostToDevice);
hipMemcpy(olabels.dptr_, labels, sizeof(float) * num_images*300, hipMemcpyHostToDevice);
hipMemcpy(obbox_targets.dptr_, bbox_targets, 4*sizeof(float) * num_images*300, hipMemcpyHostToDevice);
hipMemcpy(obbox_weights.dptr_, bbox_weights, 4*sizeof(float) * num_images*300, hipMemcpyHostToDevice);
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 5);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 3> ggt_boxes = in_grad[proposal::kGTBoxes].get<xpu, 3, real_t>(s);
Tensor<xpu, 2> gvalid_ranges = in_grad[proposal::kValidRanges].get<xpu, 2, real_t>(s);
// can not assume the grad would be zero
Assign(gscores, req[proposal::kClsProb], 0);
Assign(gbbox, req[proposal::kBBoxPred], 0);
Assign(ginfo, req[proposal::kImInfo], 0);
Assign(ggt_boxes, req[proposal::kGTBoxes], 0);
Assign(gvalid_ranges, req[proposal::kValidRanges], 0);
}
private:
MultiProposalTargetMaskParam param_;
}; // class MultiProposalTargetMaskGPUOp
template<>
Operator *CreateOp<gpu>(MultiProposalTargetMaskParam param) {
return new MultiProposalTargetMaskGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
| 34a44cfeedd21e3ccaa375658dbd42d23b223ead.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2018 University of Maryland, College Park
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file multi_proposal_target.cc
* \brief Proposal target layer
* \author Bharat Singh
*/
#include "./multi_proposal_target_mask-inl.h"
#include <set>
#include <math.h>
#include "unistd.h"
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include "./operator_common.h"
#include "./mshadow_op.h"
#include <time.h>
#include <stdlib.h>
//============================
// Bounding Box Transform Utils
//============================
#define NUM_THREADS_NMS 1024
namespace mxnet {
namespace op {
namespace utils {
// filter boxes: when height or width < min_size, pad the box by min_size/2
// and mark it as suppressed by setting its score to -1
inline void FilterBox(float *dets,
int num_dets, float min_size) {
#pragma omp parallel for num_threads(8)
for (int i = 0; i < num_dets; ++i) {
float iw = dets[5*i + 2] - dets[5*i] + 1.0f;
float ih = dets[5*i + 3] - dets[5*i + 1] + 1.0f;
if (iw < min_size || ih < min_size) {
dets[5*i+0] -= min_size / 2;
dets[5*i+1] -= min_size / 2;
dets[5*i+2] += min_size / 2;
dets[5*i+3] += min_size / 2;
dets[5*i+4] = -1.0f;
}
}
}
inline void _MakeAnchor(float w,
float h,
float x_ctr,
float y_ctr,
std::vector<float> *out_anchors) {
out_anchors->push_back(x_ctr - 0.5f * (w - 1.0f));
out_anchors->push_back(y_ctr - 0.5f * (h - 1.0f));
out_anchors->push_back(x_ctr + 0.5f * (w - 1.0f));
out_anchors->push_back(y_ctr + 0.5f * (h - 1.0f));
}
inline void _Transform(float scale,
float ratio,
const std::vector<float>& base_anchor,
std::vector<float> *out_anchors) {
float w = base_anchor[2] - base_anchor[0] + 1.0f;
float h = base_anchor[3] - base_anchor[1] + 1.0f;
float x_ctr = base_anchor[0] + 0.5 * (w - 1.0f);
float y_ctr = base_anchor[1] + 0.5 * (h - 1.0f);
float size = w * h;
float size_ratios = std::floor(size / ratio);
float new_w = std::floor(std::sqrt(size_ratios) + 0.5f) * scale;
float new_h = std::floor((new_w / scale * ratio) + 0.5f) * scale;
_MakeAnchor(new_w, new_h, x_ctr,
y_ctr, out_anchors);
}
// out_anchors receives 4 values (x1, y1, x2, y2) per anchor, for n = ratios.size() * scales.size() anchors
inline void GenerateAnchors(const std::vector<float>& base_anchor,
const nnvm::Tuple<float>& ratios,
const nnvm::Tuple<float>& scales,
std::vector<float> *out_anchors) {
for (size_t j = 0; j < ratios.ndim(); ++j) {
for (size_t k = 0; k < scales.ndim(); ++k) {
_Transform(scales[k], ratios[j], base_anchor, out_anchors);
}
}
}
// greedy NMS: keep the highest-scoring detections and suppress boxes with IoU > 0.7 against a kept box
__global__ void NonMaximumSuppressionCu(float* idets, int post_nms_top_n, int num_images,
int num_anchors, int width, int height, int max_gts, float* propsout,
float* valid_ranges, float* gt_boxes, float* ids, float* dets) {
int pre_nms_top_n = 6000;
int i = blockIdx.x;
int t = threadIdx.x;
int chip_anchors = height*width*num_anchors;
int multiplier = pre_nms_top_n;
int num_threads = blockDim.x;
int chip_index = i*chip_anchors;
for (int j = t; j < pre_nms_top_n; j = j + num_threads) {
dets[6*i*multiplier + 6*j] = idets[chip_index*6 + 6*(int)ids[chip_index + j]];
dets[6*i*multiplier + 6*j+1] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+1];
dets[6*i*multiplier + 6*j+2] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+2];
dets[6*i*multiplier + 6*j+3] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+3];
dets[6*i*multiplier + 6*j+4] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+4];
dets[6*i*multiplier + 6*j+5] = idets[chip_index*6 + 6*(int)ids[chip_index + j]+5];
}
__syncthreads();
int vct = 0;
__shared__ int keeps[300]; // hard coded to the maximum supported post_nms_top_n
chip_index = i*multiplier;
for (int j = chip_index; j < chip_index + pre_nms_top_n && vct < post_nms_top_n; j++) {
if (dets[6*j+4] == -1) {
continue;
}
float ix1 = dets[6*j];
float iy1 = dets[6*j+1];
float ix2 = dets[6*j+2];
float iy2 = dets[6*j+3];
float iarea = dets[6*j+5];
if (t == 0) {
keeps[vct] = j;
}
vct = vct + 1;
float xx1, xx2, yy1, yy2, w, h, inter, ovr;
for (int pind = j + 1 + t; pind < chip_index + pre_nms_top_n; pind = pind + num_threads) {
if (dets[6*pind + 4] == -1) {
continue;
}
xx1 = fmaxf(ix1, dets[6*pind]);
yy1 = fmaxf(iy1, dets[6*pind + 1]);
xx2 = fminf(ix2, dets[6*pind + 2]);
yy2 = fminf(iy2, dets[6*pind + 3]);
w = fmaxf(0.0f, xx2 - xx1 + 1.0f);
h = fmaxf(0.0f, yy2 - yy1 + 1.0f);
inter = w * h;
ovr = inter / (iarea + dets[6*pind+5] - inter);
if (ovr > 0.7) {
dets[6*pind + 4] = -1;
}
}
__syncthreads();
}
//set default values and assign gt boxes
if (t < post_nms_top_n) {
if (t < vct) {
propsout[5*(i*post_nms_top_n + t)] = i;
propsout[5*(i*post_nms_top_n + t) + 1] = dets[6*keeps[t]];
propsout[5*(i*post_nms_top_n + t) + 2] = dets[6*keeps[t]+1];
propsout[5*(i*post_nms_top_n + t) + 3] = dets[6*keeps[t]+2];
propsout[5*(i*post_nms_top_n + t) + 4] = dets[6*keeps[t]+3];
} else {
propsout[5*(i*post_nms_top_n + t)] = i;
propsout[5*(i*post_nms_top_n + t) + 1] = t % 100;
propsout[5*(i*post_nms_top_n + t) + 2] = t % 100;
propsout[5*(i*post_nms_top_n + t) + 3] = (t % 100) + 200;
propsout[5*(i*post_nms_top_n + t) + 4] = (t % 100) + 200;
}
if (t < max_gts && gt_boxes[5*(i*max_gts + t) + 4] != -1) {
float x1 = gt_boxes[5*(i*max_gts + t)];
float y1 = gt_boxes[5*(i*max_gts + t)+1];
float x2 = gt_boxes[5*(i*max_gts + t)+2];
float y2 = gt_boxes[5*(i*max_gts + t)+3];
float area = (x2 - x1) * (y2 - y1);
if (area < valid_ranges[2*i + 1]*valid_ranges[2*i + 1] && area >= valid_ranges[2*i]*valid_ranges[2*i]) {
propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 1] = x1;
propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 2] = y1;
propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 3] = x2;
propsout[5*(i*post_nms_top_n + post_nms_top_n - t - 1) + 4] = y2;
}
}
}
__syncthreads();
}
__global__ void getPropsCu(float* boxes,
float* deltas,
float* im_info,
float* anchorbuf,
float* scores,
float* valid_ranges,
int num_images,
int anchors,
int heights,
int widths,
int stride,
float* scorebuf,
float* scoreids) {
int num_anchors = anchors * heights * widths;
int t = blockDim.x * blockIdx.x + threadIdx.x;
if (t < num_images * num_anchors) {
int b = t / num_anchors;
int index = t % num_anchors;
int a = index / (heights*widths);
int mat = index % (heights*widths);
int w = mat % widths; //width index
int h = mat / widths; //height index
boxes[6*t] = anchorbuf[4*a] + w * stride;
boxes[6*t + 1] = anchorbuf[4*a+1] + h * stride;
boxes[6*t + 2] = anchorbuf[4*a+2] + w * stride;
boxes[6*t + 3] = anchorbuf[4*a+3] + h * stride;
boxes[6*t + 4] = scores[b*num_anchors*2 + ((anchors + a)*heights + h)*widths + w];
float width = boxes[6*t + 2] - boxes[6*t] + 1.0;
float height = boxes[6*t + 3] - boxes[6*t + 1] + 1.0;
float ctr_x = boxes[6*t + 0] + 0.5 * (width - 1.0);
float ctr_y = boxes[6*t + 1] + 0.5 * (height - 1.0);
float dx = deltas[b*num_anchors*4 + a*4*widths*heights + h*widths + w];
float dy = deltas[b*num_anchors*4 + (a*4 + 1)*widths*heights + h*widths + w];
float dw = deltas[b*num_anchors*4 + (a*4 + 2)*widths*heights + h*widths + w];
float dh = deltas[b*num_anchors*4 + (a*4 + 3)*widths*heights + h*widths + w];
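// standard R-CNN box decoding: apply (dx, dy, dw, dh) to the anchor center and size,
// then clip the resulting corners to the image bounds from im_info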
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5 * (pred_w - 1.0);
float pred_y1 = pred_ctr_y - 0.5 * (pred_h - 1.0);
float pred_x2 = pred_ctr_x + 0.5 * (pred_w - 1.0);
float pred_y2 = pred_ctr_y + 0.5 * (pred_h - 1.0);
pred_x1 = fmaxf(fminf(pred_x1, im_info[3*b+1] - 1.0f), 0.0f);
pred_y1 = fmaxf(fminf(pred_y1, im_info[3*b] - 1.0f), 0.0f);
pred_x2 = fmaxf(fminf(pred_x2, im_info[3*b+1] - 1.0f), 0.0f);
pred_y2 = fmaxf(fminf(pred_y2, im_info[3*b] - 1.0f), 0.0f);
boxes[6*t] = pred_x1;
boxes[6*t + 1] = pred_y1;
boxes[6*t + 2] = pred_x2;
boxes[6*t + 3] = pred_y2;
int min_size = 3;
if ((pred_y2 - pred_y1) < min_size && (pred_x2 - pred_x1) < min_size) {
boxes[6*t] -= min_size/2;
boxes[6*t + 1] -= min_size/2;
boxes[6*t + 2] += min_size/2;
boxes[6*t + 3] += min_size/2;
boxes[6*t + 4] = -1;
}
float area = (boxes[6*t + 2] - boxes[6*t]) * (boxes[6*t + 3] - boxes[6*t + 1]);
if (area >= valid_ranges[2*b+1] * valid_ranges[2*b+1] || area < valid_ranges[2*b]*valid_ranges[2*b]) {
boxes[6*t + 4] = -1;
}
boxes[6*t + 5] = area;
scorebuf[t] = boxes[6*t + 4];
scoreids[t] = index;
}
}
} // namespace utils
template<typename xpu>
class MultiProposalTargetMaskGPUOp : public Operator{
public:
float *labels;
float *bbox_targets;
float *bbox_weights;
float *crois;
float *gt_boxes;
float *out_pos_boxes;
float *out_pos_ids;
explicit MultiProposalTargetMaskGPUOp(MultiProposalTargetMaskParam param) {
this->param_ = param;
this->param_.workspace = (param_.workspace << 20) / sizeof(float);
this->crois = new float[300*param.batch_size*5];
this->labels = new float[300*param.batch_size];
this->gt_boxes = new float[param.max_gts*param.batch_size*5];
this->bbox_targets = new float[300*param.batch_size*4];
this->bbox_weights = new float[300*param.batch_size*4];
this->out_pos_boxes = new float[param.max_masks*param.batch_size*5];
this->out_pos_ids = new float[param.max_masks*param.batch_size];
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
CHECK_EQ(in_data.size(), 5);
CHECK_EQ(out_data.size(), 6);
using namespace mshadow;
using namespace mshadow::expr;
//clock_t t;
//t = clock();
Stream<gpu> *s = ctx.get_stream<gpu>();
Tensor<gpu, 4> tscores = in_data[proposal::kClsProb].get<gpu, 4, real_t>(s);
Tensor<gpu, 4> tbbox_deltas = in_data[proposal::kBBoxPred].get<gpu, 4, real_t>(s);
Tensor<gpu, 2> tim_info = in_data[proposal::kImInfo].get<gpu, 2, real_t>(s);
Tensor<gpu, 3> tgt_boxes = in_data[proposal::kGTBoxes].get<gpu, 3, real_t>(s);
Tensor<gpu, 2> tvalid_ranges = in_data[proposal::kValidRanges].get<gpu, 2, real_t>(s);
int max_gts = param_.max_gts;
Tensor<gpu, 2> rois = out_data[proposal::kRoIs].get<gpu, 2, real_t>(s);
int rpn_post_nms_top_n = param_.rpn_post_nms_top_n;
int num_images = tbbox_deltas.size(0);
int num_anchors = tbbox_deltas.size(1) / 4;
int height = tbbox_deltas.size(2);
int width = tbbox_deltas.size(3);
int count_anchors = num_anchors*height*width;
int total_anchors = count_anchors * num_images;
int bufsize = (total_anchors*8 + num_images*rpn_post_nms_top_n*5 + num_anchors*4)*sizeof(float);
Tensor<gpu, 1> workspace = ctx.requested[proposal::kTempSpace].get_space_typed<gpu, 1, float>(Shape1(bufsize), s);
int pre_nms_top_n = 6000;
float* propbuf = workspace.dptr_;
float* scorebuf = workspace.dptr_ + total_anchors*6;
float* idbuf = workspace.dptr_ + total_anchors*7;
float* detbuf = workspace.dptr_ + total_anchors*8;
float* anchorbuf = workspace.dptr_ + total_anchors*8 + num_images * 6 * pre_nms_top_n;
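// Workspace layout, in float elements: propbuf = 6 per anchor at offset 0, scorebuf =
// 1 per anchor at total_anchors*6, idbuf at total_anchors*7, detbuf at total_anchors*8,
// and anchorbuf (num_anchors*4 base anchors) after detbuf's num_images*6*pre_nms_top_n
// slot. Note bufsize above is computed in bytes but requested as a float count, so the
// allocation appears roomier than strictly needed.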
std::vector<float> base_anchor(4);
//usleep(20000000);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
std::vector<float> anchors;
utils::GenerateAnchors(base_anchor,
param_.ratios,
param_.scales,
&anchors);
unsigned int size = num_anchors*4*sizeof(float);
cudaMemcpy(anchorbuf, &anchors[0], size, cudaMemcpyHostToDevice);
//call cuda kernel
int threadsPerBlock = NUM_THREADS_NMS;
int numblocks = (total_anchors/threadsPerBlock) + 1;
utils::getPropsCu<<<numblocks, threadsPerBlock>>>(propbuf, tbbox_deltas.dptr_, tim_info.dptr_, anchorbuf, tscores.dptr_,
tvalid_ranges.dptr_, num_images, num_anchors, height, width, param_.feature_stride,
scorebuf, idbuf);
std::vector <float> tmp(total_anchors);
std::vector<float> ids(total_anchors);
cudaDeviceSynchronize();
cudaMemcpy(&tmp[0], scorebuf, sizeof(float) * num_images * count_anchors, cudaMemcpyDeviceToHost);
#pragma omp parallel for num_threads(8)
for (int i = 0; i < total_anchors; i++) {
ids[i] = (float)(i % count_anchors);
}
#pragma omp parallel for num_threads(8)
for (int i = 0; i < num_images; i++) {
float basep = count_anchors*i;
std::sort(ids.begin() + i*count_anchors, ids.begin() + (i+1)*count_anchors,
[&tmp, basep](float i1, float i2) {
return tmp[(int)i1 + basep] > tmp[(int)i2 + basep];
});
}
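// Scores were copied back to the host above; each image's anchor indices are now
// argsorted in descending score order (via the ids array) and copied back to the
// device so the NMS kernel can walk proposals from highest to lowest score.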
cudaMemcpy(idbuf, &ids[0], sizeof(float) * num_images * count_anchors, cudaMemcpyHostToDevice);
utils::NonMaximumSuppressionCu<<<num_images, threadsPerBlock>>>(propbuf, rpn_post_nms_top_n, num_images,
num_anchors, width, height, max_gts, rois.dptr_,
tvalid_ranges.dptr_, tgt_boxes.dptr_, idbuf, detbuf);
cudaDeviceSynchronize();
cudaError_t error;
error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
cudaMemcpy(crois, rois.dptr_, 5*rpn_post_nms_top_n*num_images*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(gt_boxes, tgt_boxes.dptr_, 5*max_gts*num_images*sizeof(float), cudaMemcpyDeviceToHost);
std::vector <int> numgts_per_image(num_images);
for (int i = 0; i < num_images; i++) {
numgts_per_image[i] = 0;
for (int j = 0; j < max_gts; j++) {
if (gt_boxes[i*max_gts*5 + j*5 + 4] != -1) {
numgts_per_image[i]++;
}
}
}
#pragma omp parallel for num_threads(8)
for (int i = 0; i < num_images; i++) {
for (int j = 0; j < rpn_post_nms_top_n; j++) {
int basepos = rpn_post_nms_top_n*i + j;
labels[basepos] = 0;
bbox_targets[4*basepos] = 1.0;
bbox_targets[4*basepos + 1] = 1.0;
bbox_targets[4*basepos + 2] = 1.0;
bbox_targets[4*basepos + 3] = 1.0;
bbox_weights[4*basepos] = 0.0;
bbox_weights[4*basepos + 1] = 0.0;
bbox_weights[4*basepos + 2] = 0.0;
bbox_weights[4*basepos + 3] = 0.0;
}
}
float *maxids = new float[num_images*rpn_post_nms_top_n];
for (int i = 0; i < num_images*rpn_post_nms_top_n; i++) {
maxids[i] = -1;
}
#pragma omp parallel for num_threads(8)
for (int imid = 0; imid < num_images; imid++) {
int tpct = 0;
int num_gts_this_image = numgts_per_image[imid];
//std::cout << "gtc " << num_gts_this_image << std::endl;
int props_this_batch = rpn_post_nms_top_n;
if (num_gts_this_image > 0) {
float *overlaps = new float[props_this_batch * num_gts_this_image];
float *max_overlaps = new float[props_this_batch];
for (int i = 0; i < props_this_batch; i++) {
max_overlaps[i] = 0;
}
float *max_overlap_ids = new float[props_this_batch];
std::set <int> positive_label_ids;
for (int i = 0; i < props_this_batch; i++) {
max_overlap_ids[i] = 0;
}
for (int i = props_this_batch; i < rpn_post_nms_top_n; i++) {
labels[imid*rpn_post_nms_top_n + i] = -1;
}
//get overlaps, maximum overlaps and gt labels
for (int i = 0; i < numgts_per_image[imid]; i++) {
float x1 = gt_boxes[imid*5*max_gts + i*5];
float y1 = gt_boxes[imid*5*max_gts + i*5 + 1];
float x2 = gt_boxes[imid*5*max_gts + i*5 + 2];
float y2 = gt_boxes[imid*5*max_gts + i*5 + 3];
int pbase;
float a1 = (x2 - x1) * (y2 - y1);
float xx1, yy1, xx2, yy2, w, h, inter, ovr, a2;
for (int j = 0; j < props_this_batch; j++) {
pbase = rpn_post_nms_top_n*imid + j;
xx1 = std::max(x1, crois[pbase*5 + 1]);
yy1 = std::max(y1, crois[pbase*5 + 2]);
xx2 = std::min(x2, crois[pbase*5 + 3]);
yy2 = std::min(y2, crois[pbase*5 + 4]);
w = std::max(0.0f, xx2 - xx1 + 1.0f);
h = std::max(0.0f, yy2 - yy1 + 1.0f);
a2 = (crois[pbase*5 + 3] - crois[pbase*5 + 1]) * (crois[pbase*5 + 4] - crois[pbase*5 + 2]);
inter = w * h;
ovr = inter / (a1 + a2 - inter);
overlaps[i*num_gts_this_image + j] = ovr;
if (overlaps[i*num_gts_this_image + j] > max_overlaps[j] && overlaps[i*num_gts_this_image + j] > 0.5) {
max_overlaps[j] = overlaps[i*num_gts_this_image + j];
max_overlap_ids[j] = i;
//set labels for positive proposals
labels[imid*rpn_post_nms_top_n + j] = gt_boxes[imid*5*max_gts + i*5 + 4];
positive_label_ids.insert(j);
tpct = tpct + 1;
}
}
}
// p = proposal, g = ground truth; cx, cy = box center, w, h = box width and height
int pid, gtid;
float gx1, gx2, gy1, gy2, px1, px2, py1, py2;
float gcx, gcy, gw, gh, pcx, pcy, pw, ph;
//generate bbox targets for the positive labels
for (auto it = positive_label_ids.begin(); it !=positive_label_ids.end(); it++) {
pid = *it;
int baseid = (imid*rpn_post_nms_top_n + pid);
bbox_weights[baseid*4] = 1;
bbox_weights[baseid*4+1] = 1;
bbox_weights[baseid*4+2] = 1;
bbox_weights[baseid*4+3] = 1;
gtid = max_overlap_ids[pid];
maxids[baseid] = gtid;
gx1 = gt_boxes[imid*5*max_gts + gtid*5];
gy1 = gt_boxes[imid*5*max_gts + gtid*5 + 1];
gx2 = gt_boxes[imid*5*max_gts + gtid*5 + 2];
gy2 = gt_boxes[imid*5*max_gts + gtid*5 + 3];
gw = gx2 - gx1 + 1;
gh = gy2 - gy1 + 1;
gcx = gx1 + gw*0.5;
gcy = gy1 + gh*0.5;
px1 = crois[baseid*5 + 1];
py1 = crois[baseid*5 + 2];
px2 = crois[baseid*5 + 3];
py2 = crois[baseid*5 + 4];
pw = px2 - px1 + 1;
ph = py2 - py1 + 1;
pcx = px1 + (pw-1)*0.5;
pcy = py1 + (ph-1)*0.5;
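// Standard R-CNN box-regression parameterization relative to the proposal:
// tx = (gcx-pcx)/pw, ty = (gcy-pcy)/ph, tw = log(gw/pw), th = log(gh/ph),
// scaled here by 10 and 5 (presumably the usual target stds of 0.1 and 0.2 folded in).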
bbox_targets[4*baseid] = 10 * (gcx - pcx) / (pw + 1e-7);
bbox_targets[4*baseid+1] = 10 * (gcy - pcy) / (ph + 1e-7);
bbox_targets[4*baseid+2] = 5 * log(gw/(pw + 1e-7));
bbox_targets[4*baseid+3] = 5 * log(gh/(ph + 1e-7));
}
delete [] max_overlap_ids;
delete [] overlaps;
delete [] max_overlaps;
}
}
int mask_ct = 0;
for (int i = 0; i < num_images*300; i++) {
if (labels[i] > 0 && mask_ct < num_images * param_.max_masks) {
out_pos_boxes[5*mask_ct] = crois[5*i];
out_pos_boxes[5*mask_ct+1] = crois[5*i+1];
out_pos_boxes[5*mask_ct+2] = crois[5*i+2];
out_pos_boxes[5*mask_ct+3] = crois[5*i+3];
out_pos_boxes[5*mask_ct+4] = crois[5*i+4];
out_pos_ids[mask_ct] = maxids[i];
mask_ct++;
}
}
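// Pad the unused mask slots with small dummy boxes and id -1 so the mask outputs
// always have a fixed shape of num_images * max_masks entries.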
for (int i = mask_ct; i < num_images*param_.max_masks; i++) {
out_pos_boxes[5*i] = i % num_images;
out_pos_boxes[5*i+1] = i % 200;
out_pos_boxes[5*i+2] = i % 200;
out_pos_boxes[5*i+3] = i % 200 + 100;
out_pos_boxes[5*i+4] = i % 200 + 100;
out_pos_ids[i] = -1;
}
delete [] maxids;
Stream<gpu> *so = ctx.get_stream<gpu>();
Tensor<gpu, 2> olabels = out_data[proposal::kLabels].get<gpu, 2, real_t>(so);
Tensor<gpu, 2> obbox_targets = out_data[proposal::kBboxTarget].get<gpu, 2, real_t>(so);
Tensor<gpu, 2> obbox_weights = out_data[proposal::kBboxWeight].get<gpu, 2, real_t>(so);
Tensor<gpu, 2> omaskrois = out_data[proposal::kMaskRoIs].get<gpu, 2, real_t>(s);
Tensor<gpu, 2> omaskids = out_data[proposal::kMaskIds].get<gpu, 2, real_t>(s);
cudaMemcpy(omaskrois.dptr_, out_pos_boxes, sizeof(float) * num_images * param_.max_masks * 5, cudaMemcpyHostToDevice);
cudaMemcpy(omaskids.dptr_, out_pos_ids, sizeof(float) * num_images * param_.max_masks, cudaMemcpyHostToDevice);
cudaMemcpy(olabels.dptr_, labels, sizeof(float) * num_images*300, cudaMemcpyHostToDevice);
cudaMemcpy(obbox_targets.dptr_, bbox_targets, 4*sizeof(float) * num_images*300, cudaMemcpyHostToDevice);
cudaMemcpy(obbox_weights.dptr_, bbox_weights, 4*sizeof(float) * num_images*300, cudaMemcpyHostToDevice);
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 5);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 3> ggt_boxes = in_grad[proposal::kGTBoxes].get<xpu, 3, real_t>(s);
Tensor<xpu, 2> gvalid_ranges = in_grad[proposal::kValidRanges].get<xpu, 2, real_t>(s);
// cannot assume the incoming gradient buffers are zero, so zero them explicitly
Assign(gscores, req[proposal::kClsProb], 0);
Assign(gbbox, req[proposal::kBBoxPred], 0);
Assign(ginfo, req[proposal::kImInfo], 0);
Assign(ggt_boxes, req[proposal::kGTBoxes], 0);
Assign(gvalid_ranges, req[proposal::kValidRanges], 0);
}
private:
MultiProposalTargetMaskParam param_;
}; // class MultiProposalOp
template<>
Operator *CreateOp<gpu>(MultiProposalTargetMaskParam param) {
return new MultiProposalTargetMaskGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
|
4fb5dce5c981890f4191aaf42ff0c09c3f13a51d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.cu"
__global__ void d_filter_BOX_GRAY(const uchar1* const src, unsigned char * const dst, int width, int height ) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return; // guard: a rounded-up grid can overshoot the image
Neighbours3x3<uchar1> neighbours;
d_getNeighbours_8(src, width, height, x, y, neighbours);
dst[x+width*y] = d_avgNeighbours_8(neighbours);
}
void h_filter_BOX_GRAY(const uchar1 * src, unsigned char * dst, int width, int height) {
dim3 threadsPerBlock(16, 16, 1);
// round the block count up (plain integer division would drop the partial edge block)
dim3 blocks(ceil( (float) width/threadsPerBlock.x), ceil( (float) height/threadsPerBlock.y), 1);
hipLaunchKernelGGL(( d_filter_BOX_GRAY) , dim3(blocks) , dim3(threadsPerBlock) , 0, 0, src, dst, width, height);
hipDeviceSynchronize();
}
__global__ void d_filter_MEDIAN_GRAY(const uchar1* const src, unsigned char * const dst, int width, int height ) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return; // guard: the rounded-up grid can overshoot the image
dst[x+width*y] = 0;
}
void h_filter_MEDIAN_GRAY(const uchar1 * src, unsigned char * dst, int width, int height) {
dim3 threadsPerBlock(16, 16, 1);
dim3 blocks(ceil( (float) width/threadsPerBlock.x), ceil( (float) height/threadsPerBlock.y), 1);
hipLaunchKernelGGL(( d_filter_MEDIAN_GRAY) , dim3(blocks) , dim3(threadsPerBlock) , 0, 0, src, dst, width, height);
hipDeviceSynchronize();
} | 4fb5dce5c981890f4191aaf42ff0c09c3f13a51d.cu | #include "utils.cu"
__global__ void d_filter_BOX_GRAY(const uchar1* const src, unsigned char * const dst, int width, int height ) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return; // guard: a rounded-up grid can overshoot the image
Neighbours3x3<uchar1> neighbours;
d_getNeighbours_8(src, width, height, x, y, neighbours);
dst[x+width*y] = d_avgNeighbours_8(neighbours);
}
void h_filter_BOX_GRAY(const uchar1 * src, unsigned char * dst, int width, int height) {
dim3 threadsPerBlock(16, 16, 1);
// round the block count up (plain integer division would drop the partial edge block)
dim3 blocks(ceil( (float) width/threadsPerBlock.x), ceil( (float) height/threadsPerBlock.y), 1);
d_filter_BOX_GRAY <<< blocks , threadsPerBlock >>> (src, dst, width, height);
cudaDeviceSynchronize();
}
__global__ void d_filter_MEDIAN_GRAY(const uchar1* const src, unsigned char * const dst, int width, int height ) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return; // guard: the rounded-up grid can overshoot the image
dst[x+width*y] = 0;
}
void h_filter_MEDIAN_GRAY(const uchar1 * src, unsigned char * dst, int width, int height) {
dim3 threadsPerBlock(16, 16, 1);
dim3 blocks(ceil( (float) width/threadsPerBlock.x), ceil( (float) height/threadsPerBlock.y), 1);
d_filter_MEDIAN_GRAY <<< blocks , threadsPerBlock >>> (src, dst, width, height);
cudaDeviceSynchronize();
} |
65912fe8f5a97ff4c348292348c4388724e4210f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "util/type_name.hpp"
#include "util/miscellany.cuh"
#include <kat/on_device/streams/printfing_ostream.cuh>
#include <kat/on_device/collaboration/block.cuh>
#include <kat/on_device/time.cuh>
#include <doctest.h>
#include <cuda/api_wrappers.hpp>
constexpr const auto num_grid_blocks { 2 };
constexpr const auto block_size { 2 };
constexpr const std::size_t stringstream_buffer_size { 50 };
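// Deliberately tiny launch (2 blocks of 2 threads each) and a small 50-char stringstream
// buffer, presumably so every thread's printf output stays short and readable.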
KAT_DEV kat::stringstream& operator<<(kat::stringstream& os, const util::constexpr_string& arg)
{
return os << strf::range<const char*>(arg.begin(), arg.end());
}
namespace kernels {
__global__ void stream_different_types_to_stringstream()
{
char buff[40] = "char buffer original contents";
kat::stringstream ss(stringstream_buffer_size);
ss << "A string literal.\n";
ss << "A single character - the letter a: " << 'a' << "\n";
ss << "An array of of characters on the stack: \"" << buff << "\"\n";
ss << "Positive signed int literal: " << 123 << '\n';
ss << "Negative signed int literal: " << -456 << '\n';
ss << "A float-type value (1/3): " << ( ((float) 1) / 3 ) << '\n';
ss << "A double-type value (1/3): " << ( ((float) 1) / 3.0 ) << '\n';
// This is not supported:
// ss << "A non-latin, non-ASCII character: " << (char32_t) '' << '\n';
printf("The stringstream contents:\n%s", ss.c_str());
}
template <typename T>
__global__ void stream_to_stringstream_templated()
{
kat::stringstream ss(stringstream_buffer_size);
ss << "A default-initialized value of type T (" << util::type_name<T>() << "): " << T{} << '\n';
printf("The stringstream contents:\n%s", ss.c_str());
}
__global__ void use_formatting_functions()
{
kat::stringstream ss(stringstream_buffer_size);
auto width = 8;
ss << "No actual formatting, just wrapping in strf::fmt():\n";
ss << strf::fmt(123) << '\n';
ss << "Hexadecimal:\n";
ss << strf::hex(123) << '\n';
ss << "Octal:\n";
ss << strf::oct(123) << '\n';
ss << "Binary:\n";
ss << strf::bin(123) << '\n';
ss << "Set fill character, without setting width:\n";
ss << strf::fmt(123).fill('0') << '\n';
ss << "Set fill character, set width of " << width << ", right alignment; then a space and more text:\n";
ss << (strf::fmt(123).fill('0') > width) << " and more text\n";
ss << "Set fill character, set width of " << width << ", left alignment; then a space more text:\n";
ss << (strf::fmt(123).fill('0') < width) << " and more text\n";
ss << "Set fill character, set width of " << width << ", center alignment; then a space more text:\n";
ss << (strf::fmt(123).fill('0') ^ width) << " and more text\n";
ss << "Set fill character, set width of " << width << ", internal fill using hex; then a space more text:\n";
ss << (strf::fmt(123).fill('0').hex() % width) << " and more text\n";
// TODO: More strf formatting functions
ss << "strf::right(0,2,'0') gives " << strf::right(0, 2, '0');
printf("The stringstream contents: \"%s\"\n\n", ss.c_str());
}
__global__ void use_stringstream()
{
kat::stringstream ss{stringstream_buffer_size}; // longer than some, but not all, of the strings we have here
ss << "A string literal";
printf("Position in the stream: %u. stream contents (enclosed in double-quotes): \"%s\"\n", (unsigned) ss.tellp(), ss.c_str());
printf("Seeking to the beginning of the stream.\n");
ss.seekp(0);
printf("Position in the stream: %u. stream contents (enclosed in double-quotes, should be empty): \"%s\"\n", (unsigned) ss.tellp(), ss.c_str());
}
__global__ void use_zero_initialized_stringstream()
{
kat::stringstream ss{0};
ss << "A string literal";
printf("Position in the stream: %u. stream contents (enclosed in double-quotes): \"%s\"\n", (unsigned) ss.tellp(), ss.c_str());
printf("Seeking to the beginning of the stream.\n");
ss.seekp(0);
printf("Position in the stream: %u. stream contents (enclosed in double-quotes, should be empty): \"%s\"\n", (unsigned) ss.tellp(), ss.c_str());
}
__global__ void use_printfing_ostream()
{
kat::printfing_ostream cout;
cout << "String literal 1 with newline - to be printed on call of .flush() method\n";
cout.flush();
kat::collaborative::block::barrier();
if (kat::linear_grid::grid_info::thread::index_in_block() == 0) {
printf("All threads in block %d have flushed cout.\n", blockIdx.x);
}
cout << "String literal 2 with newline - to be printed on use of flush manipulator\n";
cout << kat::flush;
kat::collaborative::block::barrier();
if (kat::linear_grid::grid_info::thread::index_in_block() == 0) {
printf("All threads in block %d have streamed the flush manipulator to their cout.\n", blockIdx.x);
}
cout << "String literal 3 with newline - to be printed on destruction\n";
}
__global__ void printfing_ostream_settings()
{
kat::printfing_ostream cout;
namespace gi = kat::linear_grid::grid_info;
cout << "Before any setting\n";
cout.flush();
// TODO: What if the text is big enough to cause recycling? That shouldn't matter, but we should try it.
cout.append_newline_on_flush();
cout << "SHOULD see \\n between threads' printouts of this sentence. ";
cout.flush();
cout.no_newline_on_flush();
cout << "SHOULD NOT see \\n between threads' printouts of this sentence. ";
cout.flush();
if (kat::linear_grid::grid_info::thread::is_first_in_grid()) {
cout << '\n';
cout.flush();
}
// This will just add a newline after the long paragraph of many threads' non-newline-terminated strings.
auto block_and_thread_gen = [](kat::stringstream& ss) {
ss << "Block " << gi::block::index() << ", Thread " << gi::thread::index_in_block() << ": ";
};
cout.set_prefix_generator(block_and_thread_gen);
cout << "A prefix with the thread and block number SHOULD appear before this sentence.\n";
cout.flush();
cout.no_prefix();
cout << "A prefix with the thread and block number SHOULD NOT appear before this sentence.\n";
cout.flush();
// resolution and identification
}
__global__ void stream_manipulators_into_printfing_ostream()
{
kat::printfing_ostream cout;
using kat::flush;
namespace gi = kat::linear_grid::grid_info;
cout << "Before any setting\n" << flush;
// TODO: What if the text is big enough to cause recycling? That shouldn't matter, but we should try it.
cout << kat::manipulators::newline_on_flush
<< "SHOULD see \\n between threads' printouts of this sentence. " << flush
<< kat::manipulators::no_newline_on_flush
<< "SHOULD NOT see \\n between threads' printouts of this sentence. " << flush;
if (kat::linear_grid::grid_info::thread::is_first_in_grid()) {
// This will just add a newline after the long paragraph of many threads' non-newline-terminated strings.
cout << kat::manipulators::endl;
}
auto block_and_thread_gen = [](kat::stringstream& ss) {
ss << "Block " << gi::block::index() << ", Thread " << gi::thread::index_in_block() << ": ";
};
cout << kat::manipulators::prefix(block_and_thread_gen)
<< "A prefix with the thread and block number SHOULD appear before this sentence.\n" << flush
<< kat::manipulators::no_prefix
<< "A prefix with the thread and block number SHOULD NOT appear before this sentence.\n" << flush;
// resolution and self-identification
// cout << strf::join_right(15,'*')("joined right") << '\n' << flush;
}
__device__ const char* to_string(kat::printfing_ostream::resolution res)
{
switch(res) {
case kat::printfing_ostream::resolution::thread : return "thread";
case kat::printfing_ostream::resolution::warp : return "warp";
case kat::printfing_ostream::resolution::block : return "block";
case kat::printfing_ostream::resolution::grid : return "grid";
}
return nullptr;
}
__global__ void print_at_different_resolutions()
{
kat::printfing_ostream cout;
using kat::flush;
namespace gi = kat::linear_grid::grid_info;
cout << kat::manipulators::resolution(kat::printfing_ostream::resolution::grid);
cout << "Printing at grid resolution. The printing thread is (" << blockIdx.x << "," << threadIdx.x << ")\n" << flush;
cout << kat::manipulators::resolution(kat::printfing_ostream::resolution::warp);
cout << "Printing at warp resolution. The printing thread is (" << blockIdx.x << "," << threadIdx.x << ")\n" << flush;
cout << kat::manipulators::resolution(kat::printfing_ostream::resolution::thread);
cout << "Printing at thread resolution. The printing thread is (" << blockIdx.x << "," << threadIdx.x << ")\n" << flush;
}
__device__ void sipo_for_resolution(kat::printfing_ostream& os, kat::printfing_ostream::resolution res)
{
os
<< kat::manipulators::resolution(res)
<< kat::linear_grid::manipulators::identify
<< "Printing to a self-identifying ostream with resolution "
<< to_string(os.printing_resolution())
<< kat::endl;
}
__global__ void self_identifying_printfing_ostream()
{
kat::printfing_ostream cout;
using kat::flush;
namespace gi = kat::linear_grid::grid_info;
sipo_for_resolution(cout, kat::printfing_ostream::resolution::grid);
kat::sleep<kat::sleep_resolution::clock_cycles>(1e8);
sipo_for_resolution(cout, kat::printfing_ostream::resolution::block);
__syncthreads();
sipo_for_resolution(cout, kat::printfing_ostream::resolution::warp);
__syncthreads();
sipo_for_resolution(cout, kat::printfing_ostream::resolution::thread);
__syncthreads();
}
} // namespace kernels
TEST_SUITE("printing") {
TEST_CASE("use_stringstream")// INTEGER_TYPES)
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::use_stringstream, launch_config);
cuda::outstanding_error::ensure_none();
// TODO: We could redirect the standard output stream into a buffer before launching the kernel,
// then check the buffer contains what we want. However, this can probably be interfered with,
// so I'm not sure it's a good idea even in principle.
device.synchronize();
}
TEST_CASE("use_zero_initialized_stringstream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::use_zero_initialized_stringstream, launch_config);
cuda::outstanding_error::ensure_none();
// TODO: We could redirect the standard output stream into a buffer before launching the kernel,
// then check the buffer contains what we want. However, this can probably be interfered with,
// so I'm not sure it's a good idea even in principle.
device.synchronize();
}
TEST_CASE("stream_different_types_to_stringstream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::stream_different_types_to_stringstream, launch_config);
cuda::outstanding_error::ensure_none();
// TODO: We could redirect the standard output stream into a buffer before launching the kernel,
// then check the buffer contains what we want. However, this can probably be interfered with,
// so I'm not sure it's a good idea even in principle.
device.synchronize();
}
TEST_CASE_TEMPLATE("stream_to_stringstream_templated", T, long long int, short)
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::stream_to_stringstream_templated<T>, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("use_formatting_functions")
{
auto device { cuda::device::current::get() };
device.reset();
cuda::launch(kernels::use_formatting_functions, single_thread_launch_config());
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("use_printfing_ostream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::use_printfing_ostream, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("printfing_ostream_settings")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::printfing_ostream_settings, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("stream manipulators into printfing_ostream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::stream_manipulators_into_printfing_ostream, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("print_at_different_resolutions")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::print_at_different_resolutions, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("self-identifying printfing_ostream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::self_identifying_printfing_ostream, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
} // TEST_SUITE("printing")
| 65912fe8f5a97ff4c348292348c4388724e4210f.cu | #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "util/type_name.hpp"
#include "util/miscellany.cuh"
#include <kat/on_device/streams/printfing_ostream.cuh>
#include <kat/on_device/collaboration/block.cuh>
#include <kat/on_device/time.cuh>
#include <doctest.h>
#include <cuda/api_wrappers.hpp>
constexpr const auto num_grid_blocks { 2 };
constexpr const auto block_size { 2 };
constexpr const std::size_t stringstream_buffer_size { 50 };
KAT_DEV kat::stringstream& operator<<(kat::stringstream& os, const util::constexpr_string& arg)
{
return os << strf::range<const char*>(arg.begin(), arg.end());
}
namespace kernels {
__global__ void stream_different_types_to_stringstream()
{
char buff[40] = "char buffer original contents";
kat::stringstream ss(stringstream_buffer_size);
ss << "A string literal.\n";
ss << "A single character - the letter a: " << 'a' << "\n";
ss << "An array of of characters on the stack: \"" << buff << "\"\n";
ss << "Positive signed int literal: " << 123 << '\n';
ss << "Negative signed int literal: " << -456 << '\n';
ss << "A float-type value (1/3): " << ( ((float) 1) / 3 ) << '\n';
ss << "A double-type value (1/3): " << ( ((float) 1) / 3.0 ) << '\n';
// This is not supported:
// ss << "A non-latin, non-ASCII character: " << (char32_t) 'ת' << '\n';
printf("The stringstream contents:\n%s", ss.c_str());
}
template <typename T>
__global__ void stream_to_stringstream_templated()
{
kat::stringstream ss(stringstream_buffer_size);
ss << "A default-initialized value of type T (" << util::type_name<T>() << "): " << T{} << '\n';
printf("The stringstream contents:\n%s", ss.c_str());
}
__global__ void use_formatting_functions()
{
kat::stringstream ss(stringstream_buffer_size);
auto width = 8;
ss << "No actual formatting, just wrapping in strf::fmt():\n";
ss << strf::fmt(123) << '\n';
ss << "Hexadecimal:\n";
ss << strf::hex(123) << '\n';
ss << "Octal:\n";
ss << strf::oct(123) << '\n';
ss << "Binary:\n";
ss << strf::bin(123) << '\n';
ss << "Set fill character, without setting width:\n";
ss << strf::fmt(123).fill('0') << '\n';
ss << "Set fill character, set width of " << width << ", right alignment; then a space and more text:\n";
ss << (strf::fmt(123).fill('0') > width) << " and more text\n";
ss << "Set fill character, set width of " << width << ", left alignment; then a space more text:\n";
ss << (strf::fmt(123).fill('0') < width) << " and more text\n";
ss << "Set fill character, set width of " << width << ", center alignment; then a space more text:\n";
ss << (strf::fmt(123).fill('0') ^ width) << " and more text\n";
ss << "Set fill character, set width of " << width << ", internal fill using hex; then a space more text:\n";
ss << (strf::fmt(123).fill('0').hex() % width) << " and more text\n";
// TODO: More strf formatting functions
ss << "strf::right(0,2,'0') gives " << strf::right(0, 2, '0');
printf("The stringstream contents: \"%s\"\n\n", ss.c_str());
}
__global__ void use_stringstream()
{
kat::stringstream ss{stringstream_buffer_size}; // longer than some, but not all, of the strings we have here
ss << "A string literal";
printf("Position in the stream: %u. stream contents (enclosed in double-quotes): \"%s\"\n", (unsigned) ss.tellp(), ss.c_str());
printf("Seeking to the beginning of the stream.\n");
ss.seekp(0);
printf("Position in the stream: %u. stream contents (enclosed in double-quotes, should be empty): \"%s\"\n", (unsigned) ss.tellp(), ss.c_str());
}
__global__ void use_zero_initialized_stringstream()
{
kat::stringstream ss{0};
ss << "A string literal";
printf("Position in the stream: %u. stream contents (enclosed in double-quotes): \"%s\"\n", (unsigned) ss.tellp(), ss.c_str());
printf("Seeking to the beginning of the stream.\n");
ss.seekp(0);
printf("Position in the stream: %u. stream contents (enclosed in double-quotes, should be empty): \"%s\"\n", (unsigned) ss.tellp(), ss.c_str());
}
__global__ void use_printfing_ostream()
{
kat::printfing_ostream cout;
cout << "String literal 1 with newline - to be printed on call of .flush() method\n";
cout.flush();
kat::collaborative::block::barrier();
if (kat::linear_grid::grid_info::thread::index_in_block() == 0) {
printf("All threads in block %d have flushed cout.\n", blockIdx.x);
}
cout << "String literal 2 with newline - to be printed on use of flush manipulator\n";
cout << kat::flush;
kat::collaborative::block::barrier();
if (kat::linear_grid::grid_info::thread::index_in_block() == 0) {
printf("All threads in block %d have streamed the flush manipulator to their cout.\n", blockIdx.x);
}
cout << "String literal 3 with newline - to be printed on destruction\n";
}
__global__ void printfing_ostream_settings()
{
kat::printfing_ostream cout;
namespace gi = kat::linear_grid::grid_info;
cout << "Before any setting\n";
cout.flush();
// TODO: What if the text is big enough to cause recycling? That shouldn't matter, but we should try it.
cout.append_newline_on_flush();
cout << "SHOULD see \\n between threads' printouts of this sentence. ";
cout.flush();
cout.no_newline_on_flush();
cout << "SHOULD NOT see \\n between threads' printouts of this sentence. ";
cout.flush();
if (kat::linear_grid::grid_info::thread::is_first_in_grid()) {
cout << '\n';
cout.flush();
}
// This will just add a newline after the long paragraph of many threads' non-newline-terminated strings.
auto block_and_thread_gen = [](kat::stringstream& ss) {
ss << "Block " << gi::block::index() << ", Thread " << gi::thread::index_in_block() << ": ";
};
cout.set_prefix_generator(block_and_thread_gen);
cout << "A prefix with the thread and block number SHOULD appear before this sentence.\n";
cout.flush();
cout.no_prefix();
cout << "A prefix with the thread and block number SHOULD NOT appear before this sentence.\n";
cout.flush();
// resolution and identification
}
__global__ void stream_manipulators_into_printfing_ostream()
{
kat::printfing_ostream cout;
using kat::flush;
namespace gi = kat::linear_grid::grid_info;
cout << "Before any setting\n" << flush;
// TODO: What if the text is big enough to cause recycling? That shouldn't matter, but we should try it.
cout << kat::manipulators::newline_on_flush
<< "SHOULD see \\n between threads' printouts of this sentence. " << flush
<< kat::manipulators::no_newline_on_flush
<< "SHOULD NOT see \\n between threads' printouts of this sentence. " << flush;
if (kat::linear_grid::grid_info::thread::is_first_in_grid()) {
// This will just add a newline after the long paragraph of many threads' non-newline-terminated strings.
cout << kat::manipulators::endl;
}
auto block_and_thread_gen = [](kat::stringstream& ss) {
ss << "Block " << gi::block::index() << ", Thread " << gi::thread::index_in_block() << ": ";
};
cout << kat::manipulators::prefix(block_and_thread_gen)
<< "A prefix with the thread and block number SHOULD appear before this sentence.\n" << flush
<< kat::manipulators::no_prefix
<< "A prefix with the thread and block number SHOULD NOT appear before this sentence.\n" << flush;
// resolution and self-identification
// cout << strf::join_right(15,'*')("joined right") << '\n' << flush;
}
__device__ const char* to_string(kat::printfing_ostream::resolution res)
{
switch(res) {
case kat::printfing_ostream::resolution::thread : return "thread";
case kat::printfing_ostream::resolution::warp : return "warp";
case kat::printfing_ostream::resolution::block : return "block";
case kat::printfing_ostream::resolution::grid : return "grid";
}
return nullptr;
}
__global__ void print_at_different_resolutions()
{
kat::printfing_ostream cout;
using kat::flush;
namespace gi = kat::linear_grid::grid_info;
cout << kat::manipulators::resolution(kat::printfing_ostream::resolution::grid);
cout << "Printing at grid resolution. The printing thread is (" << blockIdx.x << "," << threadIdx.x << ")\n" << flush;
cout << kat::manipulators::resolution(kat::printfing_ostream::resolution::warp);
cout << "Printing at warp resolution. The printing thread is (" << blockIdx.x << "," << threadIdx.x << ")\n" << flush;
cout << kat::manipulators::resolution(kat::printfing_ostream::resolution::thread);
cout << "Printing at thread resolution. The printing thread is (" << blockIdx.x << "," << threadIdx.x << ")\n" << flush;
}
__device__ void sipo_for_resolution(kat::printfing_ostream& os, kat::printfing_ostream::resolution res)
{
os
<< kat::manipulators::resolution(res)
<< kat::linear_grid::manipulators::identify
<< "Printing to a self-identifying ostream with resolution "
<< to_string(os.printing_resolution())
<< kat::endl;
}
__global__ void self_identifying_printfing_ostream()
{
kat::printfing_ostream cout;
using kat::flush;
namespace gi = kat::linear_grid::grid_info;
sipo_for_resolution(cout, kat::printfing_ostream::resolution::grid);
kat::sleep<kat::sleep_resolution::clock_cycles>(1e8);
sipo_for_resolution(cout, kat::printfing_ostream::resolution::block);
__syncthreads();
sipo_for_resolution(cout, kat::printfing_ostream::resolution::warp);
__syncthreads();
sipo_for_resolution(cout, kat::printfing_ostream::resolution::thread);
__syncthreads();
}
} // namespace kernels
TEST_SUITE("printing") {
TEST_CASE("use_stringstream")// INTEGER_TYPES)
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::use_stringstream, launch_config);
cuda::outstanding_error::ensure_none();
// TODO: We could redirect the standard output stream into a buffer before launching the kernel,
// then check the buffer contains what we want. However, this can probably be interfered with,
// so I'm not sure it's a good idea even in principle.
device.synchronize();
}
TEST_CASE("use_zero_initialized_stringstream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::use_zero_initialized_stringstream, launch_config);
cuda::outstanding_error::ensure_none();
// TODO: We could redirect the standard output stream into a buffer before launching the kernel,
// then check the buffer contains what we want. However, this can probably be interfered with,
// so I'm not sure it's a good idea even in principle.
device.synchronize();
}
TEST_CASE("stream_different_types_to_stringstream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::stream_different_types_to_stringstream, launch_config);
cuda::outstanding_error::ensure_none();
// TODO: We could redirect the standard output stream into a buffer before launching the kernel,
// then check the buffer contains what we want. However, this can probably be interfered with,
// so I'm not sure it's a good idea even in principle.
device.synchronize();
}
TEST_CASE_TEMPLATE("stream_to_stringstream_templated", T, long long int, short)
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::stream_to_stringstream_templated<T>, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("use_formatting_functions")
{
auto device { cuda::device::current::get() };
device.reset();
cuda::launch(kernels::use_formatting_functions, single_thread_launch_config());
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("use_printfing_ostream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::use_printfing_ostream, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("printfing_ostream_settings")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::printfing_ostream_settings, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("stream manipulators into printfing_ostream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::stream_manipulators_into_printfing_ostream, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("print_at_different_resolutions")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::print_at_different_resolutions, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
TEST_CASE("self-identifying printfing_ostream")
{
auto device { cuda::device::current::get() };
device.reset();
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
cuda::launch(kernels::self_identifying_printfing_ostream, launch_config);
cuda::outstanding_error::ensure_none();
device.synchronize();
}
} // TEST_SUITE("printing")
|
29f5d7c9d492907da9db16d96122e7ea417ca445.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#define N 2//64
__device__ int bar () {
return 0;
}
__global__ void foo() {
assert (bar() ==0);
}
int main(){
hipLaunchKernelGGL(( foo), dim3(1), dim3(N), 0, 0, );
//ESBMC_verify_kernel(foo, 1, N);
hipDeviceSynchronize();
return 0;
}
| 29f5d7c9d492907da9db16d96122e7ea417ca445.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <assert.h>
#define N 2//64
__device__ int bar () {
return 0;
}
__global__ void foo() {
assert (bar() ==0);
}
int main(){
foo<<<1, N>>>();
//ESBMC_verify_kernel(foo, 1, N);
cudaThreadSynchronize();
return 0;
}
|
249a84e9118b2d770a4b1c74cac104b044f474dd.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
MIT License
Copyright (c) 2016 Antti-Pekka Hynninen
Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*******************************************************************************/
#include <stdio.h>
#ifdef ENABLE_NVTOOLS
#include <nvToolsExtCuda.h>
#endif
#include "CudaUtils.h"
//----------------------------------------------------------------------------------------
void set_device_array_async_T(void *data, int value, const size_t ndata, hipStream_t stream, const size_t sizeofT) {
cudaCheck(hipMemsetAsync(data, value, sizeofT*ndata, stream));
}
void set_device_array_T(void *data, int value, const size_t ndata, const size_t sizeofT) {
cudaCheck(hipMemset(data, value, sizeofT*ndata));
}
//----------------------------------------------------------------------------------------
//
// Allocate gpu memory
// pp = memory pointer
// len = length of the array
//
void allocate_device_T(void **pp, const size_t len, const size_t sizeofT) {
cudaCheck(hipMalloc(pp, sizeofT*len));
}
//----------------------------------------------------------------------------------------
//
// Deallocate gpu memory
// pp = memory pointer
//
void deallocate_device_T(void **pp) {
if (*pp != NULL) {
cudaCheck(hipFree((void *)(*pp)));
*pp = NULL;
}
}
//----------------------------------------------------------------------------------------
//
// Copies memory Host -> Device
//
void copy_HtoD_async_T(const void *h_array, void *d_array, size_t array_len, hipStream_t stream,
const size_t sizeofT) {
cudaCheck(hipMemcpyAsync(d_array, h_array, sizeofT*array_len, hipMemcpyHostToDevice, stream));
}
void copy_HtoD_T(const void *h_array, void *d_array, size_t array_len,
const size_t sizeofT) {
cudaCheck(hipMemcpy(d_array, h_array, sizeofT*array_len, hipMemcpyHostToDevice));
}
//----------------------------------------------------------------------------------------
//
// Copies memory Device -> Host
//
void copy_DtoH_async_T(const void *d_array, void *h_array, const size_t array_len, hipStream_t stream,
const size_t sizeofT) {
cudaCheck(hipMemcpyAsync(h_array, d_array, sizeofT*array_len, hipMemcpyDeviceToHost, stream));
}
void copy_DtoH_T(const void *d_array, void *h_array, const size_t array_len, const size_t sizeofT) {
cudaCheck(hipMemcpy(h_array, d_array, sizeofT*array_len, hipMemcpyDeviceToHost));
}
//----------------------------------------------------------------------------------------
#ifdef ENABLE_NVTOOLS
void gpuRangeStart(const char *range_name) {
static int color_id=0;
nvtxEventAttributes_t att;
att.version = NVTX_VERSION;
att.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
att.colorType = NVTX_COLOR_ARGB;
if (color_id == 0) {
att.color = 0xFFFF0000;
} else if (color_id == 1) {
att.color = 0xFF00FF00;
} else if (color_id == 2) {
att.color = 0xFF0000FF;
} else if (color_id == 3) {
att.color = 0xFFFF00FF;
}
color_id++;
if (color_id > 3) color_id = 0;
att.messageType = NVTX_MESSAGE_TYPE_ASCII;
att.message.ascii = range_name;
nvtxRangePushEx(&att);
}
void gpuRangeStop() {
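// Note: the matching push in gpuRangeStart still uses the NVTX API while hipify mapped
// this pop to roctx; a consistent build would want the push/pop pair from one tooling API.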
roctxRangePop();
}
#endif
| 249a84e9118b2d770a4b1c74cac104b044f474dd.cu | /******************************************************************************
MIT License
Copyright (c) 2016 Antti-Pekka Hynninen
Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*******************************************************************************/
#include <stdio.h>
#ifdef ENABLE_NVTOOLS
#include <nvToolsExtCuda.h>
#endif
#include "CudaUtils.h"
//----------------------------------------------------------------------------------------
void set_device_array_async_T(void *data, int value, const size_t ndata, cudaStream_t stream, const size_t sizeofT) {
cudaCheck(cudaMemsetAsync(data, value, sizeofT*ndata, stream));
}
void set_device_array_T(void *data, int value, const size_t ndata, const size_t sizeofT) {
cudaCheck(cudaMemset(data, value, sizeofT*ndata));
}
//----------------------------------------------------------------------------------------
//
// Allocate gpu memory
// pp = memory pointer
// len = length of the array
//
void allocate_device_T(void **pp, const size_t len, const size_t sizeofT) {
cudaCheck(cudaMalloc(pp, sizeofT*len));
}
//----------------------------------------------------------------------------------------
//
// Deallocate gpu memory
// pp = memory pointer
//
void deallocate_device_T(void **pp) {
if (*pp != NULL) {
cudaCheck(cudaFree((void *)(*pp)));
*pp = NULL;
}
}
//----------------------------------------------------------------------------------------
//
// Copies memory Host -> Device
//
void copy_HtoD_async_T(const void *h_array, void *d_array, size_t array_len, cudaStream_t stream,
const size_t sizeofT) {
cudaCheck(cudaMemcpyAsync(d_array, h_array, sizeofT*array_len, cudaMemcpyHostToDevice, stream));
}
void copy_HtoD_T(const void *h_array, void *d_array, size_t array_len,
const size_t sizeofT) {
cudaCheck(cudaMemcpy(d_array, h_array, sizeofT*array_len, cudaMemcpyHostToDevice));
}
//----------------------------------------------------------------------------------------
//
// Copies memory Device -> Host
//
void copy_DtoH_async_T(const void *d_array, void *h_array, const size_t array_len, cudaStream_t stream,
const size_t sizeofT) {
cudaCheck(cudaMemcpyAsync(h_array, d_array, sizeofT*array_len, cudaMemcpyDeviceToHost, stream));
}
void copy_DtoH_T(const void *d_array, void *h_array, const size_t array_len, const size_t sizeofT) {
cudaCheck(cudaMemcpy(h_array, d_array, sizeofT*array_len, cudaMemcpyDeviceToHost));
}
//----------------------------------------------------------------------------------------
#ifdef ENABLE_NVTOOLS
void gpuRangeStart(const char *range_name) {
static int color_id=0;
nvtxEventAttributes_t att;
att.version = NVTX_VERSION;
att.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
att.colorType = NVTX_COLOR_ARGB;
if (color_id == 0) {
att.color = 0xFFFF0000;
} else if (color_id == 1) {
att.color = 0xFF00FF00;
} else if (color_id == 2) {
att.color = 0xFF0000FF;
} else if (color_id == 3) {
att.color = 0xFFFF00FF;
}
color_id++;
if (color_id > 3) color_id = 0;
att.messageType = NVTX_MESSAGE_TYPE_ASCII;
att.message.ascii = range_name;
nvtxRangePushEx(&att);
}
void gpuRangeStop() {
nvtxRangePop();
}
#endif
|
a135e20b9e934c4881690a648313c46126e20c58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
// Simple define to index into a 1D array from 2D space
#define I2D(num, c, r) ((r)*(num)+(c))
/*
* `step_kernel_mod` is currently a direct copy of the CPU reference solution
* `step_kernel_ref` below. Accelerate it to run as a CUDA kernel.
*/
// =============================================================
__global__
void step_kernel_mod(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
int i00, im10, ip10, i0m1, i0p1;
float d2tdx2, d2tdy2;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
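// One thread per grid point: j (from the x index) selects the row out of nj and i (from
// the y index) the column out of ni, matching I2D(ni, i, j) = j*ni + i. Boundary points
// fail the check below and simply keep their initial values.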
// loop over all points in domain (except boundary)
if (j > 0 && i > 0 && j < nj-1 && i < ni-1) {
// find indices into linear memory
// for central point and neighbours
i00 = I2D(ni, i, j);
im10 = I2D(ni, i-1, j);
ip10 = I2D(ni, i+1, j);
i0m1 = I2D(ni, i, j-1);
i0p1 = I2D(ni, i, j+1);
// evaluate derivatives
d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10];
d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1];
// update temperatures
temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2);
}
}
// =============================================================
void step_kernel_ref(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
int i00, im10, ip10, i0m1, i0p1;
float d2tdx2, d2tdy2;
// loop over all points in domain (except boundary)
for ( int j=1; j < nj-1; j++ ) {
for ( int i=1; i < ni-1; i++ ) {
// find indices into linear memory
// for central point and neighbours
i00 = I2D(ni, i, j);
im10 = I2D(ni, i-1, j);
ip10 = I2D(ni, i+1, j);
i0m1 = I2D(ni, i, j-1);
i0p1 = I2D(ni, i, j+1);
// evaluate derivatives
d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10];
d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1];
// update temperatures
temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2);
}
}
}
// =============================================================
int main()
{
int istep;
int nstep = 200; // number of time steps
// Specify our 2D dimensions
const int ni = 200;
const int nj = 100;
float tfac = 8.418e-5; // thermal diffusivity of silver
float *temp1_ref, *temp2_ref, *temp1, *temp2, *temp_tmp;
const int size = ni * nj * sizeof(float);
temp1_ref = (float*)malloc(size);
temp2_ref = (float*)malloc(size);
//temp1 = (float*)malloc(size);
//temp2 = (float*)malloc(size);
hipMallocManaged(&temp1, size);
hipMallocManaged(&temp2, size);
// Initialize with random data
for( int i = 0; i < ni*nj; ++i) {
temp1_ref[i] = temp2_ref[i] = temp1[i] = temp2[i] = (float)rand()/(float)(RAND_MAX/100.0f);
}
// Execute the CPU-only reference version
for (istep=0; istep < nstep; istep++) {
step_kernel_ref(ni, nj, tfac, temp1_ref, temp2_ref);
// swap the temperature pointers
temp_tmp = temp1_ref;
temp1_ref = temp2_ref;
temp2_ref= temp_tmp;
}
dim3 block(32, 16, 1);
dim3 grid((nj/block.x)+1, (ni/block.y)+1, 1);
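// grid.x spans the nj (j) direction and grid.y the ni (i) direction; the +1 rounds the
// block count up, and threads outside the interior exit via the kernel's bounds check.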
// Execute the modified version using same data
for (istep=0; istep < nstep; istep++) {
hipLaunchKernelGGL(( step_kernel_mod), dim3(grid), dim3(block), 0, 0, ni, nj, tfac, temp1, temp2);
hipDeviceSynchronize();
// swap the temperature pointers
temp_tmp = temp1;
temp1 = temp2;
temp2= temp_tmp;
}
float maxError = 0;
// Output should always be stored in the temp1 and temp1_ref at this point
for( int i = 0; i < ni*nj; ++i ) {
if (abs(temp1[i]-temp1_ref[i]) > maxError) { maxError = abs(temp1[i]-temp1_ref[i]); }
}
// Check and see if our maxError is greater than an error bound
if (maxError > 0.0005f)
printf("Problem! The Max Error of %.5f is NOT within acceptable bounds.\n", maxError);
else
printf("The Max Error of %.5f is within acceptable bounds.\n", maxError);
free( temp1_ref );
free( temp2_ref );
hipFree( temp1 );
hipFree( temp2 );
return 0;
}
| a135e20b9e934c4881690a648313c46126e20c58.cu | #include <stdio.h>
#include <math.h>
// Simple define to index into a 1D array from 2D space
#define I2D(num, c, r) ((r)*(num)+(c))
/*
* `step_kernel_mod` is currently a direct copy of the CPU reference solution
* `step_kernel_ref` below. Accelerate it to run as a CUDA kernel.
*/
// =============================================================
__global__
void step_kernel_mod(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
int i00, im10, ip10, i0m1, i0p1;
float d2tdx2, d2tdy2;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
// loop over all points in domain (except boundary)
if (j > 0 && i > 0 && j < nj-1 && i < ni-1) {
// find indices into linear memory
// for central point and neighbours
i00 = I2D(ni, i, j);
im10 = I2D(ni, i-1, j);
ip10 = I2D(ni, i+1, j);
i0m1 = I2D(ni, i, j-1);
i0p1 = I2D(ni, i, j+1);
// evaluate derivatives
d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10];
d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1];
// update temperatures
temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2);
}
}
// =============================================================
void step_kernel_ref(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
int i00, im10, ip10, i0m1, i0p1;
float d2tdx2, d2tdy2;
// loop over all points in domain (except boundary)
for ( int j=1; j < nj-1; j++ ) {
for ( int i=1; i < ni-1; i++ ) {
// find indices into linear memory
// for central point and neighbours
i00 = I2D(ni, i, j);
im10 = I2D(ni, i-1, j);
ip10 = I2D(ni, i+1, j);
i0m1 = I2D(ni, i, j-1);
i0p1 = I2D(ni, i, j+1);
// evaluate derivatives
d2tdx2 = temp_in[im10]-2*temp_in[i00]+temp_in[ip10];
d2tdy2 = temp_in[i0m1]-2*temp_in[i00]+temp_in[i0p1];
// update temperatures
temp_out[i00] = temp_in[i00]+fact*(d2tdx2 + d2tdy2);
}
}
}
// =============================================================
int main()
{
int istep;
int nstep = 200; // number of time steps
// Specify our 2D dimensions
const int ni = 200;
const int nj = 100;
float tfac = 8.418e-5; // thermal diffusivity of silver
float *temp1_ref, *temp2_ref, *temp1, *temp2, *temp_tmp;
const int size = ni * nj * sizeof(float);
temp1_ref = (float*)malloc(size);
temp2_ref = (float*)malloc(size);
//temp1 = (float*)malloc(size);
//temp2 = (float*)malloc(size);
cudaMallocManaged(&temp1, size);
cudaMallocManaged(&temp2, size);
// Initialize with random data
for( int i = 0; i < ni*nj; ++i) {
temp1_ref[i] = temp2_ref[i] = temp1[i] = temp2[i] = (float)rand()/(float)(RAND_MAX/100.0f);
}
// Execute the CPU-only reference version
for (istep=0; istep < nstep; istep++) {
step_kernel_ref(ni, nj, tfac, temp1_ref, temp2_ref);
// swap the temperature pointers
temp_tmp = temp1_ref;
temp1_ref = temp2_ref;
temp2_ref= temp_tmp;
}
dim3 block(32, 16, 1);
dim3 grid((nj/block.x)+1, (ni/block.y)+1, 1);
// Execute the modified version using same data
for (istep=0; istep < nstep; istep++) {
step_kernel_mod<<<grid, block>>>(ni, nj, tfac, temp1, temp2);
cudaDeviceSynchronize();
// swap the temperature pointers
temp_tmp = temp1;
temp1 = temp2;
temp2= temp_tmp;
}
float maxError = 0;
// Output should always be stored in the temp1 and temp1_ref at this point
for( int i = 0; i < ni*nj; ++i ) {
if (abs(temp1[i]-temp1_ref[i]) > maxError) { maxError = abs(temp1[i]-temp1_ref[i]); }
}
// Check and see if our maxError is greater than an error bound
if (maxError > 0.0005f)
printf("Problem! The Max Error of %.5f is NOT within acceptable bounds.\n", maxError);
else
printf("The Max Error of %.5f is within acceptable bounds.\n", maxError);
free( temp1_ref );
free( temp2_ref );
cudaFree( temp1 );
cudaFree( temp2 );
return 0;
}
|
4ac2f4ba67b3769a1cb8e9b050054b805d4fc0c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel2_x;
int xdim0_advec_mom_kernel2_x_h = -1;
__constant__ int ydim0_advec_mom_kernel2_x;
int ydim0_advec_mom_kernel2_x_h = -1;
__constant__ int xdim1_advec_mom_kernel2_x;
int xdim1_advec_mom_kernel2_x_h = -1;
__constant__ int ydim1_advec_mom_kernel2_x;
int ydim1_advec_mom_kernel2_x_h = -1;
__constant__ int xdim2_advec_mom_kernel2_x;
int xdim2_advec_mom_kernel2_x_h = -1;
__constant__ int ydim2_advec_mom_kernel2_x;
int ydim2_advec_mom_kernel2_x_h = -1;
__constant__ int xdim3_advec_mom_kernel2_x;
int xdim3_advec_mom_kernel2_x_h = -1;
__constant__ int ydim3_advec_mom_kernel2_x;
int ydim3_advec_mom_kernel2_x_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel2_x * (y) + \
xdim0_advec_mom_kernel2_x * ydim0_advec_mom_kernel2_x * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel2_x * (y) + \
xdim1_advec_mom_kernel2_x * ydim1_advec_mom_kernel2_x * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_mom_kernel2_x * (y) + \
xdim2_advec_mom_kernel2_x * ydim2_advec_mom_kernel2_x * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_mom_kernel2_x * (y) + \
xdim3_advec_mom_kernel2_x * ydim3_advec_mom_kernel2_x * (z))
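// Each OPS_ACC macro flattens a relative (x, y, z) stencil offset into a 1-D index using
// the per-dataset pitches (xdim*/ydim*) held in __constant__ memory above.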
// user function
__device__
inline void
advec_mom_kernel2_x(double *vel1, const double *node_mass_post,
const double *node_mass_pre, const double *mom_flux) {
vel1[OPS_ACC0(0, 0, 0)] =
(vel1[OPS_ACC0(0, 0, 0)] * node_mass_pre[OPS_ACC2(0, 0, 0)] +
mom_flux[OPS_ACC3(-1, 0, 0)] - mom_flux[OPS_ACC3(0, 0, 0)]) /
node_mass_post[OPS_ACC1(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_advec_mom_kernel2_x(double *__restrict arg0,
const double *__restrict arg1,
const double *__restrict arg2,
const double *__restrict arg3,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel2_x +
idx_z * 1 * 1 * xdim0_advec_mom_kernel2_x * ydim0_advec_mom_kernel2_x;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel2_x +
idx_z * 1 * 1 * xdim1_advec_mom_kernel2_x * ydim1_advec_mom_kernel2_x;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel2_x +
idx_z * 1 * 1 * xdim2_advec_mom_kernel2_x * ydim2_advec_mom_kernel2_x;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel2_x +
idx_z * 1 * 1 * xdim3_advec_mom_kernel2_x * ydim3_advec_mom_kernel2_x;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel2_x(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_advec_mom_kernel2_x(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 28))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(28, "advec_mom_kernel2_x");
OPS_kernels[28].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != xdim0_advec_mom_kernel2_x_h ||
ydim0 != ydim0_advec_mom_kernel2_x_h ||
xdim1 != xdim1_advec_mom_kernel2_x_h ||
ydim1 != ydim1_advec_mom_kernel2_x_h ||
xdim2 != xdim2_advec_mom_kernel2_x_h ||
ydim2 != ydim2_advec_mom_kernel2_x_h ||
xdim3 != xdim3_advec_mom_kernel2_x_h ||
ydim3 != ydim3_advec_mom_kernel2_x_h) {
hipMemcpyToSymbol(xdim0_advec_mom_kernel2_x, &xdim0, sizeof(int));
xdim0_advec_mom_kernel2_x_h = xdim0;
hipMemcpyToSymbol(ydim0_advec_mom_kernel2_x, &ydim0, sizeof(int));
ydim0_advec_mom_kernel2_x_h = ydim0;
hipMemcpyToSymbol(xdim1_advec_mom_kernel2_x, &xdim1, sizeof(int));
xdim1_advec_mom_kernel2_x_h = xdim1;
hipMemcpyToSymbol(ydim1_advec_mom_kernel2_x, &ydim1, sizeof(int));
ydim1_advec_mom_kernel2_x_h = ydim1;
hipMemcpyToSymbol(xdim2_advec_mom_kernel2_x, &xdim2, sizeof(int));
xdim2_advec_mom_kernel2_x_h = xdim2;
hipMemcpyToSymbol(ydim2_advec_mom_kernel2_x, &ydim2, sizeof(int));
ydim2_advec_mom_kernel2_x_h = ydim2;
hipMemcpyToSymbol(xdim3_advec_mom_kernel2_x, &xdim3, sizeof(int));
xdim3_advec_mom_kernel2_x_h = xdim3;
hipMemcpyToSymbol(ydim3_advec_mom_kernel2_x, &ydim3, sizeof(int));
ydim3_advec_mom_kernel2_x_h = ydim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
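// base0 is the byte offset of the first iteration point inside arg0's device
// buffer, accounting for the dat's base indices and halo depths (d_m).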
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[28].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_advec_mom_kernel2_x), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[28].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[28].mpi_time += t2 - t1;
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
| 4ac2f4ba67b3769a1cb8e9b050054b805d4fc0c6.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel2_x;
int xdim0_advec_mom_kernel2_x_h = -1;
__constant__ int ydim0_advec_mom_kernel2_x;
int ydim0_advec_mom_kernel2_x_h = -1;
__constant__ int xdim1_advec_mom_kernel2_x;
int xdim1_advec_mom_kernel2_x_h = -1;
__constant__ int ydim1_advec_mom_kernel2_x;
int ydim1_advec_mom_kernel2_x_h = -1;
__constant__ int xdim2_advec_mom_kernel2_x;
int xdim2_advec_mom_kernel2_x_h = -1;
__constant__ int ydim2_advec_mom_kernel2_x;
int ydim2_advec_mom_kernel2_x_h = -1;
__constant__ int xdim3_advec_mom_kernel2_x;
int xdim3_advec_mom_kernel2_x_h = -1;
__constant__ int ydim3_advec_mom_kernel2_x;
int ydim3_advec_mom_kernel2_x_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel2_x * (y) + \
xdim0_advec_mom_kernel2_x * ydim0_advec_mom_kernel2_x * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel2_x * (y) + \
xdim1_advec_mom_kernel2_x * ydim1_advec_mom_kernel2_x * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_mom_kernel2_x * (y) + \
xdim2_advec_mom_kernel2_x * ydim2_advec_mom_kernel2_x * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_mom_kernel2_x * (y) + \
xdim3_advec_mom_kernel2_x * ydim3_advec_mom_kernel2_x * (z))
// user function
__device__
inline void
advec_mom_kernel2_x(double *vel1, const double *node_mass_post,
const double *node_mass_pre, const double *mom_flux) {
vel1[OPS_ACC0(0, 0, 0)] =
(vel1[OPS_ACC0(0, 0, 0)] * node_mass_pre[OPS_ACC2(0, 0, 0)] +
mom_flux[OPS_ACC3(-1, 0, 0)] - mom_flux[OPS_ACC3(0, 0, 0)]) /
node_mass_post[OPS_ACC1(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_advec_mom_kernel2_x(double *__restrict arg0,
const double *__restrict arg1,
const double *__restrict arg2,
const double *__restrict arg3,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel2_x +
idx_z * 1 * 1 * xdim0_advec_mom_kernel2_x * ydim0_advec_mom_kernel2_x;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel2_x +
idx_z * 1 * 1 * xdim1_advec_mom_kernel2_x * ydim1_advec_mom_kernel2_x;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel2_x +
idx_z * 1 * 1 * xdim2_advec_mom_kernel2_x * ydim2_advec_mom_kernel2_x;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel2_x +
idx_z * 1 * 1 * xdim3_advec_mom_kernel2_x * ydim3_advec_mom_kernel2_x;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel2_x(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_advec_mom_kernel2_x(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 28))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(28, "advec_mom_kernel2_x");
OPS_kernels[28].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != xdim0_advec_mom_kernel2_x_h ||
ydim0 != ydim0_advec_mom_kernel2_x_h ||
xdim1 != xdim1_advec_mom_kernel2_x_h ||
ydim1 != ydim1_advec_mom_kernel2_x_h ||
xdim2 != xdim2_advec_mom_kernel2_x_h ||
ydim2 != ydim2_advec_mom_kernel2_x_h ||
xdim3 != xdim3_advec_mom_kernel2_x_h ||
ydim3 != ydim3_advec_mom_kernel2_x_h) {
cudaMemcpyToSymbol(xdim0_advec_mom_kernel2_x, &xdim0, sizeof(int));
xdim0_advec_mom_kernel2_x_h = xdim0;
cudaMemcpyToSymbol(ydim0_advec_mom_kernel2_x, &ydim0, sizeof(int));
ydim0_advec_mom_kernel2_x_h = ydim0;
cudaMemcpyToSymbol(xdim1_advec_mom_kernel2_x, &xdim1, sizeof(int));
xdim1_advec_mom_kernel2_x_h = xdim1;
cudaMemcpyToSymbol(ydim1_advec_mom_kernel2_x, &ydim1, sizeof(int));
ydim1_advec_mom_kernel2_x_h = ydim1;
cudaMemcpyToSymbol(xdim2_advec_mom_kernel2_x, &xdim2, sizeof(int));
xdim2_advec_mom_kernel2_x_h = xdim2;
cudaMemcpyToSymbol(ydim2_advec_mom_kernel2_x, &ydim2, sizeof(int));
ydim2_advec_mom_kernel2_x_h = ydim2;
cudaMemcpyToSymbol(xdim3_advec_mom_kernel2_x, &xdim3, sizeof(int));
xdim3_advec_mom_kernel2_x_h = xdim3;
cudaMemcpyToSymbol(ydim3_advec_mom_kernel2_x, &ydim3, sizeof(int));
ydim3_advec_mom_kernel2_x_h = ydim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[28].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_advec_mom_kernel2_x<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[28].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[28].mpi_time += t2 - t1;
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
|
c6e71e53b201d3781e42d759ab6fa9bf923811da.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "hip/device_functions.h"
#include "time.h"
#include <windows.h>
#include "device_launch_parameters.h"
#include "stdio.h"
// includes, project
// Thread block size
#define BLOCK_SIZE 512
// Matrix dimensions
// (chosen as multiples of the thread block size for simplicity)
#define WA (3 * BLOCK_SIZE) // Matrix A width
#define HA (5 * BLOCK_SIZE) // Matrix A height
#define WB 1 // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
//sequential code implemented on cpu
void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA)
{
for (unsigned int i = 0; i < hA; ++i)
{
double sum = 0;
for (unsigned int k = 0; k < wA; ++k)
{
double a = A[i * wA + k];
double b = B[k ];
sum += a * b;
}
C[i] = (float)sum;
}
}
// Initialize a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
__global__ void matrixMul(float* C, float* A, float* B,int wA)
{
// Each thread computes one element of C as the dot product
// of one row of A with the vector B (no shared memory is used)
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int Cindex = bx*BLOCK_SIZE + tx;
int ABase = Cindex*wA;
int i;
float sum=0;
for (i = 0; i < wA; i++)
{
sum += B[i] * A[ABase + i];
}
C[Cindex] = sum;
}
int main(int argc, char **argv)
{
LARGE_INTEGER start, finish;
LARGE_INTEGER freq;
double costtime1;
double costtime2;
double speedup;
// set seed for rand()
srand((unsigned)time(NULL));
// allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A;
hipMalloc((void**)&d_A, mem_size_A);
float* d_B;
hipMalloc((void**)&d_B, mem_size_B);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
// allocate device memory for result
unsigned int size_C = WC * HC;
unsigned int mem_size_C = sizeof(float) * size_C;
float* d_C;
hipMalloc((void**)&d_C, mem_size_C);
// allocate host memory for the result
float* h_C = (float*)malloc(mem_size_C);
// create and start timer
unsigned int timer = 0;
// setup execution parameters
dim3 threads(BLOCK_SIZE);
dim3 grid(HA/BLOCK_SIZE);
// execute the kernel
QueryPerformanceFrequency(&freq);
QueryPerformanceCounter(&start);
hipLaunchKernelGGL(( matrixMul) , dim3(grid),dim3(threads) , 0, 0, d_C, d_A, d_B,WA);
hipDeviceSynchronize();
QueryPerformanceCounter(&finish);
// stop and destroy timer
costtime1 = (double)(finish.QuadPart - start.QuadPart) * 1000 / freq.QuadPart; //ms
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
// compute reference solution
float* reference = (float*)malloc(mem_size_C);
QueryPerformanceCounter(&start);
computeGold(reference, h_A, h_B, HA, WA);
QueryPerformanceCounter(&finish);
costtime2 = (double)(finish.QuadPart - start.QuadPart) * 1000 / freq.QuadPart; //ms
speedup = costtime2 / costtime1;
printf("time1: %f ms\n", costtime1);
printf("time2: %f ms\n", costtime2);
printf("speedup is %f\n", speedup);
// check result
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
getchar();
}
| c6e71e53b201d3781e42d759ab6fa9bf923811da.cu | #include <stdlib.h>
#include <string.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_functions.h"
#include "time.h"
#include <windows.h>
#include "device_launch_parameters.h"
#include "stdio.h"
// includes, project
// Thread block size
#define BLOCK_SIZE 512
// Matrix dimensions
// (chosen as multiples of the thread block size for simplicity)
#define WA (3 * BLOCK_SIZE) // Matrix A width
#define HA (5 * BLOCK_SIZE) // Matrix A height
#define WB 1 // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
//sequential code implemented on cpu
void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA)
{
for (unsigned int i = 0; i < hA; ++i)
{
double sum = 0;
for (unsigned int k = 0; k < wA; ++k)
{
double a = A[i * wA + k];
double b = B[k ];
sum += a * b;
}
C[i] = (float)sum;
}
}
// Initialize a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
__global__ void matrixMul(float* C, float* A, float* B,int wA)
{
// Each thread computes one element of C as the dot product
// of one row of A with the vector B (no shared memory is used)
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int Cindex = bx*BLOCK_SIZE + tx;
int ABase = Cindex*wA;
int i;
float sum=0;
for (i = 0; i < wA; i++)
{
sum += B[i] * A[ABase + i];
}
C[Cindex] = sum;
}
int main(int argc, char **argv)
{
LARGE_INTEGER start, finish;
LARGE_INTEGER freq;
double costtime1;
double costtime2;
double speedup;
// set seed for rand()
srand((unsigned)time(NULL));
// allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A;
cudaMalloc((void**)&d_A, mem_size_A);
float* d_B;
cudaMalloc((void**)&d_B, mem_size_B);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
// allocate device memory for result
unsigned int size_C = WC * HC;
unsigned int mem_size_C = sizeof(float) * size_C;
float* d_C;
cudaMalloc((void**)&d_C, mem_size_C);
// allocate host memory for the result
float* h_C = (float*)malloc(mem_size_C);
// create and start timer
unsigned int timer = 0;
// setup execution parameters
dim3 threads(BLOCK_SIZE);
dim3 grid(HA/BLOCK_SIZE);
// execute the kernel
QueryPerformanceFrequency(&freq);
QueryPerformanceCounter(&start);
matrixMul <<< grid,threads >>>(d_C, d_A, d_B,WA);
cudaThreadSynchronize();
QueryPerformanceCounter(&finish);
// stop and destroy timer
costtime1 = (double)(finish.QuadPart - start.QuadPart) * 1000 / freq.QuadPart; //ms
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
// compute reference solution
float* reference = (float*)malloc(mem_size_C);
QueryPerformanceCounter(&start);
computeGold(reference, h_A, h_B, HA, WA);
QueryPerformanceCounter(&finish);
costtime2 = (double)(finish.QuadPart - start.QuadPart) * 1000 / freq.QuadPart; //ms
speedup = costtime2 / costtime1;
printf("time1: %f ms\n", costtime1);
printf("time2: %f ms\n", costtime2);
printf("speedup is %f\n", speedup);
// check result
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
getchar();
}
|
fb04c058b2352809d63ce5b0ff4fd9273c12dcf0.hip | // !!! This is a file automatically generated by hipify!!!
//Example 2. Application Using C and cuBLAS: 0-based indexing
//-----------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#define M 6
#define N 5
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
static __inline__ void modify (hipblasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){
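// Scales the tail of row p (n-q elements, stride ldm) by alpha and the tail of
// column q (ldm-p elements, stride 1) by beta.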
hipblasSscal (handle, n-q, &alpha, &m[IDX2C(p,q,ldm)], ldm);
hipblasSscal (handle, ldm-p, &beta, &m[IDX2C(p,q,ldm)], 1);
}
int main (void){
hipError_t cudaStat;
hipblasStatus_t stat;
hipblasHandle_t handle;
int i, j;
float* devPtrA;
float* a = 0;
a = (float *)malloc (M * N * sizeof (*a));
if (!a) {
printf ("host memory allocation failed");
return EXIT_FAILURE;
}
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
a[IDX2C(i,j,M)] = (float)(i * N + j + 1);
}
}
cudaStat = hipMalloc ((void**)&devPtrA, M*N*sizeof(*a));
if (cudaStat != hipSuccess) {
printf ("device memory allocation failed");
return EXIT_FAILURE;
}
stat = hipblasCreate(&handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
stat = hipblasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data download failed");
hipFree (devPtrA);
hipblasDestroy(handle);
return EXIT_FAILURE;
}
modify (handle, devPtrA, M, N, 1, 2, 16.0f, 12.0f);
stat = hipblasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
hipFree (devPtrA);
hipblasDestroy(handle);
return EXIT_FAILURE;
}
hipFree (devPtrA);
hipblasDestroy(handle);
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
printf ("%7.0f", a[IDX2C(i,j,M)]);
}
printf ("\n");
}
free(a);
return EXIT_SUCCESS;
} | fb04c058b2352809d63ce5b0ff4fd9273c12dcf0.cu | //Example 2. Application Using C and cuBLAS: 0-based indexing
//-----------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#define M 6
#define N 5
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
static __inline__ void modify (cublasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){
cublasSscal (handle, n-q, &alpha, &m[IDX2C(p,q,ldm)], ldm);
cublasSscal (handle, ldm-p, &beta, &m[IDX2C(p,q,ldm)], 1);
}
int main (void){
cudaError_t cudaStat;
cublasStatus_t stat;
cublasHandle_t handle;
int i, j;
float* devPtrA;
float* a = 0;
a = (float *)malloc (M * N * sizeof (*a));
if (!a) {
printf ("host memory allocation failed");
return EXIT_FAILURE;
}
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
a[IDX2C(i,j,M)] = (float)(i * N + j + 1);
}
}
cudaStat = cudaMalloc ((void**)&devPtrA, M*N*sizeof(*a));
if (cudaStat != cudaSuccess) {
printf ("device memory allocation failed");
return EXIT_FAILURE;
}
stat = cublasCreate(&handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
stat = cublasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data download failed");
cudaFree (devPtrA);
cublasDestroy(handle);
return EXIT_FAILURE;
}
modify (handle, devPtrA, M, N, 1, 2, 16.0f, 12.0f);
stat = cublasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
cudaFree (devPtrA);
cublasDestroy(handle);
return EXIT_FAILURE;
}
cudaFree (devPtrA);
cublasDestroy(handle);
for (j = 0; j < N; j++) {
for (i = 0; i < M; i++) {
printf ("%7.0f", a[IDX2C(i,j,M)]);
}
printf ("\n");
}
free(a);
return EXIT_SUCCESS;
} |
bd91b607017fad7b85ef74328dfa04d0d221fea4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime_api.h>
#include "clusteringZero.h"
using namespace std;
__global__ void clusteringZero(float **var, int **intVar){
int nfib = *intVar[6];
int *cluster = intVar[35];
int *clusterAccess = intVar[36];
int m = threadIdx.x+blockIdx.x*blockDim.x;
int ind;
clusterAccess[m] = 1;
for(ind = 0; ind<nfib; ind++){
cluster[m*nfib+ind] = 0;
if(ind == m) cluster[m*nfib+ind] = 1;
}
}
| bd91b607017fad7b85ef74328dfa04d0d221fea4.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime_api.h>
#include "clusteringZero.h"
using namespace std;
__global__ void clusteringZero(float **var, int **intVar){
int nfib = *intVar[6];
int *cluster = intVar[35];
int *clusterAccess = intVar[36];
int m = threadIdx.x+blockIdx.x*blockDim.x;
int ind;
clusterAccess[m] = 1;
for(ind = 0; ind<nfib; ind++){
cluster[m*nfib+ind] = 0;
if(ind == m) cluster[m*nfib+ind] = 1;
}
}
|
5e34f090b39856020ba60b30d87b6e073b578f54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "PPPMForceComputeGPU.cuh"
#include "hoomd/TextureTools.h"
// __scalar2int_rd is __float2int_rd in single, __double2int_rd in double
#ifdef SINGLE_PRECISION
#define __scalar2int_rd __float2int_rd
#else
#define __scalar2int_rd __double2int_rd
#endif
#define GPU_PPPM_MAX_ORDER 7
// workaround for HIP bug
#ifdef __HIP_PLATFORM_HCC__
inline __device__ float myAtomicAdd(float* address, float val)
{
unsigned int* address_as_uint = (unsigned int*)address;
unsigned int old = *address_as_uint, assumed;
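// Compare-and-swap loop: retry until the word at address is unchanged between
// the read and the swap, i.e. no other thread intervened.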
do
{
assumed = old;
old = atomicCAS(address_as_uint, assumed, __float_as_uint(val + __uint_as_float(assumed)));
} while (assumed != old);
return __uint_as_float(old);
}
#else
inline __device__ float myAtomicAdd(float* address, float val)
{
return atomicAdd(address, val);
}
#endif
//! GPU implementation of sinc(x)==sin(x)/x
__device__ Scalar gpu_sinc(Scalar x)
{
Scalar sinc = 0;
//! Coefficients of a power expansion of sin(x)/x
const Scalar sinc_coeff[] = {Scalar(1.0),
Scalar(-1.0 / 6.0),
Scalar(1.0 / 120.0),
Scalar(-1.0 / 5040.0),
Scalar(1.0 / 362880.0),
Scalar(-1.0 / 39916800.0)};
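// For x*x <= 1 the truncated Taylor series avoids the 0/0 form (and precision
// loss) of sin(x)/x near zero; larger arguments are evaluated directly.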
if (x * x <= Scalar(1.0))
{
Scalar term = Scalar(1.0);
for (unsigned int i = 0; i < 6; ++i)
{
sinc += sinc_coeff[i] * term;
term *= x * x;
}
}
else
{
sinc = fast::sin(x) / x;
}
return sinc;
}
__device__ int3 find_cell(const Scalar3& pos,
const unsigned int& inner_nx,
const unsigned int& inner_ny,
const unsigned int& inner_nz,
const uint3& n_ghost_cells,
const BoxDim& box,
int order,
Scalar3& dr)
{
// compute coordinates in units of the mesh size
Scalar3 f = box.makeFraction(pos);
uchar3 periodic = box.getPeriodic();
Scalar3 reduced_pos
= make_scalar3(f.x * (Scalar)inner_nx, f.y * (Scalar)inner_ny, f.z * (Scalar)inner_nz);
reduced_pos += make_scalar3(n_ghost_cells.x, n_ghost_cells.y, n_ghost_cells.z);
Scalar shift, shiftone;
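// Odd assignment orders center the stencil on the nearest mesh point; even
// orders center it between mesh points, hence the half-cell shift handling below.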
if (order % 2)
{
shift = Scalar(0.5);
shiftone = Scalar(0.0);
}
else
{
shift = Scalar(0.0);
shiftone = Scalar(0.5);
}
int ix = __scalar2int_rd(reduced_pos.x + shift);
int iy = __scalar2int_rd(reduced_pos.y + shift);
int iz = __scalar2int_rd(reduced_pos.z + shift);
// set distance to cell center
dr.x = shiftone + (Scalar)ix - reduced_pos.x;
dr.y = shiftone + (Scalar)iy - reduced_pos.y;
dr.z = shiftone + (Scalar)iz - reduced_pos.z;
// handle particles on the boundary
if (periodic.x && ix == (int)inner_nx)
ix = 0;
if (periodic.y && iy == (int)inner_ny)
iy = 0;
if (periodic.z && iz == (int)inner_nz)
iz = 0;
return make_int3(ix, iy, iz);
}
__global__ void gpu_assign_particles_kernel(const uint3 mesh_dim,
const uint3 n_ghost_bins,
unsigned int work_size,
const unsigned int* d_index_array,
const Scalar4* d_postype,
const Scalar* d_charge,
hipfftComplex* d_mesh,
Scalar V_cell,
int order,
unsigned int offset,
BoxDim box,
const Scalar* d_rho_coeff)
{
extern __shared__ Scalar s_coeff[];
// load in interpolation coefficients
unsigned int ncoeffs = order * (2 * order + 1);
for (unsigned int cur_offset = 0; cur_offset < ncoeffs; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < ncoeffs)
{
s_coeff[cur_offset + threadIdx.x] = d_rho_coeff[cur_offset + threadIdx.x];
}
}
__syncthreads();
unsigned int work_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (work_idx >= work_size)
return;
unsigned int group_idx = work_idx + offset;
int3 bin_dim = make_int3(mesh_dim.x + 2 * n_ghost_bins.x,
mesh_dim.y + 2 * n_ghost_bins.y,
mesh_dim.z + 2 * n_ghost_bins.z);
// grid coordinates of bin (column-major)
unsigned int idx = d_index_array[group_idx];
Scalar4 postype = d_postype[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
Scalar qi = d_charge[idx];
// compute coordinates in units of the cell size
Scalar3 dr = make_scalar3(0, 0, 0);
int3 bin_coord
= find_cell(pos, mesh_dim.x, mesh_dim.y, mesh_dim.z, n_ghost_bins, box, order, dr);
// ignore particles that are not within our domain (the error should be caught by HOOMD's cell
// list)
if (bin_coord.x < 0 || bin_coord.x >= bin_dim.x || bin_coord.y < 0 || bin_coord.y >= bin_dim.y
|| bin_coord.z < 0 || bin_coord.z >= bin_dim.z)
{
return;
}
int i = bin_coord.x;
int j = bin_coord.y;
int k = bin_coord.z;
int nlower = -(order - 1) / 2;
int nupper = order / 2;
Scalar result;
int mult_fact = 2 * order + 1;
Scalar x0 = qi;
bool ignore_x = false;
bool ignore_y = false;
bool ignore_z = false;
// loop over neighboring bins
for (int l = nlower; l <= nupper; ++l)
{
// precalculate assignment factor
result = Scalar(0.0);
for (int iorder = order - 1; iorder >= 0; iorder--)
{
result = s_coeff[l - nlower + iorder * mult_fact] + result * dr.x;
}
Scalar y0 = x0 * result;
int neighi = i + l;
if (neighi >= (int)bin_dim.x)
{
if (!n_ghost_bins.x)
neighi -= (int)bin_dim.x;
else
ignore_x = true;
}
else if (neighi < 0)
{
if (!n_ghost_bins.x)
neighi += (int)bin_dim.x;
else
ignore_x = true;
}
for (int m = nlower; m <= nupper; ++m)
{
result = Scalar(0.0);
for (int iorder = order - 1; iorder >= 0; iorder--)
{
result = s_coeff[m - nlower + iorder * mult_fact] + result * dr.y;
}
Scalar z0 = y0 * result;
int neighj = j + m;
if (neighj >= (int)bin_dim.y)
{
if (!n_ghost_bins.y)
neighj -= (int)bin_dim.y;
else
ignore_y = true;
}
else if (neighj < 0)
{
if (!n_ghost_bins.y)
neighj += (int)bin_dim.y;
else
ignore_y = true;
}
for (int n = nlower; n <= nupper; ++n)
{
result = Scalar(0.0);
for (int iorder = order - 1; iorder >= 0; iorder--)
{
result = s_coeff[n - nlower + iorder * mult_fact] + result * dr.z;
}
int neighk = k + n;
if (neighk >= (int)bin_dim.z)
{
if (!n_ghost_bins.z)
neighk -= (int)bin_dim.z;
else
ignore_z = true;
}
else if (neighk < 0)
{
if (!n_ghost_bins.z)
neighk += (int)bin_dim.z;
else
ignore_z = true;
}
if (!ignore_x && !ignore_y && !ignore_z)
{
// write out to global memory using row-major
unsigned int cell_idx = neighi + bin_dim.x * (neighj + bin_dim.y * neighk);
// compute fraction of particle density assigned to cell
// from particles in this bin
myAtomicAdd(&d_mesh[cell_idx].x, z0 * result / V_cell);
}
ignore_z = false;
}
ignore_y = false;
}
ignore_x = false;
} // end of loop over neighboring bins
}
__global__ void gpu_reduce_meshes(const unsigned int mesh_elements,
const hipfftComplex* d_mesh_scratch,
hipfftComplex* d_mesh,
unsigned int ngpu)
{
unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= mesh_elements)
return;
hipfftComplex res;
res.x = 0;
res.y = 0;
// reduce over all temporary meshes
for (unsigned int igpu = 0; igpu < ngpu; ++igpu)
{
hipfftComplex m = d_mesh_scratch[idx + igpu * mesh_elements];
res.x += m.x;
res.y += m.y;
}
d_mesh[idx] = res;
}
void gpu_assign_particles(const uint3 mesh_dim,
const uint3 n_ghost_bins,
const uint3 grid_dim,
unsigned int group_size,
const unsigned int* d_index_array,
const Scalar4* d_postype,
const Scalar* d_charge,
hipfftComplex* d_mesh,
hipfftComplex* d_mesh_scratch,
const unsigned int mesh_elements,
int order,
const BoxDim& box,
unsigned int block_size,
const Scalar* d_rho_coeff,
const hipDeviceProp_t& dev_prop,
const GPUPartition& gpu_partition)
{
hipMemsetAsync(d_mesh, 0, sizeof(hipfftComplex) * grid_dim.x * grid_dim.y * grid_dim.z);
Scalar V_cell = box.getVolume() / (Scalar)(mesh_dim.x * mesh_dim.y * mesh_dim.z);
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_assign_particles_kernel);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(max_block_size, block_size);
while (attr.sharedSizeBytes >= dev_prop.sharedMemPerBlock)
{
run_block_size -= dev_prop.warpSize;
}
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
unsigned int ngpu = gpu_partition.getNumActiveGPUs();
for (int idev = ngpu - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
if (ngpu > 1)
{
// zero the temporary mesh array
hipMemsetAsync(d_mesh_scratch + idev * mesh_elements,
0,
sizeof(hipfftComplex) * mesh_elements);
}
unsigned int nwork = range.second - range.first;
unsigned int n_blocks = nwork / run_block_size + 1;
const size_t shared_bytes = order * (2 * order + 1) * sizeof(Scalar);
hipLaunchKernelGGL((gpu_assign_particles_kernel),
dim3(n_blocks),
dim3(run_block_size),
shared_bytes,
0,
mesh_dim,
n_ghost_bins,
nwork,
d_index_array,
d_postype,
d_charge,
ngpu > 1 ? d_mesh_scratch + idev * mesh_elements : d_mesh,
V_cell,
order,
range.first,
box,
d_rho_coeff);
}
}
//! Reduce temporary arrays for every GPU
void gpu_reduce_meshes(const unsigned int mesh_elements,
const hipfftComplex* d_mesh_scratch,
hipfftComplex* d_mesh,
const unsigned int ngpu,
const unsigned int block_size)
{
// reduce meshes on GPU 0
hipLaunchKernelGGL((gpu_reduce_meshes),
dim3(mesh_elements / block_size + 1),
dim3(block_size),
0,
0,
mesh_elements,
d_mesh_scratch,
d_mesh,
ngpu);
}
__global__ void gpu_compute_mesh_virial_kernel(const unsigned int n_wave_vectors,
hipfftComplex* d_fourier_mesh,
Scalar* d_inf_f,
Scalar* d_virial_mesh,
const Scalar3* d_k,
const bool exclude_dc,
Scalar kappa)
{
unsigned int idx;
idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= n_wave_vectors)
return;
if (!exclude_dc || idx != 0)
{
// non-zero wave vector
hipfftComplex fourier = d_fourier_mesh[idx];
Scalar3 k = d_k[idx];
Scalar rhog = (fourier.x * fourier.x + fourier.y * fourier.y) * d_inf_f[idx];
Scalar vterm = -Scalar(2.0) * (Scalar(1.0) / dot(k, k) + Scalar(0.25) / (kappa * kappa));
d_virial_mesh[0 * n_wave_vectors + idx] = rhog * (Scalar(1.0) + vterm * k.x * k.x); // xx
d_virial_mesh[1 * n_wave_vectors + idx] = rhog * (vterm * k.x * k.y); // xy
d_virial_mesh[2 * n_wave_vectors + idx] = rhog * (vterm * k.x * k.z); // xz
d_virial_mesh[3 * n_wave_vectors + idx] = rhog * (Scalar(1.0) + vterm * k.y * k.y); // yy
d_virial_mesh[4 * n_wave_vectors + idx] = rhog * (vterm * k.y * k.z); // yz
d_virial_mesh[5 * n_wave_vectors + idx] = rhog * (Scalar(1.0) + vterm * k.z * k.z); // zz
}
else
{
d_virial_mesh[0 * n_wave_vectors + idx] = Scalar(0.0);
d_virial_mesh[1 * n_wave_vectors + idx] = Scalar(0.0);
d_virial_mesh[2 * n_wave_vectors + idx] = Scalar(0.0);
d_virial_mesh[3 * n_wave_vectors + idx] = Scalar(0.0);
d_virial_mesh[4 * n_wave_vectors + idx] = Scalar(0.0);
d_virial_mesh[5 * n_wave_vectors + idx] = Scalar(0.0);
}
}
void gpu_compute_mesh_virial(const unsigned int n_wave_vectors,
hipfftComplex* d_fourier_mesh,
Scalar* d_inf_f,
Scalar* d_virial_mesh,
const Scalar3* d_k,
const bool exclude_dc,
Scalar kappa)
{
const unsigned int block_size = 256;
dim3 grid(n_wave_vectors / block_size + 1, 1, 1);
hipLaunchKernelGGL((gpu_compute_mesh_virial_kernel),
dim3(grid),
dim3(block_size),
0,
0,
n_wave_vectors,
d_fourier_mesh,
d_inf_f,
d_virial_mesh,
d_k,
exclude_dc,
kappa);
}
__global__ void gpu_update_meshes_kernel(const unsigned int n_wave_vectors,
hipfftComplex* d_fourier_mesh,
hipfftComplex* d_fourier_mesh_G_x,
hipfftComplex* d_fourier_mesh_G_y,
hipfftComplex* d_fourier_mesh_G_z,
const Scalar* d_inf_f,
const Scalar3* d_k,
unsigned int NNN)
{
unsigned int k;
k = blockDim.x * blockIdx.x + threadIdx.x;
if (k >= n_wave_vectors)
return;
hipfftComplex f = d_fourier_mesh[k];
Scalar scaled_inf_f = d_inf_f[k] / ((Scalar)NNN);
Scalar3 kvec = d_k[k];
// Normalization
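// Fourier-space differentiation: multiplying by -i*k, together with the
// influence function, turns the transformed charge mesh into the three field
// components recovered by the inverse FFT.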
hipfftComplex fourier_G_x;
fourier_G_x.x = f.y * kvec.x * scaled_inf_f;
fourier_G_x.y = -f.x * kvec.x * scaled_inf_f;
hipfftComplex fourier_G_y;
fourier_G_y.x = f.y * kvec.y * scaled_inf_f;
fourier_G_y.y = -f.x * kvec.y * scaled_inf_f;
hipfftComplex fourier_G_z;
fourier_G_z.x = f.y * kvec.z * scaled_inf_f;
fourier_G_z.y = -f.x * kvec.z * scaled_inf_f;
// store in global memory
d_fourier_mesh_G_x[k] = fourier_G_x;
d_fourier_mesh_G_y[k] = fourier_G_y;
d_fourier_mesh_G_z[k] = fourier_G_z;
}
void gpu_update_meshes(const unsigned int n_wave_vectors,
hipfftComplex* d_fourier_mesh,
hipfftComplex* d_fourier_mesh_G_x,
hipfftComplex* d_fourier_mesh_G_y,
hipfftComplex* d_fourier_mesh_G_z,
const Scalar* d_inf_f,
const Scalar3* d_k,
unsigned int NNN,
unsigned int block_size)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_update_meshes_kernel);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(max_block_size, block_size);
dim3 grid(n_wave_vectors / run_block_size + 1, 1, 1);
hipLaunchKernelGGL((gpu_update_meshes_kernel),
dim3(grid),
dim3(run_block_size),
0,
0,
n_wave_vectors,
d_fourier_mesh,
d_fourier_mesh_G_x,
d_fourier_mesh_G_y,
d_fourier_mesh_G_z,
d_inf_f,
d_k,
NNN);
}
__global__ void gpu_compute_forces_kernel(const unsigned int work_size,
const Scalar4* d_postype,
Scalar4* d_force,
const uint3 grid_dim,
const uint3 n_ghost_cells,
const Scalar* d_charge,
const BoxDim box,
int order,
const unsigned int* d_index_array,
const hipfftComplex* inv_fourier_mesh_x,
const hipfftComplex* inv_fourier_mesh_y,
const hipfftComplex* inv_fourier_mesh_z,
const Scalar* d_rho_coeff,
const unsigned int offset)
{
extern __shared__ Scalar s_coeff[];
// load in interpolation coefficients
unsigned int ncoeffs = order * (2 * order + 1);
for (unsigned int cur_offset = 0; cur_offset < ncoeffs; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < ncoeffs)
{
s_coeff[cur_offset + threadIdx.x] = d_rho_coeff[cur_offset + threadIdx.x];
}
}
__syncthreads();
unsigned int work_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (work_idx >= work_size)
return;
unsigned int group_idx = work_idx + offset;
unsigned int idx = d_index_array[group_idx];
int3 inner_dim = make_int3(grid_dim.x - 2 * n_ghost_cells.x,
grid_dim.y - 2 * n_ghost_cells.y,
grid_dim.z - 2 * n_ghost_cells.z);
Scalar4 postype = d_postype[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
unsigned int type = __scalar_as_int(postype.w);
Scalar qi = d_charge[idx];
Scalar3 dr = make_scalar3(0, 0, 0);
// find cell the particle is in
int3 cell_coord
= find_cell(pos, inner_dim.x, inner_dim.y, inner_dim.z, n_ghost_cells, box, order, dr);
// ignore particles that are not within our domain (the error should be caught by HOOMD's cell
// list)
if (cell_coord.x < 0 || cell_coord.x >= (int)grid_dim.x || cell_coord.y < 0
|| cell_coord.y >= (int)grid_dim.y || cell_coord.z < 0 || cell_coord.z >= (int)grid_dim.z)
{
return;
}
Scalar3 force = make_scalar3(0.0, 0.0, 0.0);
int nlower = -(order - 1) / 2;
int nupper = order / 2;
Scalar result;
int mult_fact = 2 * order + 1;
// back-interpolate forces from neighboring mesh points
for (int l = nlower; l <= nupper; ++l)
{
result = Scalar(0.0);
for (int k = order - 1; k >= 0; k--)
{
result = s_coeff[l - nlower + k * mult_fact] + result * dr.x;
}
Scalar x0 = result;
for (int m = nlower; m <= nupper; ++m)
{
result = Scalar(0.0);
for (int k = order - 1; k >= 0; k--)
{
result = s_coeff[m - nlower + k * mult_fact] + result * dr.y;
}
Scalar y0 = x0 * result;
for (int n = nlower; n <= nupper; ++n)
{
result = Scalar(0.0);
for (int k = order - 1; k >= 0; k--)
{
result = s_coeff[n - nlower + k * mult_fact] + result * dr.z;
}
Scalar z0 = y0 * result;
int neighl = (int)cell_coord.x + l;
int neighm = (int)cell_coord.y + m;
int neighn = (int)cell_coord.z + n;
if (!n_ghost_cells.x)
{
if (neighl >= (int)grid_dim.x)
neighl -= grid_dim.x;
else if (neighl < 0)
neighl += grid_dim.x;
}
if (!n_ghost_cells.y)
{
if (neighm >= (int)grid_dim.y)
neighm -= grid_dim.y;
else if (neighm < 0)
neighm += grid_dim.y;
}
if (!n_ghost_cells.z)
{
if (neighn >= (int)grid_dim.z)
neighn -= grid_dim.z;
else if (neighn < 0)
neighn += grid_dim.z;
}
// use column-major layout
unsigned int cell_idx = neighl + grid_dim.x * (neighm + grid_dim.y * neighn);
hipfftComplex inv_mesh_x = inv_fourier_mesh_x[cell_idx];
hipfftComplex inv_mesh_y = inv_fourier_mesh_y[cell_idx];
hipfftComplex inv_mesh_z = inv_fourier_mesh_z[cell_idx];
force.x += qi * z0 * inv_mesh_x.x;
force.y += qi * z0 * inv_mesh_y.x;
force.z += qi * z0 * inv_mesh_z.x;
}
}
} // end neighbor cells loop
d_force[idx] = make_scalar4(force.x, force.y, force.z, 0.0);
}
void gpu_compute_forces(const unsigned int N,
const Scalar4* d_postype,
Scalar4* d_force,
const hipfftComplex* d_inv_fourier_mesh_x,
const hipfftComplex* d_inv_fourier_mesh_y,
const hipfftComplex* d_inv_fourier_mesh_z,
const uint3 grid_dim,
const uint3 n_ghost_cells,
const Scalar* d_charge,
const BoxDim& box,
int order,
const unsigned int* d_index_array,
const GPUPartition& gpu_partition,
const GPUPartition& all_gpu_partition,
const Scalar* d_rho_coeff,
unsigned int block_size,
bool local_fft,
unsigned int inv_mesh_elements)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_compute_forces_kernel);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(max_block_size, block_size);
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = all_gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = all_gpu_partition.getRangeAndSetGPU(idev);
// reset force array for ALL particles
hipMemsetAsync(d_force + range.first, 0, sizeof(Scalar4) * (range.second - range.first));
}
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
unsigned int n_blocks = nwork / run_block_size + 1;
const size_t shared_bytes = order * (2 * order + 1) * sizeof(Scalar);
hipLaunchKernelGGL(
(gpu_compute_forces_kernel),
dim3(n_blocks),
dim3(run_block_size),
shared_bytes,
0,
nwork,
d_postype,
d_force,
grid_dim,
n_ghost_cells,
d_charge,
box,
order,
d_index_array,
local_fft ? d_inv_fourier_mesh_x + idev * inv_mesh_elements : d_inv_fourier_mesh_x,
local_fft ? d_inv_fourier_mesh_y + idev * inv_mesh_elements : d_inv_fourier_mesh_y,
local_fft ? d_inv_fourier_mesh_z + idev * inv_mesh_elements : d_inv_fourier_mesh_z,
d_rho_coeff,
range.first);
}
}
__global__ void kernel_calculate_pe_partial(int n_wave_vectors,
Scalar* sum_partial,
const hipfftComplex* d_fourier_mesh,
const Scalar* d_inf_f,
const bool exclude_dc)
{
HIP_DYNAMIC_SHARED(Scalar, sdata)
unsigned int tidx = threadIdx.x;
unsigned int j;
j = blockDim.x * blockIdx.x + threadIdx.x;
Scalar mySum = Scalar(0.0);
if (j < n_wave_vectors)
{
if (!exclude_dc || j != 0)
{
mySum = d_fourier_mesh[j].x * d_fourier_mesh[j].x
+ d_fourier_mesh[j].y * d_fourier_mesh[j].y;
mySum *= d_inf_f[j];
}
}
sdata[tidx] = mySum;
__syncthreads();
// reduce the sum
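// Shared-memory tree reduction; the halving scheme assumes blockDim.x is a
// power of two.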
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (tidx < offs)
{
sdata[tidx] += sdata[tidx + offs];
}
offs >>= 1;
__syncthreads();
}
// write result to global memory
if (tidx == 0)
sum_partial[blockIdx.x] = sdata[0];
}
__global__ void kernel_final_reduce_pe(Scalar* sum_partial, unsigned int nblocks, Scalar* sum)
{
HIP_DYNAMIC_SHARED(Scalar, smem)
if (threadIdx.x == 0)
*sum = Scalar(0.0);
for (int start = 0; start < nblocks; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < nblocks)
smem[threadIdx.x] = sum_partial[start + threadIdx.x];
else
smem[threadIdx.x] = Scalar(0.0);
__syncthreads();
// reduce the sum
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
smem[threadIdx.x] += smem[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
{
*sum += smem[0];
}
}
}
void gpu_compute_pe(unsigned int n_wave_vectors,
Scalar* d_sum_partial,
Scalar* d_sum,
const hipfftComplex* d_fourier_mesh,
const Scalar* d_inf_f,
const unsigned int block_size,
const uint3 mesh_dim,
const bool exclude_dc)
{
unsigned int n_blocks = n_wave_vectors / block_size + 1;
unsigned int shared_size = (unsigned int)(block_size * sizeof(Scalar));
dim3 grid(n_blocks, 1, 1);
hipLaunchKernelGGL((kernel_calculate_pe_partial),
dim3(grid),
dim3(block_size),
shared_size,
0,
n_wave_vectors,
d_sum_partial,
d_fourier_mesh,
d_inf_f,
exclude_dc);
// calculate final sum of mesh values
const unsigned int final_block_size = 256;
shared_size = final_block_size * sizeof(Scalar);
hipLaunchKernelGGL((kernel_final_reduce_pe),
dim3(1),
dim3(final_block_size),
shared_size,
0,
d_sum_partial,
n_blocks,
d_sum);
}
__global__ void kernel_calculate_virial_partial(int n_wave_vectors,
Scalar* sum_virial_partial,
const Scalar* d_mesh_virial)
{
HIP_DYNAMIC_SHARED(Scalar, sdata)
unsigned int j;
j = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int tidx = threadIdx.x;
Scalar mySum_xx = Scalar(0.0);
Scalar mySum_xy = Scalar(0.0);
Scalar mySum_xz = Scalar(0.0);
Scalar mySum_yy = Scalar(0.0);
Scalar mySum_yz = Scalar(0.0);
Scalar mySum_zz = Scalar(0.0);
if (j < n_wave_vectors)
{
mySum_xx = d_mesh_virial[0 * n_wave_vectors + j];
mySum_xy = d_mesh_virial[1 * n_wave_vectors + j];
mySum_xz = d_mesh_virial[2 * n_wave_vectors + j];
mySum_yy = d_mesh_virial[3 * n_wave_vectors + j];
mySum_yz = d_mesh_virial[4 * n_wave_vectors + j];
mySum_zz = d_mesh_virial[5 * n_wave_vectors + j];
}
sdata[0 * blockDim.x + tidx] = mySum_xx;
sdata[1 * blockDim.x + tidx] = mySum_xy;
sdata[2 * blockDim.x + tidx] = mySum_xz;
sdata[3 * blockDim.x + tidx] = mySum_yy;
sdata[4 * blockDim.x + tidx] = mySum_yz;
sdata[5 * blockDim.x + tidx] = mySum_zz;
__syncthreads();
// reduce the sum
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (tidx < offs)
{
sdata[0 * blockDim.x + tidx] += sdata[0 * blockDim.x + tidx + offs];
sdata[1 * blockDim.x + tidx] += sdata[1 * blockDim.x + tidx + offs];
sdata[2 * blockDim.x + tidx] += sdata[2 * blockDim.x + tidx + offs];
sdata[3 * blockDim.x + tidx] += sdata[3 * blockDim.x + tidx + offs];
sdata[4 * blockDim.x + tidx] += sdata[4 * blockDim.x + tidx + offs];
sdata[5 * blockDim.x + tidx] += sdata[5 * blockDim.x + tidx + offs];
}
offs >>= 1;
__syncthreads();
}
// write result to global memory
if (tidx == 0)
{
sum_virial_partial[0 * gridDim.x + blockIdx.x] = sdata[0 * blockDim.x];
sum_virial_partial[1 * gridDim.x + blockIdx.x] = sdata[1 * blockDim.x];
sum_virial_partial[2 * gridDim.x + blockIdx.x] = sdata[2 * blockDim.x];
sum_virial_partial[3 * gridDim.x + blockIdx.x] = sdata[3 * blockDim.x];
sum_virial_partial[4 * gridDim.x + blockIdx.x] = sdata[4 * blockDim.x];
sum_virial_partial[5 * gridDim.x + blockIdx.x] = sdata[5 * blockDim.x];
}
}
__global__ void
kernel_final_reduce_virial(Scalar* sum_virial_partial, unsigned int nblocks, Scalar* sum_virial)
{
HIP_DYNAMIC_SHARED(Scalar, smem)
if (threadIdx.x == 0)
{
sum_virial[0] = Scalar(0.0);
sum_virial[1] = Scalar(0.0);
sum_virial[2] = Scalar(0.0);
sum_virial[3] = Scalar(0.0);
sum_virial[4] = Scalar(0.0);
sum_virial[5] = Scalar(0.0);
}
for (int start = 0; start < nblocks; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < nblocks)
{
smem[0 * blockDim.x + threadIdx.x]
= sum_virial_partial[0 * nblocks + start + threadIdx.x];
smem[1 * blockDim.x + threadIdx.x]
= sum_virial_partial[1 * nblocks + start + threadIdx.x];
smem[2 * blockDim.x + threadIdx.x]
= sum_virial_partial[2 * nblocks + start + threadIdx.x];
smem[3 * blockDim.x + threadIdx.x]
= sum_virial_partial[3 * nblocks + start + threadIdx.x];
smem[4 * blockDim.x + threadIdx.x]
= sum_virial_partial[4 * nblocks + start + threadIdx.x];
smem[5 * blockDim.x + threadIdx.x]
= sum_virial_partial[5 * nblocks + start + threadIdx.x];
}
else
{
smem[0 * blockDim.x + threadIdx.x] = Scalar(0.0);
smem[1 * blockDim.x + threadIdx.x] = Scalar(0.0);
smem[2 * blockDim.x + threadIdx.x] = Scalar(0.0);
smem[3 * blockDim.x + threadIdx.x] = Scalar(0.0);
smem[4 * blockDim.x + threadIdx.x] = Scalar(0.0);
smem[5 * blockDim.x + threadIdx.x] = Scalar(0.0);
}
__syncthreads();
// reduce the sum
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
smem[0 * blockDim.x + threadIdx.x] += smem[0 * blockDim.x + threadIdx.x + offs];
smem[1 * blockDim.x + threadIdx.x] += smem[1 * blockDim.x + threadIdx.x + offs];
smem[2 * blockDim.x + threadIdx.x] += smem[2 * blockDim.x + threadIdx.x + offs];
smem[3 * blockDim.x + threadIdx.x] += smem[3 * blockDim.x + threadIdx.x + offs];
smem[4 * blockDim.x + threadIdx.x] += smem[4 * blockDim.x + threadIdx.x + offs];
smem[5 * blockDim.x + threadIdx.x] += smem[5 * blockDim.x + threadIdx.x + offs];
}
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
{
sum_virial[0] += smem[0 * blockDim.x];
sum_virial[1] += smem[1 * blockDim.x];
sum_virial[2] += smem[2 * blockDim.x];
sum_virial[3] += smem[3 * blockDim.x];
sum_virial[4] += smem[4 * blockDim.x];
sum_virial[5] += smem[5 * blockDim.x];
}
}
}
void gpu_compute_virial(unsigned int n_wave_vectors,
Scalar* d_sum_virial_partial,
Scalar* d_sum_virial,
const Scalar* d_mesh_virial,
const unsigned int block_size)
{
unsigned int n_blocks = n_wave_vectors / block_size + 1;
unsigned int shared_size = (unsigned int)(6 * block_size * sizeof(Scalar));
dim3 grid(n_blocks, 1, 1);
hipLaunchKernelGGL((kernel_calculate_virial_partial),
dim3(grid),
dim3(block_size),
shared_size,
0,
n_wave_vectors,
d_sum_virial_partial,
d_mesh_virial);
// calculate final virial values
const unsigned int final_block_size = 256;
shared_size = 6 * final_block_size * sizeof(Scalar);
hipLaunchKernelGGL((kernel_final_reduce_virial),
dim3(1),
dim3(final_block_size),
shared_size,
0,
d_sum_virial_partial,
n_blocks,
d_sum_virial);
}
template<bool local_fft>
__global__ void gpu_compute_influence_function_kernel(const uint3 mesh_dim,
const unsigned int n_wave_vectors,
const uint3 global_dim,
Scalar* d_inf_f,
Scalar3* d_k,
const Scalar3 b1,
const Scalar3 b2,
const Scalar3 b3,
const uint3 pidx,
const uint3 pdim,
int nbx,
int nby,
int nbz,
const Scalar* gf_b,
int order,
Scalar kappa,
Scalar alpha)
{
unsigned int kidx;
kidx = blockDim.x * blockIdx.x + threadIdx.x;
if (kidx >= n_wave_vectors)
return;
int l, m, n;
if (local_fft)
{
// use row-major layout
int ny = mesh_dim.y;
int nx = mesh_dim.x;
n = kidx / ny / nx;
m = (kidx - n * ny * nx) / nx;
l = kidx % nx;
}
#ifdef ENABLE_MPI
else
{
// local layout: row-major
int ny = mesh_dim.y;
int nx = mesh_dim.x;
int n_local = kidx / ny / nx;
int m_local = (kidx - n_local * ny * nx) / nx;
int l_local = kidx % nx;
// cyclic distribution
l = l_local * pdim.x + pidx.x;
m = m_local * pdim.y + pidx.y;
n = n_local * pdim.z + pidx.z;
}
#endif
// compute Miller indices
if (l >= (int)(global_dim.x / 2 + global_dim.x % 2))
l -= (int)global_dim.x;
if (m >= (int)(global_dim.y / 2 + global_dim.y % 2))
m -= (int)global_dim.y;
if (n >= (int)(global_dim.z / 2 + global_dim.z % 2))
n -= (int)global_dim.z;
Scalar val;
Scalar3 kval = (Scalar)l * b1 + (Scalar)m * b2 + (Scalar)n * b3;
Scalar3 kH = Scalar(2.0 * M_PI)
* make_scalar3(Scalar(1.0) / (Scalar)global_dim.x,
Scalar(1.0) / (Scalar)global_dim.y,
Scalar(1.0) / (Scalar)global_dim.z);
Scalar snx = fast::sin(Scalar(0.5) * l * kH.x);
Scalar snx2 = snx * snx;
Scalar sny = fast::sin(Scalar(0.5) * m * kH.y);
Scalar sny2 = sny * sny;
Scalar snz = fast::sin(Scalar(0.5) * n * kH.z);
Scalar snz2 = snz * snz;
Scalar sx(0.0), sy(0.0), sz(0.0);
for (int iorder = order - 1; iorder >= 0; iorder--)
{
sx = gf_b[iorder] + sx * snx2;
sy = gf_b[iorder] + sy * sny2;
sz = gf_b[iorder] + sz * snz2;
}
Scalar denominator = sx * sy * sz;
denominator *= denominator;
if (l != 0 || m != 0 || n != 0)
{
Scalar sum1(0.0);
Scalar numerator = Scalar(4.0 * M_PI) / dot(kval, kval);
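// Aliasing sum: accumulate periodic images of the wave vector, weighted by the
// Gaussian screening and the charge-assignment window, in the style of the
// Hockney-Eastwood optimal influence function.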
for (int ix = -nbx; ix <= nbx; ix++)
{
Scalar qx = ((Scalar)l + (Scalar)ix * global_dim.x);
Scalar3 knx = qx * b1;
Scalar argx = Scalar(0.5) * qx * kH.x;
Scalar wxs = gpu_sinc(argx);
Scalar wx(1.0);
for (int iorder = 0; iorder < order; ++iorder)
{
wx *= wxs;
}
for (int iy = -nby; iy <= nby; iy++)
{
Scalar qy = ((Scalar)m + (Scalar)iy * global_dim.y);
Scalar3 kny = qy * b2;
Scalar argy = Scalar(0.5) * qy * kH.y;
Scalar wys = gpu_sinc(argy);
Scalar wy(1.0);
for (int iorder = 0; iorder < order; ++iorder)
{
wy *= wys;
}
for (int iz = -nbz; iz <= nbz; iz++)
{
Scalar qz = ((Scalar)n + (Scalar)iz * global_dim.z);
Scalar3 knz = qz * b3;
Scalar argz = Scalar(0.5) * qz * kH.z;
Scalar wzs = gpu_sinc(argz);
Scalar wz(1.0);
for (int iorder = 0; iorder < order; ++iorder)
{
wz *= wzs;
}
Scalar3 kn = knx + kny + knz;
Scalar dot1 = dot(kn, kval);
Scalar dot2 = dot(kn, kn) + alpha * alpha;
Scalar arg_gauss = Scalar(0.25) * dot2 / kappa / kappa;
Scalar gauss = exp(-arg_gauss);
sum1 += (dot1 / dot2) * gauss * wx * wx * wy * wy * wz * wz;
}
}
}
val = numerator * sum1 / denominator;
}
else
{
val = Scalar(0.0);
}
// write out result
d_inf_f[kidx] = val;
d_k[kidx] = kval;
}
void gpu_compute_influence_function(const uint3 mesh_dim,
const uint3 global_dim,
Scalar* d_inf_f,
Scalar3* d_k,
const BoxDim& global_box,
const bool local_fft,
const uint3 pidx,
const uint3 pdim,
const Scalar EPS_HOC,
Scalar kappa,
Scalar alpha,
const Scalar* d_gf_b,
int order,
unsigned int block_size)
{
// compute reciprocal lattice vectors
Scalar3 a1 = global_box.getLatticeVector(0);
Scalar3 a2 = global_box.getLatticeVector(1);
Scalar3 a3 = global_box.getLatticeVector(2);
Scalar V_box = global_box.getVolume();
Scalar3 b1 = Scalar(2.0 * M_PI)
* make_scalar3(a2.y * a3.z - a2.z * a3.y,
a2.z * a3.x - a2.x * a3.z,
a2.x * a3.y - a2.y * a3.x)
/ V_box;
Scalar3 b2 = Scalar(2.0 * M_PI)
* make_scalar3(a3.y * a1.z - a3.z * a1.y,
a3.z * a1.x - a3.x * a1.z,
a3.x * a1.y - a3.y * a1.x)
/ V_box;
Scalar3 b3 = Scalar(2.0 * M_PI)
* make_scalar3(a1.y * a2.z - a1.z * a2.y,
a1.z * a2.x - a1.x * a2.z,
a1.x * a2.y - a1.y * a2.x)
/ V_box;
unsigned int num_wave_vectors = mesh_dim.x * mesh_dim.y * mesh_dim.z;
Scalar3 L = global_box.getL();
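// number of aliased images to sum per direction, estimated from kappa, the box lengths,
// and the error tolerance EPS_HOC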
Scalar temp = floor(((kappa * L.x / (M_PI * global_dim.x)) * pow(-log(EPS_HOC), 0.25)));
int nbx = (int)temp;
temp = floor(((kappa * L.y / (M_PI * global_dim.y)) * pow(-log(EPS_HOC), 0.25)));
int nby = (int)temp;
temp = floor(((kappa * L.z / (M_PI * global_dim.z)) * pow(-log(EPS_HOC), 0.25)));
int nbz = (int)temp;
if (local_fft)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_compute_influence_function_kernel<true>);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(max_block_size, block_size);
unsigned int n_blocks = num_wave_vectors / run_block_size;
if (num_wave_vectors % run_block_size)
n_blocks += 1;
dim3 grid(n_blocks, 1, 1);
hipLaunchKernelGGL((gpu_compute_influence_function_kernel<true>),
dim3(grid),
dim3(run_block_size),
0,
0,
mesh_dim,
num_wave_vectors,
global_dim,
d_inf_f,
d_k,
b1,
b2,
b3,
pidx,
pdim,
nbx,
nby,
nbz,
d_gf_b,
order,
kappa,
alpha);
}
#ifdef ENABLE_MPI
else
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_compute_influence_function_kernel<false>);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(max_block_size, block_size);
unsigned int n_blocks = num_wave_vectors / run_block_size;
if (num_wave_vectors % run_block_size)
n_blocks += 1;
dim3 grid(n_blocks, 1, 1);
hipLaunchKernelGGL((gpu_compute_influence_function_kernel<false>),
dim3(grid),
dim3(run_block_size),
0,
0,
mesh_dim,
num_wave_vectors,
global_dim,
d_inf_f,
d_k,
b1,
b2,
b3,
pidx,
pdim,
nbx,
nby,
nbz,
d_gf_b,
order,
kappa,
alpha);
}
#endif
}
//! The developer has chosen not to document this function
__global__ void gpu_fix_exclusions_kernel(Scalar4* d_force,
Scalar* d_virial,
const size_t virial_pitch,
const Scalar4* d_pos,
const Scalar* d_charge,
const BoxDim box,
const unsigned int* d_n_neigh,
const unsigned int* d_nlist,
const Index2D nli,
Scalar kappa,
Scalar alpha,
unsigned int* d_group_members,
unsigned int group_size)
{
// start by identifying which particle we are to handle
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
const Scalar sqrtpi = sqrtf(M_PI);
unsigned int n_neigh = d_n_neigh[idx];
Scalar4 postypei = __ldg(d_pos + idx);
Scalar3 posi = make_scalar3(postypei.x, postypei.y, postypei.z);
Scalar qi = __ldg(d_charge + idx);
// initialize the force to 0
Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar virial[6];
for (unsigned int i = 0; i < 6; i++)
virial[i] = Scalar(0.0);
unsigned int cur_j = 0;
// prefetch neighbor index
unsigned int next_j = d_nlist[nli(idx, 0)];
for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
{
{
// read the current neighbor index (MEM TRANSFER: 4 bytes)
// prefetch the next value and set the current one
cur_j = next_j;
if (neigh_idx + 1 < n_neigh)
next_j = d_nlist[nli(idx, neigh_idx + 1)];
// get the neighbor's position (MEM TRANSFER: 16 bytes)
Scalar4 postypej = __ldg(d_pos + cur_j);
Scalar3 posj = make_scalar3(postypej.x, postypej.y, postypej.z);
Scalar qj = __ldg(d_charge + cur_j);
// calculate dr (with periodic boundary conditions) (FLOPS: 3)
Scalar3 dx = posi - posj;
// apply periodic boundary conditions: (FLOPS 12)
dx = box.minImage(dx);
// calculate r squared (FLOPS: 5)
Scalar rsq = dot(dx, dx);
Scalar r = sqrtf(rsq);
Scalar qiqj = qi * qj;
Scalar expfac = fast::exp(-alpha * r);
Scalar arg1 = kappa * r - alpha / Scalar(2.0) / kappa;
Scalar arg2 = kappa * r + alpha / Scalar(2.0) / kappa;
Scalar erffac = (::erf(arg1) * expfac + expfac - fast::erfc(arg2) * exp(alpha * r))
/ (Scalar(2.0) * r);
Scalar force_divr
= qiqj
* (expfac * Scalar(2.0) * kappa / sqrtpi * fast::exp(-arg1 * arg1)
- Scalar(0.5) * alpha
* (expfac * ::erfc(arg1) + fast::exp(alpha * r) * fast::erfc(arg2))
- erffac)
/ rsq;
// subtract long-range part of pair-interaction
Scalar pair_eng = -qiqj * erffac;
Scalar force_div2r = Scalar(0.5) * force_divr;
virial[0] += dx.x * dx.x * force_div2r;
virial[1] += dx.x * dx.y * force_div2r;
virial[2] += dx.x * dx.z * force_div2r;
virial[3] += dx.y * dx.y * force_div2r;
virial[4] += dx.y * dx.z * force_div2r;
virial[5] += dx.z * dx.z * force_div2r;
force.x += dx.x * force_divr;
force.y += dx.y * force_divr;
force.z += dx.z * force_divr;
force.w += pair_eng;
}
}
force.w *= Scalar(0.5);
d_force[idx].x += force.x;
d_force[idx].y += force.y;
d_force[idx].z += force.z;
d_force[idx].w += force.w;
for (unsigned int i = 0; i < 6; i++)
d_virial[i * virial_pitch + idx] += virial[i];
}
}
//! The developer has chosen not to document this function
hipError_t gpu_fix_exclusions(Scalar4* d_force,
Scalar* d_virial,
const size_t virial_pitch,
const unsigned int Nmax,
const Scalar4* d_pos,
const Scalar* d_charge,
const BoxDim& box,
const unsigned int* d_n_ex,
const unsigned int* d_exlist,
const Index2D nex,
Scalar kappa,
Scalar alpha,
unsigned int* d_group_members,
unsigned int group_size,
int block_size)
{
dim3 grid(group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
hipLaunchKernelGGL((gpu_fix_exclusions_kernel),
dim3(grid),
dim3(threads),
0,
0,
d_force,
d_virial,
virial_pitch,
d_pos,
d_charge,
box,
d_n_ex,
d_exlist,
nex,
kappa,
alpha,
d_group_members,
group_size);
return hipSuccess;
}
| 5e34f090b39856020ba60b30d87b6e073b578f54.cu | // Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "PPPMForceComputeGPU.cuh"
#include "hoomd/TextureTools.h"
// __scalar2int_rd is __float2int_rd in single, __double2int_rd in double
#ifdef SINGLE_PRECISION
#define __scalar2int_rd __float2int_rd
#else
#define __scalar2int_rd __double2int_rd
#endif
#define GPU_PPPM_MAX_ORDER 7
// workaround for HIP bug
#ifdef __HIP_PLATFORM_HCC__
inline __device__ float myAtomicAdd(float* address, float val)
{
unsigned int* address_as_uint = (unsigned int*)address;
unsigned int old = *address_as_uint, assumed;
do
{
assumed = old;
old = atomicCAS(address_as_uint, assumed, __float_as_uint(val + __uint_as_float(assumed)));
} while (assumed != old);
return __uint_as_float(old);
}
#else
inline __device__ float myAtomicAdd(float* address, float val)
{
return atomicAdd(address, val);
}
#endif
//! GPU implementation of sinc(x)==sin(x)/x
__device__ Scalar gpu_sinc(Scalar x)
{
Scalar sinc = 0;
//! Coefficients of a power expansion of sin(x)/x
const Scalar sinc_coeff[] = {Scalar(1.0),
Scalar(-1.0 / 6.0),
Scalar(1.0 / 120.0),
Scalar(-1.0 / 5040.0),
Scalar(1.0 / 362880.0),
Scalar(-1.0 / 39916800.0)};
if (x * x <= Scalar(1.0))
{
Scalar term = Scalar(1.0);
for (unsigned int i = 0; i < 6; ++i)
{
sinc += sinc_coeff[i] * term;
term *= x * x;
}
}
else
{
sinc = fast::sin(x) / x;
}
return sinc;
}
__device__ int3 find_cell(const Scalar3& pos,
const unsigned int& inner_nx,
const unsigned int& inner_ny,
const unsigned int& inner_nz,
const uint3& n_ghost_cells,
const BoxDim& box,
int order,
Scalar3& dr)
{
// compute coordinates in units of the mesh size
Scalar3 f = box.makeFraction(pos);
uchar3 periodic = box.getPeriodic();
Scalar3 reduced_pos
= make_scalar3(f.x * (Scalar)inner_nx, f.y * (Scalar)inner_ny, f.z * (Scalar)inner_nz);
reduced_pos += make_scalar3(n_ghost_cells.x, n_ghost_cells.y, n_ghost_cells.z);
Scalar shift, shiftone;
if (order % 2)
{
shift = Scalar(0.5);
shiftone = Scalar(0.0);
}
else
{
shift = Scalar(0.0);
shiftone = Scalar(0.5);
}
int ix = __scalar2int_rd(reduced_pos.x + shift);
int iy = __scalar2int_rd(reduced_pos.y + shift);
int iz = __scalar2int_rd(reduced_pos.z + shift);
// set distance to cell center
dr.x = shiftone + (Scalar)ix - reduced_pos.x;
dr.y = shiftone + (Scalar)iy - reduced_pos.y;
dr.z = shiftone + (Scalar)iz - reduced_pos.z;
// handle particles on the boundary
if (periodic.x && ix == (int)inner_nx)
ix = 0;
if (periodic.y && iy == (int)inner_ny)
iy = 0;
if (periodic.z && iz == (int)inner_nz)
iz = 0;
return make_int3(ix, iy, iz);
}
__global__ void gpu_assign_particles_kernel(const uint3 mesh_dim,
const uint3 n_ghost_bins,
unsigned int work_size,
const unsigned int* d_index_array,
const Scalar4* d_postype,
const Scalar* d_charge,
hipfftComplex* d_mesh,
Scalar V_cell,
int order,
unsigned int offset,
BoxDim box,
const Scalar* d_rho_coeff)
{
extern __shared__ Scalar s_coeff[];
// load in interpolation coefficients
unsigned int ncoeffs = order * (2 * order + 1);
for (unsigned int cur_offset = 0; cur_offset < ncoeffs; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < ncoeffs)
{
s_coeff[cur_offset + threadIdx.x] = d_rho_coeff[cur_offset + threadIdx.x];
}
}
__syncthreads();
unsigned int work_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (work_idx >= work_size)
return;
unsigned int group_idx = work_idx + offset;
int3 bin_dim = make_int3(mesh_dim.x + 2 * n_ghost_bins.x,
mesh_dim.y + 2 * n_ghost_bins.y,
mesh_dim.z + 2 * n_ghost_bins.z);
// grid coordinates of bin (column-major)
unsigned int idx = d_index_array[group_idx];
Scalar4 postype = d_postype[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
Scalar qi = d_charge[idx];
// compute coordinates in units of the cell size
Scalar3 dr = make_scalar3(0, 0, 0);
int3 bin_coord
= find_cell(pos, mesh_dim.x, mesh_dim.y, mesh_dim.z, n_ghost_bins, box, order, dr);
// ignore particles that are not within our domain (the error should be caught by HOOMD's cell
// list)
if (bin_coord.x < 0 || bin_coord.x >= bin_dim.x || bin_coord.y < 0 || bin_coord.y >= bin_dim.y
|| bin_coord.z < 0 || bin_coord.z >= bin_dim.z)
{
return;
}
int i = bin_coord.x;
int j = bin_coord.y;
int k = bin_coord.z;
int nlower = -(order - 1) / 2;
int nupper = order / 2;
Scalar result;
int mult_fact = 2 * order + 1;
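// s_coeff holds the charge-assignment coefficients as a table indexed by
// (point offset) + (polynomial term) * (2*order + 1)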
Scalar x0 = qi;
bool ignore_x = false;
bool ignore_y = false;
bool ignore_z = false;
// loop over neighboring bins
for (int l = nlower; l <= nupper; ++l)
{
// precalculate assignment factor
result = Scalar(0.0);
for (int iorder = order - 1; iorder >= 0; iorder--)
{
result = s_coeff[l - nlower + iorder * mult_fact] + result * dr.x;
}
Scalar y0 = x0 * result;
int neighi = i + l;
if (neighi >= (int)bin_dim.x)
{
if (!n_ghost_bins.x)
neighi -= (int)bin_dim.x;
else
ignore_x = true;
}
else if (neighi < 0)
{
if (!n_ghost_bins.x)
neighi += (int)bin_dim.x;
else
ignore_x = true;
}
for (int m = nlower; m <= nupper; ++m)
{
result = Scalar(0.0);
for (int iorder = order - 1; iorder >= 0; iorder--)
{
result = s_coeff[m - nlower + iorder * mult_fact] + result * dr.y;
}
Scalar z0 = y0 * result;
int neighj = j + m;
if (neighj >= (int)bin_dim.y)
{
if (!n_ghost_bins.y)
neighj -= (int)bin_dim.y;
else
ignore_y = true;
}
else if (neighj < 0)
{
if (!n_ghost_bins.y)
neighj += (int)bin_dim.y;
else
ignore_y = true;
}
for (int n = nlower; n <= nupper; ++n)
{
result = Scalar(0.0);
for (int iorder = order - 1; iorder >= 0; iorder--)
{
result = s_coeff[n - nlower + iorder * mult_fact] + result * dr.z;
}
int neighk = k + n;
if (neighk >= (int)bin_dim.z)
{
if (!n_ghost_bins.z)
neighk -= (int)bin_dim.z;
else
ignore_z = true;
}
else if (neighk < 0)
{
if (!n_ghost_bins.z)
neighk += (int)bin_dim.z;
else
ignore_z = true;
}
if (!ignore_x && !ignore_y && !ignore_z)
{
// write out to global memory using row-major
unsigned int cell_idx = neighi + bin_dim.x * (neighj + bin_dim.y * neighk);
// compute fraction of particle density assigned to cell
// from particles in this bin
myAtomicAdd(&d_mesh[cell_idx].x, z0 * result / V_cell);
}
ignore_z = false;
}
ignore_y = false;
}
ignore_x = false;
} // end of loop over neighboring bins
}
__global__ void gpu_reduce_meshes(const unsigned int mesh_elements,
const hipfftComplex* d_mesh_scratch,
hipfftComplex* d_mesh,
unsigned int ngpu)
{
unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= mesh_elements)
return;
hipfftComplex res;
res.x = 0;
res.y = 0;
// reduce over all temporary meshes
for (unsigned int igpu = 0; igpu < ngpu; ++igpu)
{
hipfftComplex m = d_mesh_scratch[idx + igpu * mesh_elements];
res.x += m.x;
res.y += m.y;
}
d_mesh[idx] = res;
}
void gpu_assign_particles(const uint3 mesh_dim,
const uint3 n_ghost_bins,
const uint3 grid_dim,
unsigned int group_size,
const unsigned int* d_index_array,
const Scalar4* d_postype,
const Scalar* d_charge,
hipfftComplex* d_mesh,
hipfftComplex* d_mesh_scratch,
const unsigned int mesh_elements,
int order,
const BoxDim& box,
unsigned int block_size,
const Scalar* d_rho_coeff,
const hipDeviceProp_t& dev_prop,
const GPUPartition& gpu_partition)
{
hipMemsetAsync(d_mesh, 0, sizeof(hipfftComplex) * grid_dim.x * grid_dim.y * grid_dim.z);
Scalar V_cell = box.getVolume() / (Scalar)(mesh_dim.x * mesh_dim.y * mesh_dim.z);
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_assign_particles_kernel);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(max_block_size, block_size);
while (attr.sharedSizeBytes >= dev_prop.sharedMemPerBlock)
{
run_block_size -= dev_prop.warpSize;
}
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
unsigned int ngpu = gpu_partition.getNumActiveGPUs();
for (int idev = ngpu - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
if (ngpu > 1)
{
// zero the temporary mesh array
hipMemsetAsync(d_mesh_scratch + idev * mesh_elements,
0,
sizeof(hipfftComplex) * mesh_elements);
}
unsigned int nwork = range.second - range.first;
unsigned int n_blocks = nwork / run_block_size + 1;
const size_t shared_bytes = order * (2 * order + 1) * sizeof(Scalar);
hipLaunchKernelGGL((gpu_assign_particles_kernel),
dim3(n_blocks),
dim3(run_block_size),
shared_bytes,
0,
mesh_dim,
n_ghost_bins,
nwork,
d_index_array,
d_postype,
d_charge,
ngpu > 1 ? d_mesh_scratch + idev * mesh_elements : d_mesh,
V_cell,
order,
range.first,
box,
d_rho_coeff);
}
}
//! Reduce temporary arrays for every GPU
void gpu_reduce_meshes(const unsigned int mesh_elements,
const hipfftComplex* d_mesh_scratch,
hipfftComplex* d_mesh,
const unsigned int ngpu,
const unsigned int block_size)
{
// reduce meshes on GPU 0
hipLaunchKernelGGL((gpu_reduce_meshes),
dim3(mesh_elements / block_size + 1),
dim3(block_size),
0,
0,
mesh_elements,
d_mesh_scratch,
d_mesh,
ngpu);
}
__global__ void gpu_compute_mesh_virial_kernel(const unsigned int n_wave_vectors,
hipfftComplex* d_fourier_mesh,
Scalar* d_inf_f,
Scalar* d_virial_mesh,
const Scalar3* d_k,
const bool exclude_dc,
Scalar kappa)
{
unsigned int idx;
idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= n_wave_vectors)
return;
if (!exclude_dc || idx != 0)
{
// non-zero wave vector
hipfftComplex fourier = d_fourier_mesh[idx];
Scalar3 k = d_k[idx];
Scalar rhog = (fourier.x * fourier.x + fourier.y * fourier.y) * d_inf_f[idx];
Scalar vterm = -Scalar(2.0) * (Scalar(1.0) / dot(k, k) + Scalar(0.25) / (kappa * kappa));
d_virial_mesh[0 * n_wave_vectors + idx] = rhog * (Scalar(1.0) + vterm * k.x * k.x); // xx
d_virial_mesh[1 * n_wave_vectors + idx] = rhog * (vterm * k.x * k.y); // xy
d_virial_mesh[2 * n_wave_vectors + idx] = rhog * (vterm * k.x * k.z); // xz
d_virial_mesh[3 * n_wave_vectors + idx] = rhog * (Scalar(1.0) + vterm * k.y * k.y); // yy
d_virial_mesh[4 * n_wave_vectors + idx] = rhog * (vterm * k.y * k.z); // yz
d_virial_mesh[5 * n_wave_vectors + idx] = rhog * (Scalar(1.0) + vterm * k.z * k.z); // zz
}
else
{
d_virial_mesh[0 * n_wave_vectors + idx] = Scalar(0.0);
d_virial_mesh[1 * n_wave_vectors + idx] = Scalar(0.0);
d_virial_mesh[2 * n_wave_vectors + idx] = Scalar(0.0);
d_virial_mesh[3 * n_wave_vectors + idx] = Scalar(0.0);
d_virial_mesh[4 * n_wave_vectors + idx] = Scalar(0.0);
d_virial_mesh[5 * n_wave_vectors + idx] = Scalar(0.0);
}
}
void gpu_compute_mesh_virial(const unsigned int n_wave_vectors,
hipfftComplex* d_fourier_mesh,
Scalar* d_inf_f,
Scalar* d_virial_mesh,
const Scalar3* d_k,
const bool exclude_dc,
Scalar kappa)
{
const unsigned int block_size = 256;
dim3 grid(n_wave_vectors / block_size + 1, 1, 1);
hipLaunchKernelGGL((gpu_compute_mesh_virial_kernel),
dim3(grid),
dim3(block_size),
0,
0,
n_wave_vectors,
d_fourier_mesh,
d_inf_f,
d_virial_mesh,
d_k,
exclude_dc,
kappa);
}
__global__ void gpu_update_meshes_kernel(const unsigned int n_wave_vectors,
hipfftComplex* d_fourier_mesh,
hipfftComplex* d_fourier_mesh_G_x,
hipfftComplex* d_fourier_mesh_G_y,
hipfftComplex* d_fourier_mesh_G_z,
const Scalar* d_inf_f,
const Scalar3* d_k,
unsigned int NNN)
{
unsigned int k;
k = blockDim.x * blockIdx.x + threadIdx.x;
if (k >= n_wave_vectors)
return;
hipfftComplex f = d_fourier_mesh[k];
Scalar scaled_inf_f = d_inf_f[k] / ((Scalar)NNN);
Scalar3 kvec = d_k[k];
// multiply by -i k G(k) (the 1/NNN inverse-FFT normalization is folded into scaled_inf_f)
// to obtain the Fourier-space field components
hipfftComplex fourier_G_x;
fourier_G_x.x = f.y * kvec.x * scaled_inf_f;
fourier_G_x.y = -f.x * kvec.x * scaled_inf_f;
hipfftComplex fourier_G_y;
fourier_G_y.x = f.y * kvec.y * scaled_inf_f;
fourier_G_y.y = -f.x * kvec.y * scaled_inf_f;
hipfftComplex fourier_G_z;
fourier_G_z.x = f.y * kvec.z * scaled_inf_f;
fourier_G_z.y = -f.x * kvec.z * scaled_inf_f;
// store in global memory
d_fourier_mesh_G_x[k] = fourier_G_x;
d_fourier_mesh_G_y[k] = fourier_G_y;
d_fourier_mesh_G_z[k] = fourier_G_z;
}
void gpu_update_meshes(const unsigned int n_wave_vectors,
hipfftComplex* d_fourier_mesh,
hipfftComplex* d_fourier_mesh_G_x,
hipfftComplex* d_fourier_mesh_G_y,
hipfftComplex* d_fourier_mesh_G_z,
const Scalar* d_inf_f,
const Scalar3* d_k,
unsigned int NNN,
unsigned int block_size)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_update_meshes_kernel);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(max_block_size, block_size);
dim3 grid(n_wave_vectors / run_block_size + 1, 1, 1);
hipLaunchKernelGGL((gpu_update_meshes_kernel),
dim3(grid),
dim3(run_block_size),
0,
0,
n_wave_vectors,
d_fourier_mesh,
d_fourier_mesh_G_x,
d_fourier_mesh_G_y,
d_fourier_mesh_G_z,
d_inf_f,
d_k,
NNN);
}
__global__ void gpu_compute_forces_kernel(const unsigned int work_size,
const Scalar4* d_postype,
Scalar4* d_force,
const uint3 grid_dim,
const uint3 n_ghost_cells,
const Scalar* d_charge,
const BoxDim box,
int order,
const unsigned int* d_index_array,
const hipfftComplex* inv_fourier_mesh_x,
const hipfftComplex* inv_fourier_mesh_y,
const hipfftComplex* inv_fourier_mesh_z,
const Scalar* d_rho_coeff,
const unsigned int offset)
{
extern __shared__ Scalar s_coeff[];
// load in interpolation coefficients
unsigned int ncoeffs = order * (2 * order + 1);
for (unsigned int cur_offset = 0; cur_offset < ncoeffs; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < ncoeffs)
{
s_coeff[cur_offset + threadIdx.x] = d_rho_coeff[cur_offset + threadIdx.x];
}
}
__syncthreads();
unsigned int work_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (work_idx >= work_size)
return;
unsigned int group_idx = work_idx + offset;
unsigned int idx = d_index_array[group_idx];
int3 inner_dim = make_int3(grid_dim.x - 2 * n_ghost_cells.x,
grid_dim.y - 2 * n_ghost_cells.y,
grid_dim.z - 2 * n_ghost_cells.z);
Scalar4 postype = d_postype[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
unsigned int type = __scalar_as_int(postype.w);
Scalar qi = d_charge[idx];
Scalar3 dr = make_scalar3(0, 0, 0);
// find cell the particle is in
int3 cell_coord
= find_cell(pos, inner_dim.x, inner_dim.y, inner_dim.z, n_ghost_cells, box, order, dr);
// ignore particles that are not within our domain (the error should be caught by HOOMD's cell
// list)
if (cell_coord.x < 0 || cell_coord.x >= (int)grid_dim.x || cell_coord.y < 0
|| cell_coord.y >= (int)grid_dim.y || cell_coord.z < 0 || cell_coord.z >= (int)grid_dim.z)
{
return;
}
Scalar3 force = make_scalar3(0.0, 0.0, 0.0);
int nlower = -(order - 1) / 2;
int nupper = order / 2;
Scalar result;
int mult_fact = 2 * order + 1;
// back-interpolate forces from neighboring mesh points
for (int l = nlower; l <= nupper; ++l)
{
result = Scalar(0.0);
for (int k = order - 1; k >= 0; k--)
{
result = s_coeff[l - nlower + k * mult_fact] + result * dr.x;
}
Scalar x0 = result;
for (int m = nlower; m <= nupper; ++m)
{
result = Scalar(0.0);
for (int k = order - 1; k >= 0; k--)
{
result = s_coeff[m - nlower + k * mult_fact] + result * dr.y;
}
Scalar y0 = x0 * result;
for (int n = nlower; n <= nupper; ++n)
{
result = Scalar(0.0);
for (int k = order - 1; k >= 0; k--)
{
result = s_coeff[n - nlower + k * mult_fact] + result * dr.z;
}
Scalar z0 = y0 * result;
int neighl = (int)cell_coord.x + l;
int neighm = (int)cell_coord.y + m;
int neighn = (int)cell_coord.z + n;
if (!n_ghost_cells.x)
{
if (neighl >= (int)grid_dim.x)
neighl -= grid_dim.x;
else if (neighl < 0)
neighl += grid_dim.x;
}
if (!n_ghost_cells.y)
{
if (neighm >= (int)grid_dim.y)
neighm -= grid_dim.y;
else if (neighm < 0)
neighm += grid_dim.y;
}
if (!n_ghost_cells.z)
{
if (neighn >= (int)grid_dim.z)
neighn -= grid_dim.z;
else if (neighn < 0)
neighn += grid_dim.z;
}
// use column-major layout
unsigned int cell_idx = neighl + grid_dim.x * (neighm + grid_dim.y * neighn);
hipfftComplex inv_mesh_x = inv_fourier_mesh_x[cell_idx];
hipfftComplex inv_mesh_y = inv_fourier_mesh_y[cell_idx];
hipfftComplex inv_mesh_z = inv_fourier_mesh_z[cell_idx];
force.x += qi * z0 * inv_mesh_x.x;
force.y += qi * z0 * inv_mesh_y.x;
force.z += qi * z0 * inv_mesh_z.x;
}
}
} // end neighbor cells loop
d_force[idx] = make_scalar4(force.x, force.y, force.z, 0.0);
}
void gpu_compute_forces(const unsigned int N,
const Scalar4* d_postype,
Scalar4* d_force,
const hipfftComplex* d_inv_fourier_mesh_x,
const hipfftComplex* d_inv_fourier_mesh_y,
const hipfftComplex* d_inv_fourier_mesh_z,
const uint3 grid_dim,
const uint3 n_ghost_cells,
const Scalar* d_charge,
const BoxDim& box,
int order,
const unsigned int* d_index_array,
const GPUPartition& gpu_partition,
const GPUPartition& all_gpu_partition,
const Scalar* d_rho_coeff,
unsigned int block_size,
bool local_fft,
unsigned int inv_mesh_elements)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_compute_forces_kernel);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(max_block_size, block_size);
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = all_gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = all_gpu_partition.getRangeAndSetGPU(idev);
// reset force array for ALL particles
hipMemsetAsync(d_force + range.first, 0, sizeof(Scalar4) * (range.second - range.first));
}
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
unsigned int n_blocks = nwork / run_block_size + 1;
const size_t shared_bytes = order * (2 * order + 1) * sizeof(Scalar);
hipLaunchKernelGGL(
(gpu_compute_forces_kernel),
dim3(n_blocks),
dim3(run_block_size),
shared_bytes,
0,
nwork,
d_postype,
d_force,
grid_dim,
n_ghost_cells,
d_charge,
box,
order,
d_index_array,
local_fft ? d_inv_fourier_mesh_x + idev * inv_mesh_elements : d_inv_fourier_mesh_x,
local_fft ? d_inv_fourier_mesh_y + idev * inv_mesh_elements : d_inv_fourier_mesh_y,
local_fft ? d_inv_fourier_mesh_z + idev * inv_mesh_elements : d_inv_fourier_mesh_z,
d_rho_coeff,
range.first);
}
}
__global__ void kernel_calculate_pe_partial(int n_wave_vectors,
Scalar* sum_partial,
const hipfftComplex* d_fourier_mesh,
const Scalar* d_inf_f,
const bool exclude_dc)
{
HIP_DYNAMIC_SHARED(Scalar, sdata)
unsigned int tidx = threadIdx.x;
unsigned int j;
j = blockDim.x * blockIdx.x + threadIdx.x;
Scalar mySum = Scalar(0.0);
if (j < n_wave_vectors)
{
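// each thread contributes |rho(k)|^2 * G(k) for its wave vector; the k=0 (DC) mode may be excluded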
if (!exclude_dc || j != 0)
{
mySum = d_fourier_mesh[j].x * d_fourier_mesh[j].x
+ d_fourier_mesh[j].y * d_fourier_mesh[j].y;
mySum *= d_inf_f[j];
}
}
sdata[tidx] = mySum;
__syncthreads();
// reduce the sum
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (tidx < offs)
{
sdata[tidx] += sdata[tidx + offs];
}
offs >>= 1;
__syncthreads();
}
// write result to global memory
if (tidx == 0)
sum_partial[blockIdx.x] = sdata[0];
}
__global__ void kernel_final_reduce_pe(Scalar* sum_partial, unsigned int nblocks, Scalar* sum)
{
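// single block reduces the per-block partial sums, processing them in chunks of blockDim.x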
HIP_DYNAMIC_SHARED(Scalar, smem)
if (threadIdx.x == 0)
*sum = Scalar(0.0);
for (int start = 0; start < nblocks; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < nblocks)
smem[threadIdx.x] = sum_partial[start + threadIdx.x];
else
smem[threadIdx.x] = Scalar(0.0);
__syncthreads();
// reduce the sum
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
smem[threadIdx.x] += smem[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
{
*sum += smem[0];
}
}
}
void gpu_compute_pe(unsigned int n_wave_vectors,
Scalar* d_sum_partial,
Scalar* d_sum,
const hipfftComplex* d_fourier_mesh,
const Scalar* d_inf_f,
const unsigned int block_size,
const uint3 mesh_dim,
const bool exclude_dc)
{
unsigned int n_blocks = n_wave_vectors / block_size + 1;
unsigned int shared_size = (unsigned int)(block_size * sizeof(Scalar));
dim3 grid(n_blocks, 1, 1);
hipLaunchKernelGGL((kernel_calculate_pe_partial),
dim3(grid),
dim3(block_size),
shared_size,
0,
n_wave_vectors,
d_sum_partial,
d_fourier_mesh,
d_inf_f,
exclude_dc);
// calculate final sum of mesh values
const unsigned int final_block_size = 256;
shared_size = final_block_size * sizeof(Scalar);
hipLaunchKernelGGL((kernel_final_reduce_pe),
dim3(1),
dim3(final_block_size),
shared_size,
0,
d_sum_partial,
n_blocks,
d_sum);
}
__global__ void kernel_calculate_virial_partial(int n_wave_vectors,
Scalar* sum_virial_partial,
const Scalar* d_mesh_virial)
{
HIP_DYNAMIC_SHARED(Scalar, sdata)
unsigned int j;
j = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int tidx = threadIdx.x;
Scalar mySum_xx = Scalar(0.0);
Scalar mySum_xy = Scalar(0.0);
Scalar mySum_xz = Scalar(0.0);
Scalar mySum_yy = Scalar(0.0);
Scalar mySum_yz = Scalar(0.0);
Scalar mySum_zz = Scalar(0.0);
if (j < n_wave_vectors)
{
mySum_xx = d_mesh_virial[0 * n_wave_vectors + j];
mySum_xy = d_mesh_virial[1 * n_wave_vectors + j];
mySum_xz = d_mesh_virial[2 * n_wave_vectors + j];
mySum_yy = d_mesh_virial[3 * n_wave_vectors + j];
mySum_yz = d_mesh_virial[4 * n_wave_vectors + j];
mySum_zz = d_mesh_virial[5 * n_wave_vectors + j];
}
sdata[0 * blockDim.x + tidx] = mySum_xx;
sdata[1 * blockDim.x + tidx] = mySum_xy;
sdata[2 * blockDim.x + tidx] = mySum_xz;
sdata[3 * blockDim.x + tidx] = mySum_yy;
sdata[4 * blockDim.x + tidx] = mySum_yz;
sdata[5 * blockDim.x + tidx] = mySum_zz;
__syncthreads();
// reduce the sum
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (tidx < offs)
{
sdata[0 * blockDim.x + tidx] += sdata[0 * blockDim.x + tidx + offs];
sdata[1 * blockDim.x + tidx] += sdata[1 * blockDim.x + tidx + offs];
sdata[2 * blockDim.x + tidx] += sdata[2 * blockDim.x + tidx + offs];
sdata[3 * blockDim.x + tidx] += sdata[3 * blockDim.x + tidx + offs];
sdata[4 * blockDim.x + tidx] += sdata[4 * blockDim.x + tidx + offs];
sdata[5 * blockDim.x + tidx] += sdata[5 * blockDim.x + tidx + offs];
}
offs >>= 1;
__syncthreads();
}
// write result to global memory
if (tidx == 0)
{
sum_virial_partial[0 * gridDim.x + blockIdx.x] = sdata[0 * blockDim.x];
sum_virial_partial[1 * gridDim.x + blockIdx.x] = sdata[1 * blockDim.x];
sum_virial_partial[2 * gridDim.x + blockIdx.x] = sdata[2 * blockDim.x];
sum_virial_partial[3 * gridDim.x + blockIdx.x] = sdata[3 * blockDim.x];
sum_virial_partial[4 * gridDim.x + blockIdx.x] = sdata[4 * blockDim.x];
sum_virial_partial[5 * gridDim.x + blockIdx.x] = sdata[5 * blockDim.x];
}
}
__global__ void
kernel_final_reduce_virial(Scalar* sum_virial_partial, unsigned int nblocks, Scalar* sum_virial)
{
HIP_DYNAMIC_SHARED(Scalar, smem)
if (threadIdx.x == 0)
{
sum_virial[0] = Scalar(0.0);
sum_virial[1] = Scalar(0.0);
sum_virial[2] = Scalar(0.0);
sum_virial[3] = Scalar(0.0);
sum_virial[4] = Scalar(0.0);
sum_virial[5] = Scalar(0.0);
}
for (int start = 0; start < nblocks; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < nblocks)
{
smem[0 * blockDim.x + threadIdx.x]
= sum_virial_partial[0 * nblocks + start + threadIdx.x];
smem[1 * blockDim.x + threadIdx.x]
= sum_virial_partial[1 * nblocks + start + threadIdx.x];
smem[2 * blockDim.x + threadIdx.x]
= sum_virial_partial[2 * nblocks + start + threadIdx.x];
smem[3 * blockDim.x + threadIdx.x]
= sum_virial_partial[3 * nblocks + start + threadIdx.x];
smem[4 * blockDim.x + threadIdx.x]
= sum_virial_partial[4 * nblocks + start + threadIdx.x];
smem[5 * blockDim.x + threadIdx.x]
= sum_virial_partial[5 * nblocks + start + threadIdx.x];
}
else
{
smem[0 * blockDim.x + threadIdx.x] = Scalar(0.0);
smem[1 * blockDim.x + threadIdx.x] = Scalar(0.0);
smem[2 * blockDim.x + threadIdx.x] = Scalar(0.0);
smem[3 * blockDim.x + threadIdx.x] = Scalar(0.0);
smem[4 * blockDim.x + threadIdx.x] = Scalar(0.0);
smem[5 * blockDim.x + threadIdx.x] = Scalar(0.0);
}
__syncthreads();
// reduce the sum
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
smem[0 * blockDim.x + threadIdx.x] += smem[0 * blockDim.x + threadIdx.x + offs];
smem[1 * blockDim.x + threadIdx.x] += smem[1 * blockDim.x + threadIdx.x + offs];
smem[2 * blockDim.x + threadIdx.x] += smem[2 * blockDim.x + threadIdx.x + offs];
smem[3 * blockDim.x + threadIdx.x] += smem[3 * blockDim.x + threadIdx.x + offs];
smem[4 * blockDim.x + threadIdx.x] += smem[4 * blockDim.x + threadIdx.x + offs];
smem[5 * blockDim.x + threadIdx.x] += smem[5 * blockDim.x + threadIdx.x + offs];
}
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
{
sum_virial[0] += smem[0 * blockDim.x];
sum_virial[1] += smem[1 * blockDim.x];
sum_virial[2] += smem[2 * blockDim.x];
sum_virial[3] += smem[3 * blockDim.x];
sum_virial[4] += smem[4 * blockDim.x];
sum_virial[5] += smem[5 * blockDim.x];
}
}
}
void gpu_compute_virial(unsigned int n_wave_vectors,
Scalar* d_sum_virial_partial,
Scalar* d_sum_virial,
const Scalar* d_mesh_virial,
const unsigned int block_size)
{
unsigned int n_blocks = n_wave_vectors / block_size + 1;
unsigned int shared_size = (unsigned int)(6 * block_size * sizeof(Scalar));
dim3 grid(n_blocks, 1, 1);
hipLaunchKernelGGL((kernel_calculate_virial_partial),
dim3(grid),
dim3(block_size),
shared_size,
0,
n_wave_vectors,
d_sum_virial_partial,
d_mesh_virial);
// calculate final virial values
const unsigned int final_block_size = 256;
shared_size = 6 * final_block_size * sizeof(Scalar);
hipLaunchKernelGGL((kernel_final_reduce_virial),
dim3(1),
dim3(final_block_size),
shared_size,
0,
d_sum_virial_partial,
n_blocks,
d_sum_virial);
}
template<bool local_fft>
__global__ void gpu_compute_influence_function_kernel(const uint3 mesh_dim,
const unsigned int n_wave_vectors,
const uint3 global_dim,
Scalar* d_inf_f,
Scalar3* d_k,
const Scalar3 b1,
const Scalar3 b2,
const Scalar3 b3,
const uint3 pidx,
const uint3 pdim,
int nbx,
int nby,
int nbz,
const Scalar* gf_b,
int order,
Scalar kappa,
Scalar alpha)
{
unsigned int kidx;
kidx = blockDim.x * blockIdx.x + threadIdx.x;
if (kidx >= n_wave_vectors)
return;
int l, m, n;
if (local_fft)
{
// use row-major layout
int ny = mesh_dim.y;
int nx = mesh_dim.x;
n = kidx / ny / nx;
m = (kidx - n * ny * nx) / nx;
l = kidx % nx;
}
#ifdef ENABLE_MPI
else
{
// local layout: row-major
int ny = mesh_dim.y;
int nx = mesh_dim.x;
int n_local = kidx / ny / nx;
int m_local = (kidx - n_local * ny * nx) / nx;
int l_local = kidx % nx;
// cyclic distribution
l = l_local * pdim.x + pidx.x;
m = m_local * pdim.y + pidx.y;
n = n_local * pdim.z + pidx.z;
}
#endif
// compute Miller indices
if (l >= (int)(global_dim.x / 2 + global_dim.x % 2))
l -= (int)global_dim.x;
if (m >= (int)(global_dim.y / 2 + global_dim.y % 2))
m -= (int)global_dim.y;
if (n >= (int)(global_dim.z / 2 + global_dim.z % 2))
n -= (int)global_dim.z;
Scalar val;
Scalar3 kval = (Scalar)l * b1 + (Scalar)m * b2 + (Scalar)n * b3;
Scalar3 kH = Scalar(2.0 * M_PI)
* make_scalar3(Scalar(1.0) / (Scalar)global_dim.x,
Scalar(1.0) / (Scalar)global_dim.y,
Scalar(1.0) / (Scalar)global_dim.z);
Scalar snx = fast::sin(Scalar(0.5) * l * kH.x);
Scalar snx2 = snx * snx;
Scalar sny = fast::sin(Scalar(0.5) * m * kH.y);
Scalar sny2 = sny * sny;
Scalar snz = fast::sin(Scalar(0.5) * n * kH.z);
Scalar snz2 = snz * snz;
Scalar sx(0.0), sy(0.0), sz(0.0);
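// evaluate the charge-assignment denominator polynomials with a Horner scheme in sin^2(0.5*k_i*h_i)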
for (int iorder = order - 1; iorder >= 0; iorder--)
{
sx = gf_b[iorder] + sx * snx2;
sy = gf_b[iorder] + sy * sny2;
sz = gf_b[iorder] + sz * snz2;
}
Scalar denominator = sx * sy * sz;
denominator *= denominator;
if (l != 0 || m != 0 || n != 0)
{
Scalar sum1(0.0);
Scalar numerator = Scalar(4.0 * M_PI) / dot(kval, kval);
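// sum over aliased wave vectors: each image is weighted by the interpolation window
// (sinc^order per direction) and a Gaussian screening factor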
for (int ix = -nbx; ix <= nbx; ix++)
{
Scalar qx = ((Scalar)l + (Scalar)ix * global_dim.x);
Scalar3 knx = qx * b1;
Scalar argx = Scalar(0.5) * qx * kH.x;
Scalar wxs = gpu_sinc(argx);
Scalar wx(1.0);
for (int iorder = 0; iorder < order; ++iorder)
{
wx *= wxs;
}
for (int iy = -nby; iy <= nby; iy++)
{
Scalar qy = ((Scalar)m + (Scalar)iy * global_dim.y);
Scalar3 kny = qy * b2;
Scalar argy = Scalar(0.5) * qy * kH.y;
Scalar wys = gpu_sinc(argy);
Scalar wy(1.0);
for (int iorder = 0; iorder < order; ++iorder)
{
wy *= wys;
}
for (int iz = -nbz; iz <= nbz; iz++)
{
Scalar qz = ((Scalar)n + (Scalar)iz * global_dim.z);
Scalar3 knz = qz * b3;
Scalar argz = Scalar(0.5) * qz * kH.z;
Scalar wzs = gpu_sinc(argz);
Scalar wz(1.0);
for (int iorder = 0; iorder < order; ++iorder)
{
wz *= wzs;
}
Scalar3 kn = knx + kny + knz;
Scalar dot1 = dot(kn, kval);
Scalar dot2 = dot(kn, kn) + alpha * alpha;
Scalar arg_gauss = Scalar(0.25) * dot2 / kappa / kappa;
Scalar gauss = exp(-arg_gauss);
sum1 += (dot1 / dot2) * gauss * wx * wx * wy * wy * wz * wz;
}
}
}
val = numerator * sum1 / denominator;
}
else
{
val = Scalar(0.0);
}
// write out result
d_inf_f[kidx] = val;
d_k[kidx] = kval;
}
void gpu_compute_influence_function(const uint3 mesh_dim,
const uint3 global_dim,
Scalar* d_inf_f,
Scalar3* d_k,
const BoxDim& global_box,
const bool local_fft,
const uint3 pidx,
const uint3 pdim,
const Scalar EPS_HOC,
Scalar kappa,
Scalar alpha,
const Scalar* d_gf_b,
int order,
unsigned int block_size)
{
// compute reciprocal lattice vectors
Scalar3 a1 = global_box.getLatticeVector(0);
Scalar3 a2 = global_box.getLatticeVector(1);
Scalar3 a3 = global_box.getLatticeVector(2);
Scalar V_box = global_box.getVolume();
Scalar3 b1 = Scalar(2.0 * M_PI)
* make_scalar3(a2.y * a3.z - a2.z * a3.y,
a2.z * a3.x - a2.x * a3.z,
a2.x * a3.y - a2.y * a3.x)
/ V_box;
Scalar3 b2 = Scalar(2.0 * M_PI)
* make_scalar3(a3.y * a1.z - a3.z * a1.y,
a3.z * a1.x - a3.x * a1.z,
a3.x * a1.y - a3.y * a1.x)
/ V_box;
Scalar3 b3 = Scalar(2.0 * M_PI)
* make_scalar3(a1.y * a2.z - a1.z * a2.y,
a1.z * a2.x - a1.x * a2.z,
a1.x * a2.y - a1.y * a2.x)
/ V_box;
unsigned int num_wave_vectors = mesh_dim.x * mesh_dim.y * mesh_dim.z;
Scalar3 L = global_box.getL();
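// number of aliased images to sum per direction, estimated from kappa, the box lengths,
// and the error tolerance EPS_HOC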
Scalar temp = floor(((kappa * L.x / (M_PI * global_dim.x)) * pow(-log(EPS_HOC), 0.25)));
int nbx = (int)temp;
temp = floor(((kappa * L.y / (M_PI * global_dim.y)) * pow(-log(EPS_HOC), 0.25)));
int nby = (int)temp;
temp = floor(((kappa * L.z / (M_PI * global_dim.z)) * pow(-log(EPS_HOC), 0.25)));
int nbz = (int)temp;
if (local_fft)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_compute_influence_function_kernel<true>);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(max_block_size, block_size);
unsigned int n_blocks = num_wave_vectors / run_block_size;
if (num_wave_vectors % run_block_size)
n_blocks += 1;
dim3 grid(n_blocks, 1, 1);
hipLaunchKernelGGL((gpu_compute_influence_function_kernel<true>),
dim3(grid),
dim3(run_block_size),
0,
0,
mesh_dim,
num_wave_vectors,
global_dim,
d_inf_f,
d_k,
b1,
b2,
b3,
pidx,
pdim,
nbx,
nby,
nbz,
d_gf_b,
order,
kappa,
alpha);
}
#ifdef ENABLE_MPI
else
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_compute_influence_function_kernel<false>);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(max_block_size, block_size);
unsigned int n_blocks = num_wave_vectors / run_block_size;
if (num_wave_vectors % run_block_size)
n_blocks += 1;
dim3 grid(n_blocks, 1, 1);
hipLaunchKernelGGL((gpu_compute_influence_function_kernel<false>),
dim3(grid),
dim3(run_block_size),
0,
0,
mesh_dim,
num_wave_vectors,
global_dim,
d_inf_f,
d_k,
b1,
b2,
b3,
pidx,
pdim,
nbx,
nby,
nbz,
d_gf_b,
order,
kappa,
alpha);
}
#endif
}
//! The developer has chosen not to document this function
__global__ void gpu_fix_exclusions_kernel(Scalar4* d_force,
Scalar* d_virial,
const size_t virial_pitch,
const Scalar4* d_pos,
const Scalar* d_charge,
const BoxDim box,
const unsigned int* d_n_neigh,
const unsigned int* d_nlist,
const Index2D nli,
Scalar kappa,
Scalar alpha,
unsigned int* d_group_members,
unsigned int group_size)
{
// start by identifying which particle we are to handle
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
const Scalar sqrtpi = sqrtf(M_PI);
unsigned int n_neigh = d_n_neigh[idx];
Scalar4 postypei = __ldg(d_pos + idx);
Scalar3 posi = make_scalar3(postypei.x, postypei.y, postypei.z);
Scalar qi = __ldg(d_charge + idx);
// initialize the force to 0
Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar virial[6];
for (unsigned int i = 0; i < 6; i++)
virial[i] = Scalar(0.0);
unsigned int cur_j = 0;
// prefetch neighbor index
unsigned int next_j = d_nlist[nli(idx, 0)];
for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
{
{
// read the current neighbor index (MEM TRANSFER: 4 bytes)
// prefetch the next value and set the current one
cur_j = next_j;
if (neigh_idx + 1 < n_neigh)
next_j = d_nlist[nli(idx, neigh_idx + 1)];
// get the neighbor's position (MEM TRANSFER: 16 bytes)
Scalar4 postypej = __ldg(d_pos + cur_j);
Scalar3 posj = make_scalar3(postypej.x, postypej.y, postypej.z);
Scalar qj = __ldg(d_charge + cur_j);
// calculate dr (with periodic boundary conditions) (FLOPS: 3)
Scalar3 dx = posi - posj;
// apply periodic boundary conditions: (FLOPS 12)
dx = box.minImage(dx);
// calculate r squared (FLOPS: 5)
Scalar rsq = dot(dx, dx);
Scalar r = sqrtf(rsq);
Scalar qiqj = qi * qj;
Scalar expfac = fast::exp(-alpha * r);
Scalar arg1 = kappa * r - alpha / Scalar(2.0) / kappa;
Scalar arg2 = kappa * r + alpha / Scalar(2.0) / kappa;
Scalar erffac = (::erf(arg1) * expfac + expfac - fast::erfc(arg2) * exp(alpha * r))
/ (Scalar(2.0) * r);
Scalar force_divr
= qiqj
* (expfac * Scalar(2.0) * kappa / sqrtpi * fast::exp(-arg1 * arg1)
- Scalar(0.5) * alpha
* (expfac * ::erfc(arg1) + fast::exp(alpha * r) * fast::erfc(arg2))
- erffac)
/ rsq;
// subtract long-range part of pair-interaction
Scalar pair_eng = -qiqj * erffac;
Scalar force_div2r = Scalar(0.5) * force_divr;
virial[0] += dx.x * dx.x * force_div2r;
virial[1] += dx.x * dx.y * force_div2r;
virial[2] += dx.x * dx.z * force_div2r;
virial[3] += dx.y * dx.y * force_div2r;
virial[4] += dx.y * dx.z * force_div2r;
virial[5] += dx.z * dx.z * force_div2r;
force.x += dx.x * force_divr;
force.y += dx.y * force_divr;
force.z += dx.z * force_divr;
force.w += pair_eng;
}
}
force.w *= Scalar(0.5);
d_force[idx].x += force.x;
d_force[idx].y += force.y;
d_force[idx].z += force.z;
d_force[idx].w += force.w;
for (unsigned int i = 0; i < 6; i++)
d_virial[i * virial_pitch + idx] += virial[i];
}
}
//! The developer has chosen not to document this function
hipError_t gpu_fix_exclusions(Scalar4* d_force,
Scalar* d_virial,
const size_t virial_pitch,
const unsigned int Nmax,
const Scalar4* d_pos,
const Scalar* d_charge,
const BoxDim& box,
const unsigned int* d_n_ex,
const unsigned int* d_exlist,
const Index2D nex,
Scalar kappa,
Scalar alpha,
unsigned int* d_group_members,
unsigned int group_size,
int block_size)
{
dim3 grid(group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
hipLaunchKernelGGL((gpu_fix_exclusions_kernel),
dim3(grid),
dim3(threads),
0,
0,
d_force,
d_virial,
virial_pitch,
d_pos,
d_charge,
box,
d_n_ex,
d_exlist,
nex,
kappa,
alpha,
d_group_members,
group_size);
return hipSuccess;
}
|
2f4e20a564c8bf34bb5399984f06262f081dd840.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/convolutions.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void upsampling3dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCDHW) {
// x (gradO) has shape [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC)
// z (gradI) has shape [bS, iC, factorD*iD, factorH*iH, factorW*iW ] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC)
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ int rank, dimID;
__shared__ uint factorD, factorH, factorW;
__shared__ Nd4jLong zLen, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
dimID = isNCDHW ? 2 : 1;
zLen = shape::length(zShapeInfo);
rank = 5;
factorD = xShapeInfo[dimID + 1] / zShapeInfo[dimID + 1];
factorH = xShapeInfo[dimID + 2] / zShapeInfo[dimID + 2];
factorW = xShapeInfo[dimID + 3] / zShapeInfo[dimID + 3];
}
__syncthreads();
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if(zInd >= zLen)
return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(zInd, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
z[zOffset] = 0;
const Nd4jLong zCoord2 = coords[dimID] * factorD;
const Nd4jLong zCoord3 = coords[dimID + 1] * factorH;
const Nd4jLong zCoord4 = coords[dimID + 2] * factorW;
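// accumulate the gradient over the factorD x factorH x factorW gradO elements that were upsampled from this gradI element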
for(coords[dimID] = zCoord2; coords[dimID] < zCoord2 + factorD; ++coords[dimID])
for(coords[dimID + 1] = zCoord3; coords[dimID + 1] < zCoord3 + factorH; ++coords[dimID + 1])
for(coords[dimID + 2] = zCoord4; coords[dimID + 2] < zCoord4 + factorW; ++coords[dimID + 2])
z[zOffset] += x[shape::getOffset(xShapeInfo, coords)];
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling3dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const bool isNCDHW) {
hipLaunchKernelGGL(( upsampling3dBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, isNCDHW);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::upsampling3dBP(sd::graph::Context& block, const NDArray& gradO, NDArray& gradI, const bool isNCDHW) {
PointersManager manager(block.launchContext(), "upsampling3d_bp");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = gradI.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&gradI}, {&gradO});
BUILD_SINGLE_SELECTOR(gradI.dataType(), upsampling3dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), isNCDHW), FLOAT_TYPES);
NDArray::registerSpecialUse({&gradI}, {&gradO});
manager.synchronize();
}
}
} | 2f4e20a564c8bf34bb5399984f06262f081dd840.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/convolutions.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void upsampling3dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCDHW) {
// x (gradO) has shape [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC)
// z (gradI) has shape [bS, iC, factorD*iD, factorH*iH, factorW*iW ] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC)
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ int rank, dimID;
__shared__ uint factorD, factorH, factorW;
__shared__ Nd4jLong zLen, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
dimID = isNCDHW ? 2 : 1;
zLen = shape::length(zShapeInfo);
rank = 5;
factorD = xShapeInfo[dimID + 1] / zShapeInfo[dimID + 1];
factorH = xShapeInfo[dimID + 2] / zShapeInfo[dimID + 2];
factorW = xShapeInfo[dimID + 3] / zShapeInfo[dimID + 3];
}
__syncthreads();
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if(zInd >= zLen)
return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(zInd, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
z[zOffset] = 0;
const Nd4jLong zCoord2 = coords[dimID] * factorD;
const Nd4jLong zCoord3 = coords[dimID + 1] * factorH;
const Nd4jLong zCoord4 = coords[dimID + 2] * factorW;
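// accumulate the gradient over the factorD x factorH x factorW gradO elements that were upsampled from this gradI element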
for(coords[dimID] = zCoord2; coords[dimID] < zCoord2 + factorD; ++coords[dimID])
for(coords[dimID + 1] = zCoord3; coords[dimID + 1] < zCoord3 + factorH; ++coords[dimID + 1])
for(coords[dimID + 2] = zCoord4; coords[dimID + 2] < zCoord4 + factorW; ++coords[dimID + 2])
z[zOffset] += x[shape::getOffset(xShapeInfo, coords)];
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling3dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const bool isNCDHW) {
upsampling3dBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, isNCDHW);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::upsampling3dBP(sd::graph::Context& block, const NDArray& gradO, NDArray& gradI, const bool isNCDHW) {
PointersManager manager(block.launchContext(), "upsampling3d_bp");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = gradI.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&gradI}, {&gradO});
BUILD_SINGLE_SELECTOR(gradI.dataType(), upsampling3dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), isNCDHW), FLOAT_TYPES);
NDArray::registerSpecialUse({&gradI}, {&gradO});
manager.synchronize();
}
}
} |
5c5cd9a6efdfcc9667e2a516179fa7b477326326.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_hadamard_fl.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float *wt = NULL;
hipMalloc(&wt, XSIZE*YSIZE*sizeof(float)); // allocate XSIZE*YSIZE floats
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float)); // allocate XSIZE*YSIZE floats
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((kernel_hadamard_fl), dim3(gridBlock), dim3(threadBlock), 0, 0, N, wt, x);
hipDeviceSynchronize();
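// warm-up: 10 untimed launches, then time 1000 launches below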
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kernel_hadamard_fl), dim3(gridBlock), dim3(threadBlock), 0, 0, N, wt, x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kernel_hadamard_fl), dim3(gridBlock), dim3(threadBlock), 0, 0, N, wt, x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5c5cd9a6efdfcc9667e2a516179fa7b477326326.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_hadamard_fl.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float *wt = NULL;
cudaMalloc(&wt, XSIZE*YSIZE*sizeof(float)); // allocate XSIZE*YSIZE floats
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float)); // allocate XSIZE*YSIZE floats
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_hadamard_fl<<<gridBlock,threadBlock>>>(N,wt,x);
cudaDeviceSynchronize();
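// warm-up: 10 untimed launches, then time 1000 launches below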
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_hadamard_fl<<<gridBlock,threadBlock>>>(N,wt,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_hadamard_fl<<<gridBlock,threadBlock>>>(N,wt,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b0dc1227505c5461dd2a95381ca061d78caf5106.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "hip/hip_runtime_api.h"
#include "gtest/gtest.h"
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/random.h>
#include <cugraph/algorithms.hpp>
#include <sampling/random_walks.cuh>
#include <raft/handle.hpp>
#include "random_walks_utils.cuh"
#include <algorithm>
#include <iterator>
#include <limits>
#include <numeric>
#include <tuple>
#include <utilities/high_res_timer.hpp>
#include <vector>
namespace { // anonym.
template <typename vertex_t, typename index_t>
void fill_start(raft::handle_t const& handle,
rmm::device_uvector<vertex_t>& d_start,
index_t num_vertices)
{
index_t num_paths = d_start.size();
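// distribute the path start vertices round-robin over the vertex set (vertex = path index mod num_vertices)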
thrust::transform(handle.get_thrust_policy(),
thrust::make_counting_iterator<index_t>(0),
thrust::make_counting_iterator<index_t>(num_paths),
d_start.begin(),
[num_vertices] __device__(auto indx) { return indx % num_vertices; });
}
} // namespace
namespace impl_details = cugraph::detail::original;
enum class traversal_id_t : int { HORIZONTAL = 0, VERTICAL };
struct RandomWalks_Usecase {
std::string graph_file_full_path{};
bool test_weighted{false};
RandomWalks_Usecase(std::string const& graph_file_path, bool test_weighted)
: test_weighted(test_weighted)
{
if ((graph_file_path.length() > 0) && (graph_file_path[0] != '/')) {
graph_file_full_path = cugraph::test::get_rapids_dataset_root_dir() + "/" + graph_file_path;
} else {
graph_file_full_path = graph_file_path;
}
};
};
class Tests_RandomWalks
: public ::testing::TestWithParam<std::tuple<traversal_id_t, int, RandomWalks_Usecase>> {
public:
Tests_RandomWalks() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
template <typename vertex_t, typename edge_t, typename weight_t>
void run_current_test(std::tuple<traversal_id_t, int, RandomWalks_Usecase> const& configuration)
{
raft::handle_t handle{};
// debug info:
//
// std::cout << "read graph file: " << configuration.graph_file_full_path << std::endl;
traversal_id_t trv_id = std::get<0>(configuration);
int sampling_id = std::get<1>(configuration);
auto const& target = std::get<2>(configuration);
cugraph::graph_t<vertex_t, edge_t, weight_t, false, false> graph(handle);
std::tie(graph, std::ignore) =
cugraph::test::read_graph_from_matrix_market_file<vertex_t, edge_t, weight_t, false, false>(
handle, target.graph_file_full_path, target.test_weighted, false);
auto graph_view = graph.view();
// call random_walks:
start_random_walks(handle, graph_view, trv_id, sampling_id);
}
template <typename graph_vt>
void start_random_walks(raft::handle_t const& handle,
graph_vt const& graph_view,
traversal_id_t trv_id,
int sampling_id)
{
using vertex_t = typename graph_vt::vertex_type;
using edge_t = typename graph_vt::edge_type;
using weight_t = typename graph_vt::weight_type;
using real_t = float;
edge_t num_paths = 10;
rmm::device_uvector<vertex_t> d_start(num_paths, handle.get_stream());
vertex_t num_vertices = graph_view.number_of_vertices();
fill_start(handle, d_start, num_vertices);
// 0-copy const device view:
//
impl_details::device_const_vector_view<vertex_t, edge_t> d_start_view{d_start.data(),
num_paths};
edge_t max_depth{10};
weight_t p{4};
weight_t q{8};
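    // node2vec return (p) and in-out (q) parameters; they are forwarded to every sampler below,
    // but only the node2vec sampler (sampling_id == 2) is expected to use them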
if (trv_id == traversal_id_t::HORIZONTAL) {
// `node2vec` without alpha buffer:
//
if (sampling_id == 2) {
auto ret_tuple = cugraph::random_walks(
handle,
graph_view,
d_start_view.begin(),
num_paths,
max_depth,
false,
std::make_unique<cugraph::sampling_params_t>(sampling_id, p, q, false));
// check results:
//
bool test_all_paths = cugraph::test::host_check_rw_paths(handle,
graph_view,
std::get<0>(ret_tuple),
std::get<1>(ret_tuple),
std::get<2>(ret_tuple));
ASSERT_TRUE(test_all_paths);
}
      // the alpha buffer case should also be tested for `node2vec`;
      // for the other samplers it is irrelevant, so this block runs
      // for every sampling method:
//
{
auto ret_tuple = cugraph::random_walks(
handle,
graph_view,
d_start_view.begin(),
num_paths,
max_depth,
false,
std::make_unique<cugraph::sampling_params_t>(sampling_id, p, q, true));
// check results:
//
bool test_all_paths = cugraph::test::host_check_rw_paths(handle,
graph_view,
std::get<0>(ret_tuple),
std::get<1>(ret_tuple),
std::get<2>(ret_tuple));
ASSERT_TRUE(test_all_paths);
}
} else { // VERTICAL: needs to be force-called via detail
if (sampling_id == 0) {
impl_details::uniform_selector_t<graph_vt, real_t> selector{handle, graph_view, real_t{0}};
auto ret_tuple = cugraph::detail::random_walks_impl<graph_vt,
decltype(selector),
impl_details::vertical_traversal_t>(
        handle,  // required to prevent clang-format from separating the function name from its namespace
graph_view,
d_start_view,
max_depth,
selector);
// check results:
//
bool test_all_paths = cugraph::test::host_check_rw_paths(handle,
graph_view,
std::get<0>(ret_tuple),
std::get<1>(ret_tuple),
std::get<2>(ret_tuple));
if (!test_all_paths)
std::cout << "starting seed on failure: " << std::get<3>(ret_tuple) << '\n';
ASSERT_TRUE(test_all_paths);
} else if (sampling_id == 1) {
impl_details::biased_selector_t<graph_vt, real_t> selector{handle, graph_view, real_t{0}};
auto ret_tuple = cugraph::detail::random_walks_impl<graph_vt,
decltype(selector),
impl_details::vertical_traversal_t>(
        handle,  // required to prevent clang-format from separating the function name from its namespace
graph_view,
d_start_view,
max_depth,
selector);
// check results:
//
bool test_all_paths = cugraph::test::host_check_rw_paths(handle,
graph_view,
std::get<0>(ret_tuple),
std::get<1>(ret_tuple),
std::get<2>(ret_tuple));
if (!test_all_paths)
std::cout << "starting seed on failure: " << std::get<3>(ret_tuple) << '\n';
ASSERT_TRUE(test_all_paths);
} else {
impl_details::node2vec_selector_t<graph_vt, real_t> selector{
handle, graph_view, real_t{0}, p, q, num_paths};
auto ret_tuple = cugraph::detail::random_walks_impl<graph_vt,
decltype(selector),
impl_details::vertical_traversal_t>(
        handle,  // required to prevent clang-format from separating the function name from its namespace
graph_view,
d_start_view,
max_depth,
selector);
// check results:
//
bool test_all_paths = cugraph::test::host_check_rw_paths(handle,
graph_view,
std::get<0>(ret_tuple),
std::get<1>(ret_tuple),
std::get<2>(ret_tuple));
if (!test_all_paths)
std::cout << "starting seed on failure: " << std::get<3>(ret_tuple) << '\n';
ASSERT_TRUE(test_all_paths);
}
}
}
};
TEST_P(Tests_RandomWalks, Initialize_i32_i32_f)
{
run_current_test<int32_t, int32_t, float>(GetParam());
}
INSTANTIATE_TEST_SUITE_P(
simple_test,
Tests_RandomWalks,
::testing::Combine(::testing::Values(traversal_id_t::HORIZONTAL, traversal_id_t::VERTICAL),
::testing::Values(int{0}, int{1}, int{2}),
::testing::Values(RandomWalks_Usecase("test/datasets/karate.mtx", true),
RandomWalks_Usecase("test/datasets/web-Google.mtx", true),
RandomWalks_Usecase("test/datasets/ljournal-2008.mtx", true),
RandomWalks_Usecase("test/datasets/webbase-1M.mtx", true))));
CUGRAPH_TEST_PROGRAM_MAIN()
| b0dc1227505c5461dd2a95381ca061d78caf5106.cu | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_profiler_api.h"
#include "gtest/gtest.h"
#include <utilities/base_fixture.hpp>
#include <utilities/test_utilities.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/random.h>
#include <cugraph/algorithms.hpp>
#include <sampling/random_walks.cuh>
#include <raft/handle.hpp>
#include "random_walks_utils.cuh"
#include <algorithm>
#include <iterator>
#include <limits>
#include <numeric>
#include <tuple>
#include <utilities/high_res_timer.hpp>
#include <vector>
namespace { // anonym.
template <typename vertex_t, typename index_t>
void fill_start(raft::handle_t const& handle,
rmm::device_uvector<vertex_t>& d_start,
index_t num_vertices)
{
index_t num_paths = d_start.size();
thrust::transform(handle.get_thrust_policy(),
thrust::make_counting_iterator<index_t>(0),
thrust::make_counting_iterator<index_t>(num_paths),
d_start.begin(),
[num_vertices] __device__(auto indx) { return indx % num_vertices; });
}
} // namespace
namespace impl_details = cugraph::detail::original;
enum class traversal_id_t : int { HORIZONTAL = 0, VERTICAL };
struct RandomWalks_Usecase {
std::string graph_file_full_path{};
bool test_weighted{false};
RandomWalks_Usecase(std::string const& graph_file_path, bool test_weighted)
: test_weighted(test_weighted)
{
if ((graph_file_path.length() > 0) && (graph_file_path[0] != '/')) {
graph_file_full_path = cugraph::test::get_rapids_dataset_root_dir() + "/" + graph_file_path;
} else {
graph_file_full_path = graph_file_path;
}
};
};
class Tests_RandomWalks
: public ::testing::TestWithParam<std::tuple<traversal_id_t, int, RandomWalks_Usecase>> {
public:
Tests_RandomWalks() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
template <typename vertex_t, typename edge_t, typename weight_t>
void run_current_test(std::tuple<traversal_id_t, int, RandomWalks_Usecase> const& configuration)
{
raft::handle_t handle{};
    // debug info:
//
// std::cout << "read graph file: " << configuration.graph_file_full_path << std::endl;
traversal_id_t trv_id = std::get<0>(configuration);
int sampling_id = std::get<1>(configuration);
auto const& target = std::get<2>(configuration);
cugraph::graph_t<vertex_t, edge_t, weight_t, false, false> graph(handle);
std::tie(graph, std::ignore) =
cugraph::test::read_graph_from_matrix_market_file<vertex_t, edge_t, weight_t, false, false>(
handle, target.graph_file_full_path, target.test_weighted, false);
auto graph_view = graph.view();
// call random_walks:
start_random_walks(handle, graph_view, trv_id, sampling_id);
}
template <typename graph_vt>
void start_random_walks(raft::handle_t const& handle,
graph_vt const& graph_view,
traversal_id_t trv_id,
int sampling_id)
{
using vertex_t = typename graph_vt::vertex_type;
using edge_t = typename graph_vt::edge_type;
using weight_t = typename graph_vt::weight_type;
using real_t = float;
edge_t num_paths = 10;
rmm::device_uvector<vertex_t> d_start(num_paths, handle.get_stream());
vertex_t num_vertices = graph_view.number_of_vertices();
fill_start(handle, d_start, num_vertices);
// 0-copy const device view:
//
impl_details::device_const_vector_view<vertex_t, edge_t> d_start_view{d_start.data(),
num_paths};
edge_t max_depth{10};
weight_t p{4};
weight_t q{8};
if (trv_id == traversal_id_t::HORIZONTAL) {
// `node2vec` without alpha buffer:
//
if (sampling_id == 2) {
auto ret_tuple = cugraph::random_walks(
handle,
graph_view,
d_start_view.begin(),
num_paths,
max_depth,
false,
std::make_unique<cugraph::sampling_params_t>(sampling_id, p, q, false));
// check results:
//
bool test_all_paths = cugraph::test::host_check_rw_paths(handle,
graph_view,
std::get<0>(ret_tuple),
std::get<1>(ret_tuple),
std::get<2>(ret_tuple));
ASSERT_TRUE(test_all_paths);
}
      // the alpha buffer case should also be tested for `node2vec`;
      // for the other samplers it is irrelevant, so this block runs
      // for every sampling method:
//
{
auto ret_tuple = cugraph::random_walks(
handle,
graph_view,
d_start_view.begin(),
num_paths,
max_depth,
false,
std::make_unique<cugraph::sampling_params_t>(sampling_id, p, q, true));
// check results:
//
bool test_all_paths = cugraph::test::host_check_rw_paths(handle,
graph_view,
std::get<0>(ret_tuple),
std::get<1>(ret_tuple),
std::get<2>(ret_tuple));
ASSERT_TRUE(test_all_paths);
}
} else { // VERTICAL: needs to be force-called via detail
if (sampling_id == 0) {
impl_details::uniform_selector_t<graph_vt, real_t> selector{handle, graph_view, real_t{0}};
auto ret_tuple = cugraph::detail::random_walks_impl<graph_vt,
decltype(selector),
impl_details::vertical_traversal_t>(
        handle,  // required to prevent clang-format from separating the function name from its namespace
graph_view,
d_start_view,
max_depth,
selector);
// check results:
//
bool test_all_paths = cugraph::test::host_check_rw_paths(handle,
graph_view,
std::get<0>(ret_tuple),
std::get<1>(ret_tuple),
std::get<2>(ret_tuple));
if (!test_all_paths)
std::cout << "starting seed on failure: " << std::get<3>(ret_tuple) << '\n';
ASSERT_TRUE(test_all_paths);
} else if (sampling_id == 1) {
impl_details::biased_selector_t<graph_vt, real_t> selector{handle, graph_view, real_t{0}};
auto ret_tuple = cugraph::detail::random_walks_impl<graph_vt,
decltype(selector),
impl_details::vertical_traversal_t>(
        handle,  // required to prevent clang-format from separating the function name from its namespace
graph_view,
d_start_view,
max_depth,
selector);
// check results:
//
bool test_all_paths = cugraph::test::host_check_rw_paths(handle,
graph_view,
std::get<0>(ret_tuple),
std::get<1>(ret_tuple),
std::get<2>(ret_tuple));
if (!test_all_paths)
std::cout << "starting seed on failure: " << std::get<3>(ret_tuple) << '\n';
ASSERT_TRUE(test_all_paths);
} else {
impl_details::node2vec_selector_t<graph_vt, real_t> selector{
handle, graph_view, real_t{0}, p, q, num_paths};
auto ret_tuple = cugraph::detail::random_walks_impl<graph_vt,
decltype(selector),
impl_details::vertical_traversal_t>(
        handle,  // required to prevent clang-format from separating the function name from its namespace
graph_view,
d_start_view,
max_depth,
selector);
// check results:
//
bool test_all_paths = cugraph::test::host_check_rw_paths(handle,
graph_view,
std::get<0>(ret_tuple),
std::get<1>(ret_tuple),
std::get<2>(ret_tuple));
if (!test_all_paths)
std::cout << "starting seed on failure: " << std::get<3>(ret_tuple) << '\n';
ASSERT_TRUE(test_all_paths);
}
}
}
};
TEST_P(Tests_RandomWalks, Initialize_i32_i32_f)
{
run_current_test<int32_t, int32_t, float>(GetParam());
}
INSTANTIATE_TEST_SUITE_P(
simple_test,
Tests_RandomWalks,
::testing::Combine(::testing::Values(traversal_id_t::HORIZONTAL, traversal_id_t::VERTICAL),
::testing::Values(int{0}, int{1}, int{2}),
::testing::Values(RandomWalks_Usecase("test/datasets/karate.mtx", true),
RandomWalks_Usecase("test/datasets/web-Google.mtx", true),
RandomWalks_Usecase("test/datasets/ljournal-2008.mtx", true),
RandomWalks_Usecase("test/datasets/webbase-1M.mtx", true))));
CUGRAPH_TEST_PROGRAM_MAIN()
|
567bb0f546573f8c5c58f7ec02f7131c1d990e9a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates Inter Process Communication
* features new to SDK 4.1 and uses one process per GPU for computation.
* Note: Multiple processes per single device are possible but not recommended.
* In such cases, one should use IPC events for hardware synchronization.
*/
// Includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime includes
#include <hip/hip_runtime_api.h>
// CUDA utilities and system includes
#include <helper_cuda.h>
int *pArgc = NULL;
char **pArgv = NULL;
#define MAX_DEVICES 8
#define PROCESSES_PER_DEVICE 1
#define DATA_BUF_SIZE 4096
#ifdef __linux
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/version.h>
typedef struct ipcCUDA_st
{
int device;
pid_t pid;
hipIpcEventHandle_t eventHandle;
hipIpcMemHandle_t memHandle;
} ipcCUDA_t;
typedef struct ipcDevices_st
{
int count;
int ordinals[MAX_DEVICES];
} ipcDevices_t;
typedef struct ipcBarrier_st
{
int count;
bool sense;
bool allExit;
} ipcBarrier_t;
ipcBarrier_t *g_barrier = NULL;
bool g_procSense;
int g_processCount;
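// Sense-reversing barrier over the mmap'd g_barrier shared by all forked processes:
// the last arrival resets the count and flips the sense flag to release the waiters.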
void procBarrier()
{
int newCount = __sync_add_and_fetch(&g_barrier->count, 1);
if (newCount == g_processCount)
{
g_barrier->count = 0;
g_barrier->sense = !g_procSense;
}
else
{
while (g_barrier->sense == g_procSense)
{
if (!g_barrier->allExit)
{
sched_yield();
}
else
{
exit(EXIT_FAILURE);
}
}
}
g_procSense = !g_procSense;
}
// CUDA Kernel
__global__ void simpleKernel(int *dst, int *src, int num)
{
// Dummy kernel
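    // dst[idx] = src[idx] / num; the IPC test launches it with num = peer process index + 1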
int idx = blockIdx.x * blockDim.x + threadIdx.x;
dst[idx] = src[idx] / num;
}
void getDeviceCount(ipcDevices_t *devices)
{
// We can't initialize CUDA before fork() so we need to spawn a new process
pid_t pid = fork();
if (0 == pid)
{
int i;
int count, uvaCount = 0;
int uvaOrdinals[MAX_DEVICES];
printf("\nChecking for multiple GPUs...\n");
checkCudaErrors(hipGetDeviceCount(&count));
printf("CUDA-capable device count: %i\n", count);
printf("\nSearching for UVA capable devices...\n");
for (i = 0; i < count; i++)
{
hipDeviceProp_t prop;
checkCudaErrors(hipGetDeviceProperties(&prop, i));
if (prop.unifiedAddressing)
{
uvaOrdinals[uvaCount] = i;
printf("> GPU%d = \"%15s\" IS capable of UVA\n", i, prop.name);
uvaCount += 1;
}
if (prop.computeMode != hipComputeModeDefault)
{
printf("> GPU device must be in Compute Mode Default to run\n");
printf("> Please use nvidia-smi to change the Compute Mode to Default\n");
exit(EXIT_SUCCESS);
}
}
devices->ordinals[0] = uvaOrdinals[0];
if (uvaCount < 2)
{
devices->count = uvaCount;
exit(EXIT_SUCCESS);
}
// Check possibility for peer accesses, relevant to our tests
printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
devices->count = 1;
int canAccessPeer_0i, canAccessPeer_i0;
for (i = 1; i < uvaCount; i++)
{
checkCudaErrors(hipDeviceCanAccessPeer(&canAccessPeer_0i, uvaOrdinals[0], uvaOrdinals[i]));
checkCudaErrors(hipDeviceCanAccessPeer(&canAccessPeer_i0, uvaOrdinals[i], uvaOrdinals[0]));
if (canAccessPeer_0i*canAccessPeer_i0)
{
devices->ordinals[devices->count] = uvaOrdinals[i];
printf("> Two-way peer access between GPU%d and GPU%d: YES\n", devices->ordinals[0], devices->ordinals[devices->count]);
devices->count += 1;
}
}
exit(EXIT_SUCCESS);
}
else
{
int status;
waitpid(pid, &status, 0);
assert(!status);
}
}
inline bool IsAppBuiltAs64()
{
return sizeof(void*) == 8;
}
void runTestMultiKernel(ipcCUDA_t *s_mem, int index)
{
/*
* a) Process 0 loads a reference buffer into GPU0 memory
* b) Other processes launch a kernel on the GPU0 memory using P2P
* c) Process 0 checks the resulting buffer
*/
// memory buffer in gpu
int *d_ptr;
// reference buffer in host memory (do in all processes for rand() consistency)
int h_refData[DATA_BUF_SIZE];
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
h_refData[i] = rand();
}
checkCudaErrors(hipSetDevice(s_mem[index].device));
if (index == 0)
{
printf("\nLaunching kernels...\n");
// host memory buffer for checking results
int h_results[DATA_BUF_SIZE * MAX_DEVICES * PROCESSES_PER_DEVICE];
hipEvent_t event[MAX_DEVICES * PROCESSES_PER_DEVICE];
checkCudaErrors(hipMalloc((void **) &d_ptr, DATA_BUF_SIZE * g_processCount * sizeof(int)));
checkCudaErrors(hipIpcGetMemHandle((hipIpcMemHandle_t *) &s_mem[0].memHandle, (void *) d_ptr));
checkCudaErrors(hipMemcpy((void *) d_ptr, (void *) h_refData, DATA_BUF_SIZE * sizeof(int), hipMemcpyHostToDevice));
// b.1: wait until all event handles are created in other processes
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
checkCudaErrors(hipIpcOpenEventHandle(&event[i], s_mem[i].eventHandle));
}
// b.2: wait until all kernels launched and events recorded
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
checkCudaErrors(hipEventSynchronize(event[i]));
}
// b.3
procBarrier();
checkCudaErrors(hipMemcpy(h_results, d_ptr + DATA_BUF_SIZE,
DATA_BUF_SIZE * (g_processCount - 1) * sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_ptr));
printf("Checking test results...\n");
for (int n = 1; n < g_processCount; n++)
{
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
if (h_refData[i]/(n + 1) != h_results[(n-1) * DATA_BUF_SIZE + i])
{
fprintf(stderr, "Data check error at index %d in process %d!: %i, %i\n",i,
n, h_refData[i], h_results[(n-1) * DATA_BUF_SIZE + i]);
g_barrier->allExit = true;
exit(EXIT_FAILURE);
}
}
}
}
else
{
hipEvent_t event;
        checkCudaErrors(hipEventCreateWithFlags(&event, hipEventDisableTiming | hipEventInterprocess));
checkCudaErrors(hipIpcGetEventHandle((hipIpcEventHandle_t *) &s_mem[index].eventHandle, event));
// b.1: wait until proc 0 initializes device memory
procBarrier();
checkCudaErrors(hipIpcOpenMemHandle((void **) &d_ptr, s_mem[0].memHandle,
hipIpcMemLazyEnablePeerAccess));
printf("> Process %3d: Run kernel on GPU%d, taking source data from and writing results to process %d, GPU%d...\n",
index, s_mem[index].device, 0, s_mem[0].device);
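        // one thread per int in the shared buffer: DATA_BUF_SIZE / 512 = 8 blocks of 512 threads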
const dim3 threads(512, 1);
const dim3 blocks(DATA_BUF_SIZE / threads.x, 1);
hipLaunchKernelGGL(( simpleKernel), dim3(blocks), dim3(threads), 0, 0, d_ptr + index *DATA_BUF_SIZE, d_ptr, index + 1);
checkCudaErrors(hipEventRecord(event));
// b.2
procBarrier();
checkCudaErrors(hipIpcCloseMemHandle(d_ptr));
// b.3: wait till all the events are used up by proc g_processCount - 1
procBarrier();
checkCudaErrors(hipEventDestroy(event));
}
}
#endif
int main(int argc, char **argv)
{
pArgc = &argc;
pArgv = argv;
#if CUDART_VERSION >= 4010 && defined(__linux)
if (!IsAppBuiltAs64())
{
printf("%s is only supported on 64-bit Linux OS and the application must be built as a 64-bit target. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
printf("%s is only supported with Linux OS kernel version 2.6.18 and higher. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
#endif
ipcDevices_t *s_devices = (ipcDevices_t *) mmap(NULL, sizeof(*s_devices),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_devices);
// We can't initialize CUDA before fork() so we need to spawn a new process
getDeviceCount(s_devices);
if (s_devices->count < 1)
{
printf("One or more (SM 2.0) class GPUs are required for %s.\n", argv[0]);
printf("Waiving test.\n");
exit(EXIT_SUCCESS);
}
// initialize our process and barrier data
// if there is more than one device, 1 process per device
if (s_devices->count > 1)
{
g_processCount = PROCESSES_PER_DEVICE * s_devices->count;
}
else
{
g_processCount = 2; // two processes per single device
}
g_barrier = (ipcBarrier_t *) mmap(NULL, sizeof(*g_barrier),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != g_barrier);
memset((void *) g_barrier, 0, sizeof(*g_barrier));
// set local barrier sense flag
g_procSense = 0;
    // shared memory for CUDA memory and event handles
ipcCUDA_t *s_mem = (ipcCUDA_t *) mmap(NULL, g_processCount * sizeof(*s_mem),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_mem);
// initialize shared memory
memset((void *) s_mem, 0, g_processCount * sizeof(*s_mem));
printf("\nSpawning processes and assigning GPUs...\n");
// index = 0,.., g_processCount - 1
int index = 0;
// spawn "g_processCount - 1" additional processes
for (int i = 1; i < g_processCount; i++)
{
int pid = fork();
if (!pid)
{
index = i;
break;
}
else
{
s_mem[i].pid = pid;
}
}
// distribute UVA capable devices among processes (1 device per PROCESSES_PER_DEVICE processes)
// if there is only one device, have 1 extra process
if (s_devices->count > 1)
{
s_mem[index].device = s_devices->ordinals[ index / PROCESSES_PER_DEVICE ];
}
else
{
s_mem[0].device = s_mem[1].device = s_devices->ordinals[ 0 ];
}
printf("> Process %3d -> GPU%d\n", index, s_mem[index].device);
// launch our test
runTestMultiKernel(s_mem, index);
// Cleanup and shutdown
if (index == 0)
{
// wait for processes to complete
for (int i = 1; i < g_processCount; i++)
{
int status;
waitpid(s_mem[i].pid, &status, 0);
assert(WIFEXITED(status));
}
printf("\nShutting down...\n");
for (int i = 0; i < s_devices->count; i++)
{
checkCudaErrors(hipSetDevice(s_devices->ordinals[i]));
}
exit(EXIT_SUCCESS);
}
#else // Using CUDA 4.0 and older or non Linux OS
printf("simpleIPC requires CUDA 4.1 and Linux to build and run, waiving testing\n\n");
exit(EXIT_WAIVED);
#endif
}
| 567bb0f546573f8c5c58f7ec02f7131c1d990e9a.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates Inter Process Communication
* features new to SDK 4.1 and uses one process per GPU for computation.
* Note: Multiple processes per single device are possible but not recommended.
* In such cases, one should use IPC events for hardware synchronization.
*/
// Includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime includes
#include <cuda_runtime_api.h>
// CUDA utilities and system includes
#include <helper_cuda.h>
int *pArgc = NULL;
char **pArgv = NULL;
#define MAX_DEVICES 8
#define PROCESSES_PER_DEVICE 1
#define DATA_BUF_SIZE 4096
#ifdef __linux
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <linux/version.h>
typedef struct ipcCUDA_st
{
int device;
pid_t pid;
cudaIpcEventHandle_t eventHandle;
cudaIpcMemHandle_t memHandle;
} ipcCUDA_t;
typedef struct ipcDevices_st
{
int count;
int ordinals[MAX_DEVICES];
} ipcDevices_t;
typedef struct ipcBarrier_st
{
int count;
bool sense;
bool allExit;
} ipcBarrier_t;
ipcBarrier_t *g_barrier = NULL;
bool g_procSense;
int g_processCount;
void procBarrier()
{
int newCount = __sync_add_and_fetch(&g_barrier->count, 1);
if (newCount == g_processCount)
{
g_barrier->count = 0;
g_barrier->sense = !g_procSense;
}
else
{
while (g_barrier->sense == g_procSense)
{
if (!g_barrier->allExit)
{
sched_yield();
}
else
{
exit(EXIT_FAILURE);
}
}
}
g_procSense = !g_procSense;
}
// CUDA Kernel
__global__ void simpleKernel(int *dst, int *src, int num)
{
// Dummy kernel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
dst[idx] = src[idx] / num;
}
void getDeviceCount(ipcDevices_t *devices)
{
// We can't initialize CUDA before fork() so we need to spawn a new process
pid_t pid = fork();
if (0 == pid)
{
int i;
int count, uvaCount = 0;
int uvaOrdinals[MAX_DEVICES];
printf("\nChecking for multiple GPUs...\n");
checkCudaErrors(cudaGetDeviceCount(&count));
printf("CUDA-capable device count: %i\n", count);
printf("\nSearching for UVA capable devices...\n");
for (i = 0; i < count; i++)
{
cudaDeviceProp prop;
checkCudaErrors(cudaGetDeviceProperties(&prop, i));
if (prop.unifiedAddressing)
{
uvaOrdinals[uvaCount] = i;
printf("> GPU%d = \"%15s\" IS capable of UVA\n", i, prop.name);
uvaCount += 1;
}
if (prop.computeMode != cudaComputeModeDefault)
{
printf("> GPU device must be in Compute Mode Default to run\n");
printf("> Please use nvidia-smi to change the Compute Mode to Default\n");
exit(EXIT_SUCCESS);
}
}
devices->ordinals[0] = uvaOrdinals[0];
if (uvaCount < 2)
{
devices->count = uvaCount;
exit(EXIT_SUCCESS);
}
// Check possibility for peer accesses, relevant to our tests
printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
devices->count = 1;
int canAccessPeer_0i, canAccessPeer_i0;
for (i = 1; i < uvaCount; i++)
{
checkCudaErrors(cudaDeviceCanAccessPeer(&canAccessPeer_0i, uvaOrdinals[0], uvaOrdinals[i]));
checkCudaErrors(cudaDeviceCanAccessPeer(&canAccessPeer_i0, uvaOrdinals[i], uvaOrdinals[0]));
if (canAccessPeer_0i*canAccessPeer_i0)
{
devices->ordinals[devices->count] = uvaOrdinals[i];
printf("> Two-way peer access between GPU%d and GPU%d: YES\n", devices->ordinals[0], devices->ordinals[devices->count]);
devices->count += 1;
}
}
exit(EXIT_SUCCESS);
}
else
{
int status;
waitpid(pid, &status, 0);
assert(!status);
}
}
inline bool IsAppBuiltAs64()
{
return sizeof(void*) == 8;
}
void runTestMultiKernel(ipcCUDA_t *s_mem, int index)
{
/*
* a) Process 0 loads a reference buffer into GPU0 memory
* b) Other processes launch a kernel on the GPU0 memory using P2P
* c) Process 0 checks the resulting buffer
*/
// memory buffer in gpu
int *d_ptr;
// reference buffer in host memory (do in all processes for rand() consistency)
int h_refData[DATA_BUF_SIZE];
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
h_refData[i] = rand();
}
checkCudaErrors(cudaSetDevice(s_mem[index].device));
if (index == 0)
{
printf("\nLaunching kernels...\n");
// host memory buffer for checking results
int h_results[DATA_BUF_SIZE * MAX_DEVICES * PROCESSES_PER_DEVICE];
cudaEvent_t event[MAX_DEVICES * PROCESSES_PER_DEVICE];
checkCudaErrors(cudaMalloc((void **) &d_ptr, DATA_BUF_SIZE * g_processCount * sizeof(int)));
checkCudaErrors(cudaIpcGetMemHandle((cudaIpcMemHandle_t *) &s_mem[0].memHandle, (void *) d_ptr));
checkCudaErrors(cudaMemcpy((void *) d_ptr, (void *) h_refData, DATA_BUF_SIZE * sizeof(int), cudaMemcpyHostToDevice));
// b.1: wait until all event handles are created in other processes
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
checkCudaErrors(cudaIpcOpenEventHandle(&event[i], s_mem[i].eventHandle));
}
// b.2: wait until all kernels launched and events recorded
procBarrier();
for (int i = 1; i < g_processCount; i++)
{
checkCudaErrors(cudaEventSynchronize(event[i]));
}
// b.3
procBarrier();
checkCudaErrors(cudaMemcpy(h_results, d_ptr + DATA_BUF_SIZE,
DATA_BUF_SIZE * (g_processCount - 1) * sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_ptr));
printf("Checking test results...\n");
for (int n = 1; n < g_processCount; n++)
{
for (int i = 0; i < DATA_BUF_SIZE; i++)
{
if (h_refData[i]/(n + 1) != h_results[(n-1) * DATA_BUF_SIZE + i])
{
fprintf(stderr, "Data check error at index %d in process %d!: %i, %i\n",i,
n, h_refData[i], h_results[(n-1) * DATA_BUF_SIZE + i]);
g_barrier->allExit = true;
exit(EXIT_FAILURE);
}
}
}
}
else
{
cudaEvent_t event;
checkCudaErrors(cudaEventCreate(&event, cudaEventDisableTiming | cudaEventInterprocess));
checkCudaErrors(cudaIpcGetEventHandle((cudaIpcEventHandle_t *) &s_mem[index].eventHandle, event));
// b.1: wait until proc 0 initializes device memory
procBarrier();
checkCudaErrors(cudaIpcOpenMemHandle((void **) &d_ptr, s_mem[0].memHandle,
cudaIpcMemLazyEnablePeerAccess));
printf("> Process %3d: Run kernel on GPU%d, taking source data from and writing results to process %d, GPU%d...\n",
index, s_mem[index].device, 0, s_mem[0].device);
const dim3 threads(512, 1);
const dim3 blocks(DATA_BUF_SIZE / threads.x, 1);
simpleKernel<<<blocks, threads>>> (d_ptr + index *DATA_BUF_SIZE, d_ptr, index + 1);
checkCudaErrors(cudaEventRecord(event));
// b.2
procBarrier();
checkCudaErrors(cudaIpcCloseMemHandle(d_ptr));
// b.3: wait till all the events are used up by proc g_processCount - 1
procBarrier();
checkCudaErrors(cudaEventDestroy(event));
}
}
#endif
int main(int argc, char **argv)
{
pArgc = &argc;
pArgv = argv;
#if CUDART_VERSION >= 4010 && defined(__linux)
if (!IsAppBuiltAs64())
{
printf("%s is only supported on 64-bit Linux OS and the application must be built as a 64-bit target. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
printf("%s is only supported with Linux OS kernel version 2.6.18 and higher. Test is being waived.\n", argv[0]);
exit(EXIT_WAIVED);
#endif
ipcDevices_t *s_devices = (ipcDevices_t *) mmap(NULL, sizeof(*s_devices),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_devices);
// We can't initialize CUDA before fork() so we need to spawn a new process
getDeviceCount(s_devices);
if (s_devices->count < 1)
{
printf("One or more (SM 2.0) class GPUs are required for %s.\n", argv[0]);
printf("Waiving test.\n");
exit(EXIT_SUCCESS);
}
// initialize our process and barrier data
// if there is more than one device, 1 process per device
if (s_devices->count > 1)
{
g_processCount = PROCESSES_PER_DEVICE * s_devices->count;
}
else
{
g_processCount = 2; // two processes per single device
}
g_barrier = (ipcBarrier_t *) mmap(NULL, sizeof(*g_barrier),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != g_barrier);
memset((void *) g_barrier, 0, sizeof(*g_barrier));
// set local barrier sense flag
g_procSense = 0;
    // shared memory for CUDA memory and event handles
ipcCUDA_t *s_mem = (ipcCUDA_t *) mmap(NULL, g_processCount * sizeof(*s_mem),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 0, 0);
assert(MAP_FAILED != s_mem);
// initialize shared memory
memset((void *) s_mem, 0, g_processCount * sizeof(*s_mem));
printf("\nSpawning processes and assigning GPUs...\n");
// index = 0,.., g_processCount - 1
int index = 0;
// spawn "g_processCount - 1" additional processes
for (int i = 1; i < g_processCount; i++)
{
int pid = fork();
if (!pid)
{
index = i;
break;
}
else
{
s_mem[i].pid = pid;
}
}
// distribute UVA capable devices among processes (1 device per PROCESSES_PER_DEVICE processes)
// if there is only one device, have 1 extra process
if (s_devices->count > 1)
{
s_mem[index].device = s_devices->ordinals[ index / PROCESSES_PER_DEVICE ];
}
else
{
s_mem[0].device = s_mem[1].device = s_devices->ordinals[ 0 ];
}
printf("> Process %3d -> GPU%d\n", index, s_mem[index].device);
// launch our test
runTestMultiKernel(s_mem, index);
// Cleanup and shutdown
if (index == 0)
{
// wait for processes to complete
for (int i = 1; i < g_processCount; i++)
{
int status;
waitpid(s_mem[i].pid, &status, 0);
assert(WIFEXITED(status));
}
printf("\nShutting down...\n");
for (int i = 0; i < s_devices->count; i++)
{
checkCudaErrors(cudaSetDevice(s_devices->ordinals[i]));
}
exit(EXIT_SUCCESS);
}
#else // Using CUDA 4.0 and older or non Linux OS
printf("simpleIPC requires CUDA 4.1 and Linux to build and run, waiving testing\n\n");
exit(EXIT_WAIVED);
#endif
}
|
fda85e4eae95f0ee69666a797d275b69e14ab07d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include <stdio.h>
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// y = xhat*g + b
template <typename T, typename V, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_NC(
T* Y,
float* Mean,
float* Rstd,
const T* __restrict__ X,
const V* __restrict__ G,
const V* __restrict__ B,
float epsilon, uint K, float rcpK, int relu)
{
uint tid = threadIdx.x;
uint n = blockIdx.x;
uint offset = n*K + tid;
// Mean
V v_mean1, v_mean2;
ew_zero(v_mean1);
ew_zero(v_mean2);
#pragma unroll 4
for (uint k = tid, offsetX = offset; k < K; k += THREADS, offsetX += THREADS)
{
// Single pass over X to compute mean and variance
// var(x) == mean(x**2) - mean(x)**2
V x = load(add_ptr_u(X, offsetX));
v_mean1 = ew_add(v_mean1, x);
v_mean2 = ew_add(v_mean2, ew_sqr(x));
}
float2 mean;
mean.x = ew_sum(v_mean1) * rcpK;
mean.y = ew_sum(v_mean2) * rcpK;
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
{
mean.x += shfl_xor(mean.x, i);
mean.y += shfl_xor(mean.y, i);
}
// if using more than 1 warp, further reduced with shared memory
if (THREADS > 32)
{
__shared__ float2 Share[THREADS/32];
// first thread of each warp store to shared
if ((tid & 31) == 0)
Share[tid/32] = mean;
__syncthreads();
if (tid < THREADS/32)
{
// first warp loads all prior reductions
mean = Share[tid];
// reduce within this first warp
for (int i = THREADS/64; i > 0; i >>= 1)
{
mean.x += shfl_xor(mean.x, i);
mean.y += shfl_xor(mean.y, i);
}
// outputs final reduction to shared
if (tid == 0)
Share[0] = mean;
}
__syncthreads();
// broadcast result to all threads
mean = Share[0];
}
// var = avg(x**2) - avg(x)**2
// rstd = 1/sqrt(var)
float rstd = rsqrtf((mean.y - ew_sqr(mean.x)) + epsilon);
if (tid == 0)
{
Mean[n] = mean.x;
Rstd[n] = rstd;
}
// Norm/Gain/Bias
#pragma unroll 4
for (uint k = tid; k < K; k += THREADS, offset += THREADS)
{
V x = load(add_ptr_u(X, offset));
V g = load(G, k);
V b = load(B, k);
V xhat = ew_mul(ew_sub(x, mean.x), rstd);
V y = ew_add(ew_mul(xhat, g), b);
if (relu)
y = ew_relu(y);
store(add_ptr_u(Y, offset), y);
}
}
template <typename T, typename V>
bool LayerNormForward_NC(hipStream_t stream, int SMs,
T* y,
float* mean,
float* rstd,
const T* x,
const float* g,
const float* b,
float epsilon, int K, int N, float rcpK, int relu)
{
dim3 grid(N, 1, 1);
if ((K & 3) == 0)
{
K >>= 2; // use vector loads
V* Y = (V*)y;
const V* X = (const V*)x;
const float4* G = (const float4*)g;
const float4* B = (const float4*)b;
if (K >= 256)
hipLaunchKernelGGL(( layer_norm_NC<V,float4,256>), dim3(grid), dim3(256),0,stream, Y, mean, rstd, X, G, B, epsilon, K, rcpK, relu);
else
hipLaunchKernelGGL(( layer_norm_NC<V,float4, 32>), dim3(grid), dim3(32),0,stream, Y, mean, rstd, X, G, B, epsilon, K, rcpK, relu);
}
else
{
if (K >= 256)
hipLaunchKernelGGL(( layer_norm_NC<T,float ,256>), dim3(grid), dim3(256),0,stream, y, mean, rstd, x, g, b, epsilon, K, rcpK, relu);
else
hipLaunchKernelGGL(( layer_norm_NC<T,float , 32>), dim3(grid), dim3(32),0,stream, y, mean, rstd, x, g, b, epsilon, K, rcpK, relu);
}
return true; // TODO
}
template bool LayerNormForward_NC<float,float4>(hipStream_t stream, int SMs, float* y, float* mean, float* rstd, const float* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormForward_NC<ehalf,ehalf4>(hipStream_t stream, int SMs, ehalf* y, float* mean, float* rstd, const ehalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormForward_NC<bhalf,bhalf4>(hipStream_t stream, int SMs, bhalf* y, float* mean, float* rstd, const bhalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
// Sum across the N axis requires a separate kernel.
// dg = sum(dy * xhat(x), axis=0)
// db = sum(dy, axis=0)
// Don't use vector loads here as we want to maximize the number of blocks
template <typename T, int U>
__global__ void __launch_bounds__(32) layer_norm_dg_db_NC(
float* DG,
float* DB,
const T* __restrict__ DY,
const T* __restrict__ X,
const float* __restrict__ Gain,
const float* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
float epsilon, int K, int N, int relu)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int shift = 5 - U; // 4
int mask = (1 << shift) - 1; // 15
int k = (bid << shift) + (tid & mask); // b*16 + 0-15
int n0 = (tid >> shift) << 2; // 0,4
int nk = n0*K + k;
bool b = k < K;
int strideK = K << (2 + U);
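    // thread layout: each 32-thread block covers (32 >> U) columns; (1 << U) threads share a
    // column and each accumulates 4 rows per pass, i.e. (4 << U) rows (strideK elements) per iteration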
float gain = 1.0f, bias = 0.0f, dg = 0.0f, db = 0.0f;
if (b && relu)
{
gain = Gain[k];
bias = Bias[k];
}
for (int n = n0; n < N; n += (4 << U))
{
int n1 = n + 1;
int n2 = n + 2;
int n3 = n + 3;
int nk1 = nk + K;
int nk2 = nk1 + K;
int nk3 = nk2 + K;
float x0 = load( X, nk, b);
float x1 = load( X, nk1, b && (n1 < N));
float x2 = load( X, nk2, b && (n2 < N));
float x3 = load( X, nk3, b && (n3 < N));
float dy0 = load(DY, nk, b);
float dy1 = load(DY, nk1, b && (n1 < N));
float dy2 = load(DY, nk2, b && (n2 < N));
float dy3 = load(DY, nk3, b && (n3 < N));
float mean0 = Mean[n];
float rstd0 = Rstd[n];
float mean1 = 0.0f, rstd1 = 0.0f;
float mean2 = 0.0f, rstd2 = 0.0f;
float mean3 = 0.0f, rstd3 = 0.0f;
if (n1 < N)
{
mean1 = Mean[n1];
rstd1 = Rstd[n1];
}
if (n2 < N)
{
mean2 = Mean[n2];
rstd2 = Rstd[n2];
}
if (n3 < N)
{
mean3 = Mean[n3];
rstd3 = Rstd[n3];
}
float xhat0 = (x0 - mean0) * rstd0;
float xhat1 = (x1 - mean1) * rstd1;
float xhat2 = (x2 - mean2) * rstd2;
float xhat3 = (x3 - mean3) * rstd3;
if (relu)
{
dy0 = ew_relu_grad(dy0, xhat0 * gain + bias);
dy1 = ew_relu_grad(dy1, xhat1 * gain + bias);
dy2 = ew_relu_grad(dy2, xhat2 * gain + bias);
dy3 = ew_relu_grad(dy3, xhat3 * gain + bias);
}
dg += dy0 * xhat0;
dg += dy1 * xhat1;
dg += dy2 * xhat2;
dg += dy3 * xhat3;
db += dy0;
db += dy1;
db += dy2;
db += dy3;
nk += strideK;
}
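    // combine the partial dg/db of the (1 << U) threads sharing column k (they differ only in the top U lane bits)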
#pragma unroll
for (int i = 16; i > (1 << (4-U)); i >>= 1)
{
dg += shfl_xor(dg, i);
db += shfl_xor(db, i);
}
store(DG, dg, k, b);
store(DB, db, k, b);
}
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// dy = dy * g
// sum1 = sum(xhat * dy, axis=1)
// sum2 = sum(dy, axis=1)
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * xstdr
template <typename T, typename V, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_dx_NC(
T* DX,
const T* __restrict__ DY,
const T* __restrict__ X,
const V* __restrict__ Gain,
const V* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
float epsilon, int K, float rcpK, int relu)
{
__shared__ float Share1[THREADS>>5];
__shared__ float Share2[THREADS>>5];
int tid = threadIdx.x;
int n = blockIdx.x;
int offset = n*K + tid;
float mean = Mean[n];
float rstd = Rstd[n];
const T* X1 = X + offset;
const T* Y1 = DY + offset;
V v_sum1, v_sum2;
ew_zero(v_sum1);
ew_zero(v_sum2);
for (int k = tid; k < K; k += THREADS)
{
V x = load(X1);
V dy = load(Y1);
V g = load(Gain, k);
V b = load(Bias, k, relu != 0);
V xhat = ew_mul(ew_sub(x, mean), rstd);
if (relu)
dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
dy = ew_mul(dy, g);
v_sum1 = ew_add(v_sum1, ew_mul(dy, xhat));
v_sum2 = ew_add(v_sum2, dy);
X1 += THREADS;
Y1 += THREADS;
}
float sum1 = ew_sum(v_sum1);
float sum2 = ew_sum(v_sum2);
// reduce within warp
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
sum1 += shfl_xor(sum1, i);
sum2 += shfl_xor(sum2, i);
}
// first thread of each warp store to shared
if ((tid & 31) == 0)
{
Share1[tid >> 5] = sum1;
Share2[tid >> 5] = sum2;
}
__syncthreads();
if (tid < (THREADS>>5))
{
// first warp loads all prior reductions
sum1 = Share1[tid];
sum2 = Share2[tid];
// reduce within this last warp
#pragma unroll
for (int i = (THREADS>>6); i > 0; i >>= 1)
{
sum1 += shfl_xor(sum1, i);
sum2 += shfl_xor(sum2, i);
}
// outputs final reduction to shared
Share1[tid] = sum1;
Share2[tid] = sum2;
}
__syncthreads();
// broadcast result to all threads
sum1 = Share1[0];
sum2 = Share2[0];
X += offset;
DY += offset;
DX += offset;
for (int k = tid; k < K; k += THREADS)
{
V x = load(X);
V dy = load(DY);
V g = load(Gain, k);
V b = load(Bias, k, relu != 0);
V xhat = ew_mul(ew_sub(x, mean), rstd);
if (relu)
dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
dy = ew_mul(dy, g);
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * rstd;
V dx = ew_mul(ew_sub(dy, ew_mul(ew_add(ew_mul(xhat, sum1), sum2), rcpK)), rstd);
store(DX, dx);
X += THREADS;
DY += THREADS;
DX += THREADS;
}
}
template <typename T, typename V>
bool LayerNormBackward_NC(hipStream_t stream, int SMs,
T* dx,
float* dg,
float* db,
const T* dy,
const T* x,
const float* g,
const float* b,
const float* mean,
const float* rstd,
float epsilon, int K, int N, float rcpK, int relu)
{
int K32 = K >> 5;
// optimize layer_norm_backward1 for highest occupancy
if (K32 >= 28*16)
{
int gridK = K32 + ((K & 31) != 0);
hipLaunchKernelGGL(( layer_norm_dg_db_NC<T,0>), dim3(gridK), dim3(32), 0, stream, dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
else if (K32 >= 28*8)
{
int gridK = (K >> 4) + ((K & 15) != 0);
hipLaunchKernelGGL(( layer_norm_dg_db_NC<T,1>), dim3(gridK), dim3(32), 0, stream, dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
else if (K32 >= 28*4)
{
int gridK = (K >> 3) + ((K & 7) != 0);
hipLaunchKernelGGL(( layer_norm_dg_db_NC<T,2>), dim3(gridK), dim3(32), 0, stream, dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
else
{
int gridK = (K >> 2) + ((K & 3) != 0);
hipLaunchKernelGGL(( layer_norm_dg_db_NC<T,3>), dim3(gridK), dim3(32), 0, stream, dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
if ((K & 3) == 0)
{
V* DX = ( V*)dx;
const V* DY = (const V*)dy; // in place op
const V* X = (const V*)x;
const float4* Gain = (const float4*)g;
const float4* Bias = (const float4*)b;
K >>= 2;
//if (K >= 1024)
// layer_norm_dx_NC<VB,VF,float4,1024><<<N,1024,0,stream>>>(DX, DY, X, mean, rstd, epsilon, K, rcpK);
if (K >= 256)
hipLaunchKernelGGL(( layer_norm_dx_NC<V,float4, 256>), dim3(N), dim3(256),0,stream, DX, DY, X, Gain, Bias, mean, rstd, epsilon, K, rcpK, relu);
else
hipLaunchKernelGGL(( layer_norm_dx_NC<V,float4, 64>), dim3(N), dim3(64),0,stream, DX, DY, X, Gain, Bias, mean, rstd, epsilon, K, rcpK, relu);
}
else
{
//if (K >= 1024)
// layer_norm_dx_NC<B,F,float,1024><<<N,1024,0,stream>>>(dx, (const B*)dx, x, mean, rstd, epsilon, K, rcpK);
if (K >= 256)
hipLaunchKernelGGL(( layer_norm_dx_NC<T,float, 256>), dim3(N), dim3(256),0,stream, dx, dy, x, g, b, mean, rstd, epsilon, K, rcpK, relu);
else
hipLaunchKernelGGL(( layer_norm_dx_NC<T,float, 64>), dim3(N), dim3(64),0,stream, dx, dy, x, g, b, mean, rstd, epsilon, K, rcpK, relu);
}
return true; // TODO
}
template bool LayerNormBackward_NC<float,float4>(hipStream_t stream, int SMs, float* dx, float* dg, float* db, const float* dy, const float* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_NC<ehalf,ehalf4>(hipStream_t stream, int SMs, ehalf* dx, float* dg, float* db, const ehalf* dy, const ehalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_NC<bhalf,bhalf4>(hipStream_t stream, int SMs, bhalf* dx, float* dg, float* db, const bhalf* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// y = xhat*g + b
template <typename T, typename V, int U>
__global__ void layer_norm_segmented_nc(
T* Y,
float* Mean,
float* Rstd,
const T* __restrict__ X,
const V* __restrict__ G,
const V* __restrict__ B,
float epsilon, uint N, uint SK, uint K, float rcpK, int relu, int thread2)
{
__shared__ float2 Share[32];
uint tid = threadIdx.x;
if (blockDim.x > 32)
{
// Allows non-power of 2 threads to work
float2 zero = {0.0f, 0.0f};
if (tid < 32)
Share[tid] = zero;
__syncthreads();
}
uint s = blockIdx.x;
uint n = blockIdx.y;
uint t = (tid & 0x3e0)*U + (tid & 31); // 0x3e0 = -32 & 1023
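    // i.e. warp w covers U consecutive 32-wide column chunks starting at w*32*U; the lane index
    // selects the position within each chunk (the loads below step by 32 columns per unroll)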
uint k = s*K + t;
uint m = s*N + n;
uint offset = n*SK + k;
// Load X
V xval[U];
X = add_ptr_u(X, offset);
for (int i = 0; i < U; i++)
xval[i] = load(X, i*32, t + i*32 < K);
// Begin mean/variance reductions
V mean1[U], mean2[U];
for (int i = 0; i < U; i++)
{
mean1[i] = xval[i];
mean2[i] = ew_sqr(xval[i]);
}
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
{
mean1[i] = ew_add(mean1[i], mean1[i+j]);
mean2[i] = ew_add(mean2[i], mean2[i+j]);
}
float2 stats;
stats.x = ew_sum(mean1[0]) * rcpK;
stats.y = ew_sum(mean2[0]) * rcpK;
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
stats = ew_warp_sum(stats, i);
// reduce across warps
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Share[tid/32] = stats;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
stats = Share[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = thread2/64; i > 0; i >>= 1)
stats = ew_warp_sum(stats, i);
// final reduction to shared
Share[tid] = stats;
}
__syncthreads();
stats = Share[0];
}
// var = avg(x**2) - avg(x)**2
// rstd = 1/sqrt(var)
float mean = stats.x;
float rstd = rsqrtf((stats.y - mean*mean) + epsilon);
if (tid == 0)
{
__stg(add_ptr_u(Mean, m), mean);
__stg(add_ptr_u(Rstd, m), rstd);
}
// Load Gain/Bias
G = add_ptr_u(G, k);
B = add_ptr_u(B, k);
V gain[U], bias[U];
for (int i = 0; i < U; i++)
{
bool b = t + i*32 < K;
gain[i] = load(G, i*32, b);
bias[i] = load(B, i*32, b);
}
// Compute and output norm
Y = add_ptr_u(Y, offset);
for (int i = 0; i < U; i++)
{
V xhat = ew_mul(ew_sub(xval[i], mean), rstd);
V y = ew_add(ew_mul(xhat, gain[i]), bias[i]);
if (relu)
y = ew_relu(y);
store(Y, y, i*32, t + i*32 < K);
}
}
template <typename T, typename V>
bool LayerNormSegmentedForward_NC(hipStream_t stream, int SMs,
T* y,
float* mean,
float* rstd,
const T* x,
const float* g,
const float* b,
float epsilon, uint N, uint S, uint K, float rcpK, int relu)
{
dim3 grid(S, N, 1);
if ((K & 3) == 0)
{
V* Y = (V*)y;
const V* X = (const V*)x;
const float4* G = (const float4*)g;
const float4* B = (const float4*)b;
if (K >= 256)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
hipLaunchKernelGGL(( layer_norm_segmented_nc<V,float4,2>), dim3(grid),dim3(threads),0,stream, Y, mean, rstd, X, G, B, epsilon, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
hipLaunchKernelGGL(( layer_norm_segmented_nc<V,float4,1>), dim3(grid),dim3(threads),0,stream, Y, mean, rstd, X, G, B, epsilon, N, S*K, K, rcpK, relu, thread2);
}
}
else
{
if (K >= 256)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
hipLaunchKernelGGL(( layer_norm_segmented_nc<T,float,8>), dim3(grid),dim3(threads),0,stream, y, mean, rstd, x, g, b, epsilon, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
hipLaunchKernelGGL(( layer_norm_segmented_nc<T,float,4>), dim3(grid),dim3(threads),0,stream, y, mean, rstd, x, g, b, epsilon, N, S*K, K, rcpK, relu, thread2);
}
}
return true; // TODO
}
template bool LayerNormSegmentedForward_NC<float,float4>(hipStream_t stream, int SMs, float* y, float* mean, float* rstd, const float* x, const float* g, const float* b, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
template bool LayerNormSegmentedForward_NC<ehalf,ehalf4>(hipStream_t stream, int SMs, ehalf* y, float* mean, float* rstd, const ehalf* x, const float* g, const float* b, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
template bool LayerNormSegmentedForward_NC<bhalf,bhalf4>(hipStream_t stream, int SMs, bhalf* y, float* mean, float* rstd, const bhalf* x, const float* g, const float* b, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
// Sum across the N axis requires a separate kernel.
// dg = sum(dy * xhat(x), axis=0)
// db = sum(dy, axis=0)
// Don't use vector loads here as we want to maximize the number of blocks
template <typename T>
__global__ void __launch_bounds__(32) layer_norm_segmented_dg_db_nc(
float* DG,
float* DB,
const T* __restrict__ DY,
const T* __restrict__ X,
const float* __restrict__ Gain,
const float* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
uint N, uint SK, uint SKz, uint K, int relu)
{
uint tid = threadIdx.x;
uint bk = blockIdx.x;
uint bs = blockIdx.y;
uint bn = blockIdx.z;
uint t = bk*32 + tid;
uint k = bs*K + t;
bool b = t < K;
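    // grid is (ceil(K/32), S, gridN): this block owns 32 columns of segment bs, and blockIdx.z
    // strides over rows so several blocks can share the N reduction (atomicRed when gridDim.z > 1)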
float gain = 1.0f, bias = 0.0f, dg = 0.0f, db = 0.0f;
if (b && relu)
{
gain = __ldg(add_ptr_u(Gain, k));
bias = __ldg(add_ptr_u(Bias, k));
}
#pragma unroll 1
for (uint n = bn, m = bs*N + bn, nk = bn*SK + k; n < N; n += gridDim.z, m += gridDim.z, nk += SKz)
{
float x = load(add_ptr_u(X, nk), 0, b);
float dy = load(add_ptr_u(DY, nk), 0, b);
float mean = load(add_ptr_u(Mean, m));
float rstd = load(add_ptr_u(Rstd, m));
float xhat = (x - mean) * rstd;
if (relu)
dy = ew_relu_grad(dy, xhat * gain + bias);
dg += dy * xhat;
db += dy;
}
if (b)
{
DG = add_ptr_u(DG, k);
DB = add_ptr_u(DB, k);
if (gridDim.z == 1)
{
__stg(DG, dg);
__stg(DB, db);
}
else
{
atomicRed(DG, dg);
atomicRed(DB, db);
}
}
}
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// dy = dy * g
// sum1 = sum(xhat * dy, axis=1)
// sum2 = sum(dy, axis=1)
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * xstdr
template <typename T, typename V, int U>
__global__ void layer_norm_segmented_dx_nc(
T* DX,
const T* __restrict__ DY,
const T* __restrict__ X,
const V* __restrict__ Gain,
const V* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
uint N, uint SK, uint K, float rcpK, int relu, int thread2)
{
__shared__ float2 Share[32];
uint tid = threadIdx.x;
if (blockDim.x > 32)
{
// Allows non-power of 2 threads to work
float2 zero = {0.0f, 0.0f};
if (tid < 32)
Share[tid] = zero;
__syncthreads();
}
uint s = blockIdx.x;
uint n = blockIdx.y;
uint t = (tid & 0x3e0)*U + (tid & 31); // 0x3e0 = -32 & 1023
uint k = s*K + t;
uint m = s*N + n;
uint offset = n*SK + k;
float mean = __ldg(add_ptr_u(Mean, m));
float rstd = __ldg(add_ptr_u(Rstd, m));
X = add_ptr_u(X, offset);
DY = add_ptr_u(DY, offset);
Gain = add_ptr_u(Gain, k);
V x[U], dy[U], gain[U];
for (int i = 0; i < U; i++)
{
bool b = t + i*32 < K;
x[i] = load(X, i*32, b);
dy[i] = load(DY, i*32, b);
gain[i] = load(Gain, i*32, b);
}
V xhat[U];
if (relu)
{
Bias = add_ptr_u(Bias, k);
for (int i = 0; i < U; i++)
{
V bias = load(Bias, i*32, t + i*32 < K);
xhat[i] = ew_mul(ew_sub(x[i], mean), rstd);
dy[i] = ew_relu_grad(dy[i], ew_add(ew_mul(xhat[i], gain[i]), bias));
}
}
else
{
for (int i = 0; i < U; i++)
xhat[i] = ew_mul(ew_sub(x[i], mean), rstd);
}
V sum1[U], sum2[U];
for (int i = 0; i < U; i++)
{
dy[i] = ew_mul(dy[i], gain[i]);
sum1[i] = ew_mul(dy[i], xhat[i]);
sum2[i] = dy[i];
}
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
{
sum1[i] = ew_add(sum1[i], sum1[i+j]);
sum2[i] = ew_add(sum2[i], sum2[i+j]);
}
float2 sums;
sums.x = ew_sum(sum1[0]);
sums.y = ew_sum(sum2[0]);
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sums = ew_warp_sum(sums, i);
// reduce across warps
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Share[tid/32] = sums;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sums = Share[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = thread2/64; i > 0; i >>= 1)
sums = ew_warp_sum(sums, i);
// final reduction to shared
Share[tid] = sums;
}
__syncthreads();
sums = Share[0];
}
// Compute and store dx
DX = add_ptr_u(DX, offset);
for (int i = 0; i < U; i++)
{
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * rstd;
V dx = ew_mul(ew_sub(dy[i], ew_mul(ew_add(ew_mul(xhat[i], sums.x), sums.y), rcpK)), rstd);
store(DX, dx, i*32, t + i*32 < K);
}
}
template <typename T, typename V>
bool LayerNormSegmentedBackward_NC(hipStream_t stream, int SMs,
T* dx,
float* dg,
float* db,
const T* dy,
const T* x,
const float* g,
const float* b,
const float* mean,
const float* rstd,
float epsilon, uint N, uint S, uint K, float rcpK, int relu)
{
uint gridK = CEIL_DIV(K, 32);
uint gridN = 1;
uint blocksK = gridK * S;
while (gridN < (N>>3) && gridN * blocksK < 32*SMs) gridN += 1;
if (gridN * blocksK > 32*SMs && gridN > 1) gridN -= 1;
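    // split the row reduction over gridN z-blocks, targeting roughly 32 resident blocks per SM;
    // when gridN > 1 the dg/db outputs must be zeroed first and accumulated with atomics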
if (gridN > 1)
{
        // zero the S*K float accumulators (hipMemsetAsync takes a byte count)
        hipMemsetAsync(dg, 0, S*K*sizeof(float), stream);
        hipMemsetAsync(db, 0, S*K*sizeof(float), stream);
}
hipLaunchKernelGGL(( layer_norm_segmented_dg_db_nc<T>), dim3(dim3(gridK,S,gridN)),dim3(32),0,stream, dg, db, dy, x, g, b, mean, rstd, N, S*K, S*K*gridN, K, relu);
dim3 grid(S, N, 1);
if ((K & 3) == 0 && K >= 512)
{
V* DX = ( V*)dx;
const V* DY = (const V*)dy; // in place op
const V* X = (const V*)x;
const float4* G = (const float4*)g;
const float4* B = (const float4*)b;
if (K > 4096)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
hipLaunchKernelGGL(( layer_norm_segmented_dx_nc<V,float4,2>), dim3(grid),dim3(threads),0,stream, DX, DY, X, G, B, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
hipLaunchKernelGGL(( layer_norm_segmented_dx_nc<V,float4,1>), dim3(grid),dim3(threads),0,stream, DX, DY, X, G, B, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
}
else
{
if (K > 4096)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
hipLaunchKernelGGL(( layer_norm_segmented_dx_nc<T,float ,8>), dim3(grid),dim3(threads),0,stream, dx, dy, x, g, b, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
else if (K >= 512)
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
hipLaunchKernelGGL(( layer_norm_segmented_dx_nc<T,float ,4>), dim3(grid),dim3(threads),0,stream, dx, dy, x, g, b, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*1) * 32;
int thread2 = THREAD_POW2(threads);
hipLaunchKernelGGL(( layer_norm_segmented_dx_nc<T,float ,1>), dim3(grid),dim3(threads),0,stream, dx, dy, x, g, b, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
}
return true; // TODO
}
template bool LayerNormSegmentedBackward_NC<float,float4>(hipStream_t stream, int SMs, float* dx, float* dg, float* db, const float* dy, const float* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
template bool LayerNormSegmentedBackward_NC<ehalf,ehalf4>(hipStream_t stream, int SMs, ehalf* dx, float* dg, float* db, const ehalf* dy, const ehalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
template bool LayerNormSegmentedBackward_NC<bhalf,bhalf4>(hipStream_t stream, int SMs, bhalf* dx, float* dg, float* db, const bhalf* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
#endif // GOOGLE_CUDA
| fda85e4eae95f0ee69666a797d275b69e14ab07d.cu |
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include <stdio.h>
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// y = xhat*g + b
template <typename T, typename V, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_NC(
T* Y,
float* Mean,
float* Rstd,
const T* __restrict__ X,
const V* __restrict__ G,
const V* __restrict__ B,
float epsilon, uint K, float rcpK, int relu)
{
uint tid = threadIdx.x;
uint n = blockIdx.x;
uint offset = n*K + tid;
// Mean
V v_mean1, v_mean2;
ew_zero(v_mean1);
ew_zero(v_mean2);
#pragma unroll 4
for (uint k = tid, offsetX = offset; k < K; k += THREADS, offsetX += THREADS)
{
// Single pass over X to compute mean and variance
// var(x) == mean(x**2) - mean(x)**2
V x = load(add_ptr_u(X, offsetX));
v_mean1 = ew_add(v_mean1, x);
v_mean2 = ew_add(v_mean2, ew_sqr(x));
}
float2 mean;
mean.x = ew_sum(v_mean1) * rcpK;
mean.y = ew_sum(v_mean2) * rcpK;
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
{
mean.x += shfl_xor(mean.x, i);
mean.y += shfl_xor(mean.y, i);
}
// if using more than 1 warp, further reduced with shared memory
if (THREADS > 32)
{
__shared__ float2 Share[THREADS/32];
// first thread of each warp store to shared
if ((tid & 31) == 0)
Share[tid/32] = mean;
__syncthreads();
if (tid < THREADS/32)
{
// first warp loads all prior reductions
mean = Share[tid];
// reduce within this first warp
for (int i = THREADS/64; i > 0; i >>= 1)
{
mean.x += shfl_xor(mean.x, i);
mean.y += shfl_xor(mean.y, i);
}
// outputs final reduction to shared
if (tid == 0)
Share[0] = mean;
}
__syncthreads();
// broadcast result to all threads
mean = Share[0];
}
// var = avg(x**2) - avg(x)**2
// rstd = 1/sqrt(var)
float rstd = rsqrtf((mean.y - ew_sqr(mean.x)) + epsilon);
if (tid == 0)
{
Mean[n] = mean.x;
Rstd[n] = rstd;
}
// Norm/Gain/Bias
#pragma unroll 4
for (uint k = tid; k < K; k += THREADS, offset += THREADS)
{
V x = load(add_ptr_u(X, offset));
V g = load(G, k);
V b = load(B, k);
V xhat = ew_mul(ew_sub(x, mean.x), rstd);
V y = ew_add(ew_mul(xhat, g), b);
if (relu)
y = ew_relu(y);
store(add_ptr_u(Y, offset), y);
}
}
template <typename T, typename V>
bool LayerNormForward_NC(CUstream stream, int SMs,
T* y,
float* mean,
float* rstd,
const T* x,
const float* g,
const float* b,
float epsilon, int K, int N, float rcpK, int relu)
{
dim3 grid(N, 1, 1);
if ((K & 3) == 0)
{
K >>= 2; // use vector loads
V* Y = (V*)y;
const V* X = (const V*)x;
const float4* G = (const float4*)g;
const float4* B = (const float4*)b;
if (K >= 256)
layer_norm_NC<V,float4,256><<<grid, 256,0,stream>>>(Y, mean, rstd, X, G, B, epsilon, K, rcpK, relu);
else
layer_norm_NC<V,float4, 32><<<grid, 32,0,stream>>>(Y, mean, rstd, X, G, B, epsilon, K, rcpK, relu);
}
else
{
if (K >= 256)
layer_norm_NC<T,float ,256><<<grid, 256,0,stream>>>(y, mean, rstd, x, g, b, epsilon, K, rcpK, relu);
else
layer_norm_NC<T,float , 32><<<grid, 32,0,stream>>>(y, mean, rstd, x, g, b, epsilon, K, rcpK, relu);
}
return true; // TODO
}
template bool LayerNormForward_NC<float,float4>(CUstream stream, int SMs, float* y, float* mean, float* rstd, const float* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormForward_NC<ehalf,ehalf4>(CUstream stream, int SMs, ehalf* y, float* mean, float* rstd, const ehalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormForward_NC<bhalf,bhalf4>(CUstream stream, int SMs, bhalf* y, float* mean, float* rstd, const bhalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
// Sum across N axis requires a separate kernel.
// dg = sum(dy * xhat(x), axis=0)
// db = sum(dy, axis=0)
// Don't use vector loads here as we want to maximize the number of blocks
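// Blocking scheme: each 32-thread block covers (32 >> U) consecutive columns of K.
// The warp is split into (1 << U) row groups; each group accumulates 4 rows per step while
// striding over N, and the partial dg/db sums from the different row groups are combined
// with warp shuffles before the final store.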
template <typename T, int U>
__global__ void __launch_bounds__(32) layer_norm_dg_db_NC(
float* DG,
float* DB,
const T* __restrict__ DY,
const T* __restrict__ X,
const float* __restrict__ Gain,
const float* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
float epsilon, int K, int N, int relu)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int shift = 5 - U; // 4
int mask = (1 << shift) - 1; // 15
int k = (bid << shift) + (tid & mask); // b*16 + 0-15
int n0 = (tid >> shift) << 2; // 0,4
int nk = n0*K + k;
bool b = k < K;
int strideK = K << (2 + U);
float gain = 1.0f, bias = 0.0f, dg = 0.0f, db = 0.0f;
if (b && relu)
{
gain = Gain[k];
bias = Bias[k];
}
for (int n = n0; n < N; n += (4 << U))
{
int n1 = n + 1;
int n2 = n + 2;
int n3 = n + 3;
int nk1 = nk + K;
int nk2 = nk1 + K;
int nk3 = nk2 + K;
float x0 = load( X, nk, b);
float x1 = load( X, nk1, b && (n1 < N));
float x2 = load( X, nk2, b && (n2 < N));
float x3 = load( X, nk3, b && (n3 < N));
float dy0 = load(DY, nk, b);
float dy1 = load(DY, nk1, b && (n1 < N));
float dy2 = load(DY, nk2, b && (n2 < N));
float dy3 = load(DY, nk3, b && (n3 < N));
float mean0 = Mean[n];
float rstd0 = Rstd[n];
float mean1 = 0.0f, rstd1 = 0.0f;
float mean2 = 0.0f, rstd2 = 0.0f;
float mean3 = 0.0f, rstd3 = 0.0f;
if (n1 < N)
{
mean1 = Mean[n1];
rstd1 = Rstd[n1];
}
if (n2 < N)
{
mean2 = Mean[n2];
rstd2 = Rstd[n2];
}
if (n3 < N)
{
mean3 = Mean[n3];
rstd3 = Rstd[n3];
}
float xhat0 = (x0 - mean0) * rstd0;
float xhat1 = (x1 - mean1) * rstd1;
float xhat2 = (x2 - mean2) * rstd2;
float xhat3 = (x3 - mean3) * rstd3;
if (relu)
{
dy0 = ew_relu_grad(dy0, xhat0 * gain + bias);
dy1 = ew_relu_grad(dy1, xhat1 * gain + bias);
dy2 = ew_relu_grad(dy2, xhat2 * gain + bias);
dy3 = ew_relu_grad(dy3, xhat3 * gain + bias);
}
dg += dy0 * xhat0;
dg += dy1 * xhat1;
dg += dy2 * xhat2;
dg += dy3 * xhat3;
db += dy0;
db += dy1;
db += dy2;
db += dy3;
nk += strideK;
}
#pragma unroll
for (int i = 16; i > (1 << (4-U)); i >>= 1)
{
dg += shfl_xor(dg, i);
db += shfl_xor(db, i);
}
store(DG, dg, k, b);
store(DB, db, k, b);
}
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// dy = dy * g
// sum1 = sum(xhat * dy, axis=1)
// sum2 = sum(dy, axis=1)
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * xstdr
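// Two passes over K: the first accumulates sum1 and sum2 with warp-shuffle plus
// shared-memory reductions across the block, the second recomputes xhat and writes dx.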
template <typename T, typename V, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_dx_NC(
T* DX,
const T* __restrict__ DY,
const T* __restrict__ X,
const V* __restrict__ Gain,
const V* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
float epsilon, int K, float rcpK, int relu)
{
__shared__ float Share1[THREADS>>5];
__shared__ float Share2[THREADS>>5];
int tid = threadIdx.x;
int n = blockIdx.x;
int offset = n*K + tid;
float mean = Mean[n];
float rstd = Rstd[n];
const T* X1 = X + offset;
const T* Y1 = DY + offset;
V v_sum1, v_sum2;
ew_zero(v_sum1);
ew_zero(v_sum2);
for (int k = tid; k < K; k += THREADS)
{
V x = load(X1);
V dy = load(Y1);
V g = load(Gain, k);
V b = load(Bias, k, relu != 0);
V xhat = ew_mul(ew_sub(x, mean), rstd);
if (relu)
dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
dy = ew_mul(dy, g);
v_sum1 = ew_add(v_sum1, ew_mul(dy, xhat));
v_sum2 = ew_add(v_sum2, dy);
X1 += THREADS;
Y1 += THREADS;
}
float sum1 = ew_sum(v_sum1);
float sum2 = ew_sum(v_sum2);
// reduce within warp
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
sum1 += shfl_xor(sum1, i);
sum2 += shfl_xor(sum2, i);
}
// first thread of each warp store to shared
if ((tid & 31) == 0)
{
Share1[tid >> 5] = sum1;
Share2[tid >> 5] = sum2;
}
__syncthreads();
if (tid < (THREADS>>5))
{
// first warp loads all prior reductions
sum1 = Share1[tid];
sum2 = Share2[tid];
// reduce within this last warp
#pragma unroll
for (int i = (THREADS>>6); i > 0; i >>= 1)
{
sum1 += shfl_xor(sum1, i);
sum2 += shfl_xor(sum2, i);
}
// outputs final reduction to shared
Share1[tid] = sum1;
Share2[tid] = sum2;
}
__syncthreads();
// broadcast result to all threads
sum1 = Share1[0];
sum2 = Share2[0];
X += offset;
DY += offset;
DX += offset;
for (int k = tid; k < K; k += THREADS)
{
V x = load(X);
V dy = load(DY);
V g = load(Gain, k);
V b = load(Bias, k, relu != 0);
V xhat = ew_mul(ew_sub(x, mean), rstd);
if (relu)
dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
dy = ew_mul(dy, g);
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * rstd;
V dx = ew_mul(ew_sub(dy, ew_mul(ew_add(ew_mul(xhat, sum1), sum2), rcpK)), rstd);
store(DX, dx);
X += THREADS;
DY += THREADS;
DX += THREADS;
}
}
template <typename T, typename V>
bool LayerNormBackward_NC(CUstream stream, int SMs,
T* dx,
float* dg,
float* db,
const T* dy,
const T* x,
const float* g,
const float* b,
const float* mean,
const float* rstd,
float epsilon, int K, int N, float rcpK, int relu)
{
int K32 = K >> 5;
// optimize layer_norm_backward1 for highest occupancy
if (K32 >= 28*16)
{
int gridK = K32 + ((K & 31) != 0);
layer_norm_dg_db_NC<T,0><<<gridK, 32, 0, stream>>>(dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
else if (K32 >= 28*8)
{
int gridK = (K >> 4) + ((K & 15) != 0);
layer_norm_dg_db_NC<T,1><<<gridK, 32, 0, stream>>>(dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
else if (K32 >= 28*4)
{
int gridK = (K >> 3) + ((K & 7) != 0);
layer_norm_dg_db_NC<T,2><<<gridK, 32, 0, stream>>>(dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
else
{
int gridK = (K >> 2) + ((K & 3) != 0);
layer_norm_dg_db_NC<T,3><<<gridK, 32, 0, stream>>>(dg, db, dy, x, g, b, mean, rstd, epsilon, K, N, relu);
}
if ((K & 3) == 0)
{
V* DX = ( V*)dx;
const V* DY = (const V*)dy; // in place op
const V* X = (const V*)x;
const float4* Gain = (const float4*)g;
const float4* Bias = (const float4*)b;
K >>= 2;
//if (K >= 1024)
// layer_norm_dx_NC<VB,VF,float4,1024><<<N,1024,0,stream>>>(DX, DY, X, mean, rstd, epsilon, K, rcpK);
if (K >= 256)
layer_norm_dx_NC<V,float4, 256><<<N, 256,0,stream>>>(DX, DY, X, Gain, Bias, mean, rstd, epsilon, K, rcpK, relu);
else
layer_norm_dx_NC<V,float4, 64><<<N, 64,0,stream>>>(DX, DY, X, Gain, Bias, mean, rstd, epsilon, K, rcpK, relu);
}
else
{
//if (K >= 1024)
// layer_norm_dx_NC<B,F,float,1024><<<N,1024,0,stream>>>(dx, (const B*)dx, x, mean, rstd, epsilon, K, rcpK);
if (K >= 256)
layer_norm_dx_NC<T,float, 256><<<N, 256,0,stream>>>(dx, dy, x, g, b, mean, rstd, epsilon, K, rcpK, relu);
else
layer_norm_dx_NC<T,float, 64><<<N, 64,0,stream>>>(dx, dy, x, g, b, mean, rstd, epsilon, K, rcpK, relu);
}
return true; // TODO
}
template bool LayerNormBackward_NC<float,float4>(CUstream stream, int SMs, float* dx, float* dg, float* db, const float* dy, const float* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_NC<ehalf,ehalf4>(CUstream stream, int SMs, ehalf* dx, float* dg, float* db, const ehalf* dy, const ehalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_NC<bhalf,bhalf4>(CUstream stream, int SMs, bhalf* dx, float* dg, float* db, const bhalf* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// y = xhat*g + b
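// Segmented variant: normalization statistics are computed per (row, segment) pair rather
// than per row. blockIdx.x selects the segment s and blockIdx.y the row n; mean/rstd are
// stored at index s*N + n and the row stride is SK = S*K.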
template <typename T, typename V, int U>
__global__ void layer_norm_segmented_nc(
T* Y,
float* Mean,
float* Rstd,
const T* __restrict__ X,
const V* __restrict__ G,
const V* __restrict__ B,
float epsilon, uint N, uint SK, uint K, float rcpK, int relu, int thread2)
{
__shared__ float2 Share[32];
uint tid = threadIdx.x;
if (blockDim.x > 32)
{
// Allows non-power of 2 threads to work
float2 zero = {0.0f, 0.0f};
if (tid < 32)
Share[tid] = zero;
__syncthreads();
}
uint s = blockIdx.x;
uint n = blockIdx.y;
uint t = (tid & 0x3e0)*U + (tid & 31); // 0x3e0 = -32 & 1023
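    // Warp w therefore covers elements [32*U*w, 32*U*(w+1)) of the segment: each thread
    // loads U elements strided by 32, with loads past K predicated off.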
uint k = s*K + t;
uint m = s*N + n;
uint offset = n*SK + k;
// Load X
V xval[U];
X = add_ptr_u(X, offset);
for (int i = 0; i < U; i++)
xval[i] = load(X, i*32, t + i*32 < K);
// Begin mean/variance reductions
V mean1[U], mean2[U];
for (int i = 0; i < U; i++)
{
mean1[i] = xval[i];
mean2[i] = ew_sqr(xval[i]);
}
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
{
mean1[i] = ew_add(mean1[i], mean1[i+j]);
mean2[i] = ew_add(mean2[i], mean2[i+j]);
}
float2 stats;
stats.x = ew_sum(mean1[0]) * rcpK;
stats.y = ew_sum(mean2[0]) * rcpK;
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
stats = ew_warp_sum(stats, i);
// reduce across warps
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Share[tid/32] = stats;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
stats = Share[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = thread2/64; i > 0; i >>= 1)
stats = ew_warp_sum(stats, i);
// final reduction to shared
Share[tid] = stats;
}
__syncthreads();
stats = Share[0];
}
// var = avg(x**2) - avg(x)**2
// rstd = 1/sqrt(var)
float mean = stats.x;
float rstd = rsqrtf((stats.y - mean*mean) + epsilon);
if (tid == 0)
{
__stg(add_ptr_u(Mean, m), mean);
__stg(add_ptr_u(Rstd, m), rstd);
}
// Load Gain/Bias
G = add_ptr_u(G, k);
B = add_ptr_u(B, k);
V gain[U], bias[U];
for (int i = 0; i < U; i++)
{
bool b = t + i*32 < K;
gain[i] = load(G, i*32, b);
bias[i] = load(B, i*32, b);
}
// Compute and output norm
Y = add_ptr_u(Y, offset);
for (int i = 0; i < U; i++)
{
V xhat = ew_mul(ew_sub(xval[i], mean), rstd);
V y = ew_add(ew_mul(xhat, gain[i]), bias[i]);
if (relu)
y = ew_relu(y);
store(Y, y, i*32, t + i*32 < K);
}
}
template <typename T, typename V>
bool LayerNormSegmentedForward_NC(CUstream stream, int SMs,
T* y,
float* mean,
float* rstd,
const T* x,
const float* g,
const float* b,
float epsilon, uint N, uint S, uint K, float rcpK, int relu)
{
dim3 grid(S, N, 1);
if ((K & 3) == 0)
{
V* Y = (V*)y;
const V* X = (const V*)x;
const float4* G = (const float4*)g;
const float4* B = (const float4*)b;
if (K >= 256)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
layer_norm_segmented_nc<V,float4,2><<<grid,threads,0,stream>>>(Y, mean, rstd, X, G, B, epsilon, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
layer_norm_segmented_nc<V,float4,1><<<grid,threads,0,stream>>>(Y, mean, rstd, X, G, B, epsilon, N, S*K, K, rcpK, relu, thread2);
}
}
else
{
if (K >= 256)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
layer_norm_segmented_nc<T,float,8><<<grid,threads,0,stream>>>(y, mean, rstd, x, g, b, epsilon, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
layer_norm_segmented_nc<T,float,4><<<grid,threads,0,stream>>>(y, mean, rstd, x, g, b, epsilon, N, S*K, K, rcpK, relu, thread2);
}
}
return true; // TODO
}
template bool LayerNormSegmentedForward_NC<float,float4>(CUstream stream, int SMs, float* y, float* mean, float* rstd, const float* x, const float* g, const float* b, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
template bool LayerNormSegmentedForward_NC<ehalf,ehalf4>(CUstream stream, int SMs, ehalf* y, float* mean, float* rstd, const ehalf* x, const float* g, const float* b, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
template bool LayerNormSegmentedForward_NC<bhalf,bhalf4>(CUstream stream, int SMs, bhalf* y, float* mean, float* rstd, const bhalf* x, const float* g, const float* b, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
// Sum across N axis requires a separate kernel.
// dg = sum(dy * xhat(x), axis=0)
// db = sum(dy, axis=0)
// Don't use vector loads here as we want to maximize the number of blocks
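// Grid mapping: blockIdx.x covers 32 columns of K, blockIdx.y selects the segment, and
// blockIdx.z a strided subset of the N rows. With gridDim.z > 1 each block accumulates its
// partial dg/db into global memory with atomics, otherwise it stores its sums directly.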
template <typename T>
__global__ void __launch_bounds__(32) layer_norm_segmented_dg_db_nc(
float* DG,
float* DB,
const T* __restrict__ DY,
const T* __restrict__ X,
const float* __restrict__ Gain,
const float* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
uint N, uint SK, uint SKz, uint K, int relu)
{
uint tid = threadIdx.x;
uint bk = blockIdx.x;
uint bs = blockIdx.y;
uint bn = blockIdx.z;
uint t = bk*32 + tid;
uint k = bs*K + t;
bool b = t < K;
float gain = 1.0f, bias = 0.0f, dg = 0.0f, db = 0.0f;
if (b && relu)
{
gain = __ldg(add_ptr_u(Gain, k));
bias = __ldg(add_ptr_u(Bias, k));
}
#pragma unroll 1
for (uint n = bn, m = bs*N + bn, nk = bn*SK + k; n < N; n += gridDim.z, m += gridDim.z, nk += SKz)
{
float x = load(add_ptr_u(X, nk), 0, b);
float dy = load(add_ptr_u(DY, nk), 0, b);
float mean = load(add_ptr_u(Mean, m));
float rstd = load(add_ptr_u(Rstd, m));
float xhat = (x - mean) * rstd;
if (relu)
dy = ew_relu_grad(dy, xhat * gain + bias);
dg += dy * xhat;
db += dy;
}
if (b)
{
DG = add_ptr_u(DG, k);
DB = add_ptr_u(DB, k);
if (gridDim.z == 1)
{
__stg(DG, dg);
__stg(DB, db);
}
else
{
atomicRed(DG, dg);
atomicRed(DB, db);
}
}
}
// xmean = x - mean(x, axis=1)
// xvar = var(x, axis=1)
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// dy = dy * g
// sum1 = sum(xhat * dy, axis=1)
// sum2 = sum(dy, axis=1)
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * xstdr
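// Same per-(segment, row) block layout as the forward kernel: the two reductions sum1 and
// sum2 are formed in registers, then combined across the warp and (if more than one warp)
// through shared memory before dx is computed.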
template <typename T, typename V, int U>
__global__ void layer_norm_segmented_dx_nc(
T* DX,
const T* __restrict__ DY,
const T* __restrict__ X,
const V* __restrict__ Gain,
const V* __restrict__ Bias,
const float* __restrict__ Mean,
const float* __restrict__ Rstd,
uint N, uint SK, uint K, float rcpK, int relu, int thread2)
{
__shared__ float2 Share[32];
uint tid = threadIdx.x;
if (blockDim.x > 32)
{
// Allows non-power of 2 threads to work
float2 zero = {0.0f, 0.0f};
if (tid < 32)
Share[tid] = zero;
__syncthreads();
}
uint s = blockIdx.x;
uint n = blockIdx.y;
uint t = (tid & 0x3e0)*U + (tid & 31); // 0x3e0 = -32 & 1023
uint k = s*K + t;
uint m = s*N + n;
uint offset = n*SK + k;
float mean = __ldg(add_ptr_u(Mean, m));
float rstd = __ldg(add_ptr_u(Rstd, m));
X = add_ptr_u(X, offset);
DY = add_ptr_u(DY, offset);
Gain = add_ptr_u(Gain, k);
V x[U], dy[U], gain[U];
for (int i = 0; i < U; i++)
{
bool b = t + i*32 < K;
x[i] = load(X, i*32, b);
dy[i] = load(DY, i*32, b);
gain[i] = load(Gain, i*32, b);
}
V xhat[U];
if (relu)
{
Bias = add_ptr_u(Bias, k);
for (int i = 0; i < U; i++)
{
V bias = load(Bias, i*32, t + i*32 < K);
xhat[i] = ew_mul(ew_sub(x[i], mean), rstd);
dy[i] = ew_relu_grad(dy[i], ew_add(ew_mul(xhat[i], gain[i]), bias));
}
}
else
{
for (int i = 0; i < U; i++)
xhat[i] = ew_mul(ew_sub(x[i], mean), rstd);
}
V sum1[U], sum2[U];
for (int i = 0; i < U; i++)
{
dy[i] = ew_mul(dy[i], gain[i]);
sum1[i] = ew_mul(dy[i], xhat[i]);
sum2[i] = dy[i];
}
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
{
sum1[i] = ew_add(sum1[i], sum1[i+j]);
sum2[i] = ew_add(sum2[i], sum2[i+j]);
}
float2 sums;
sums.x = ew_sum(sum1[0]);
sums.y = ew_sum(sum2[0]);
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sums = ew_warp_sum(sums, i);
// reduce across warps
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Share[tid/32] = sums;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sums = Share[tid];
// reduce within this last warp
#pragma unroll 1
for (int i = thread2/64; i > 0; i >>= 1)
sums = ew_warp_sum(sums, i);
// final reduction to shared
Share[tid] = sums;
}
__syncthreads();
sums = Share[0];
}
// Compute and store dx
DX = add_ptr_u(DX, offset);
for (int i = 0; i < U; i++)
{
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * rstd;
V dx = ew_mul(ew_sub(dy[i], ew_mul(ew_add(ew_mul(xhat[i], sums.x), sums.y), rcpK)), rstd);
store(DX, dx, i*32, t + i*32 < K);
}
}
template <typename T, typename V>
bool LayerNormSegmentedBackward_NC(CUstream stream, int SMs,
T* dx,
float* dg,
float* db,
const T* dy,
const T* x,
const float* g,
const float* b,
const float* mean,
const float* rstd,
float epsilon, uint N, uint S, uint K, float rcpK, int relu)
{
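    // Size the dg/db reduction grid: grow gridN (blocks along N) until roughly 32 blocks per
    // SM are in flight or gridN reaches N/8. When gridN > 1 the kernel accumulates into
    // dg/db with atomics, so both buffers are zeroed first.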
uint gridK = CEIL_DIV(K, 32);
uint gridN = 1;
uint blocksK = gridK * S;
while (gridN < (N>>3) && gridN * blocksK < 32*SMs) gridN += 1;
if (gridN * blocksK > 32*SMs && gridN > 1) gridN -= 1;
if (gridN > 1)
{
cuMemsetD32Async((CUdeviceptr)dg, 0, S*K, stream);
cuMemsetD32Async((CUdeviceptr)db, 0, S*K, stream);
}
layer_norm_segmented_dg_db_nc<T><<<dim3(gridK,S,gridN),32,0,stream>>>(dg, db, dy, x, g, b, mean, rstd, N, S*K, S*K*gridN, K, relu);
dim3 grid(S, N, 1);
if ((K & 3) == 0 && K >= 512)
{
V* DX = ( V*)dx;
const V* DY = (const V*)dy; // in place op
const V* X = (const V*)x;
const float4* G = (const float4*)g;
const float4* B = (const float4*)b;
if (K > 4096)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
layer_norm_segmented_dx_nc<V,float4,2><<<grid,threads,0,stream>>>(DX, DY, X, G, B, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
K >>= 2;
layer_norm_segmented_dx_nc<V,float4,1><<<grid,threads,0,stream>>>(DX, DY, X, G, B, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
}
else
{
if (K > 4096)
{
uint threads = CEIL_DIV(K, 32*8) * 32;
int thread2 = THREAD_POW2(threads);
layer_norm_segmented_dx_nc<T,float ,8><<<grid,threads,0,stream>>>(dx, dy, x, g, b, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
else if (K >= 512)
{
uint threads = CEIL_DIV(K, 32*4) * 32;
int thread2 = THREAD_POW2(threads);
layer_norm_segmented_dx_nc<T,float ,4><<<grid,threads,0,stream>>>(dx, dy, x, g, b, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
else
{
uint threads = CEIL_DIV(K, 32*1) * 32;
int thread2 = THREAD_POW2(threads);
layer_norm_segmented_dx_nc<T,float ,1><<<grid,threads,0,stream>>>(dx, dy, x, g, b, mean, rstd, N, S*K, K, rcpK, relu, thread2);
}
}
return true; // TODO
}
template bool LayerNormSegmentedBackward_NC<float,float4>(CUstream stream, int SMs, float* dx, float* dg, float* db, const float* dy, const float* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
template bool LayerNormSegmentedBackward_NC<ehalf,ehalf4>(CUstream stream, int SMs, ehalf* dx, float* dg, float* db, const ehalf* dy, const ehalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
template bool LayerNormSegmentedBackward_NC<bhalf,bhalf4>(CUstream stream, int SMs, bhalf* dx, float* dg, float* db, const bhalf* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, uint N, uint S, uint K, float rcpK, int relu);
#endif // GOOGLE_CUDA
|
36c4123f711d0097e24499dde631d76d9409e228.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from ztranspose_inplace.cu normal z -> c, Fri Sep 11 18:29:21 2015
@author Stan Tomov
@author Mark Gates
*/
#include "common_magma.h"
#define PRECISION_c
#define NB 16
////////////////////////////////////////////////////////////////////////////////
// grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd.
// lower indicates blocks in lower triangle of grid, including diagonal.
// lower blocks cover left side of matrix, including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width (or width-1)
// to cover right side of matrix.
// [ A00 A01 A02 ] [ A00 . . | . . ]
// [ A10 A11 A12 ] [ A10 A11 . | . . ]
// grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ]
// [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ]
// [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ]
//
// See ctranspose_inplace_even for description of threads.
__global__ void ctranspose_inplace_odd( int n, magmaFloatComplex *matrix, int lda )
{
__shared__ magmaFloatComplex sA[ NB ][ NB+1 ];
__shared__ magmaFloatComplex sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x >= blockIdx.y);
int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1));
int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y ));
ii *= NB;
jj *= NB;
magmaFloatComplex *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
magmaFloatComplex *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
////////////////////////////////////////////////////////////////////////////////
// grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even.
// lower indicates blocks in strictly lower triangle of grid, excluding diagonal.
// lower blocks shift up by one to cover left side of matrix including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width
// to cover right side of matrix.
// [ A00 A01 ] [ A10 . | . . ]
// [ A10 A11 ] [ A20 A21 | . . ]
// grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ]
// [ A30 A31 ] [ A40 A41 | A01 A11 ]
// [ A40 A41 ]
//
// Each block is NB x NB threads.
// For non-diagonal block A, block B is symmetric block.
// Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed,
// syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j).
// Threads outside the matrix do not touch memory.
__global__ void ctranspose_inplace_even( int n, magmaFloatComplex *matrix, int lda )
{
__shared__ magmaFloatComplex sA[ NB ][ NB+1 ];
__shared__ magmaFloatComplex sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x > blockIdx.y);
int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y));
int jj = (lower ? (blockIdx.y ) : (blockIdx.x + gridDim.y));
ii *= NB;
jj *= NB;
magmaFloatComplex *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
magmaFloatComplex *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/**
Purpose
-------
ctranspose_inplace_q transposes a square N-by-N matrix in-place.
Same as ctranspose_inplace, but adds queue argument.
Arguments
---------
@param[in]
n INTEGER
The number of rows & columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The N-by-N matrix dA.
On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_ctranspose_inplace_q(
magma_int_t n,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( ldda < n )
info = -3;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
dim3 threads( NB, NB );
int nblock = magma_ceildiv( n, NB );
// need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix.
// block assignment differs depending on whether nblock is odd or even.
if ( nblock % 2 == 1 ) {
dim3 grid( nblock, (nblock+1)/2 );
hipLaunchKernelGGL(( ctranspose_inplace_odd), dim3(grid), dim3(threads), 0, queue , n, dA, ldda );
}
else {
dim3 grid( nblock+1, nblock/2 );
hipLaunchKernelGGL(( ctranspose_inplace_even), dim3(grid), dim3(threads), 0, queue , n, dA, ldda );
}
}
/**
@see magmablas_ctranspose_inplace_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_ctranspose_inplace(
magma_int_t n,
magmaFloatComplex_ptr dA, magma_int_t ldda )
{
magmablas_ctranspose_inplace_q( n, dA, ldda, magma_stream );
}
| 36c4123f711d0097e24499dde631d76d9409e228.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from ztranspose_inplace.cu normal z -> c, Fri Sep 11 18:29:21 2015
@author Stan Tomov
@author Mark Gates
*/
#include "common_magma.h"
#define PRECISION_c
#define NB 16
////////////////////////////////////////////////////////////////////////////////
// grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd.
// lower indicates blocks in lower triangle of grid, including diagonal.
// lower blocks cover left side of matrix, including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width (or width-1)
// to cover right side of matrix.
// [ A00 A01 A02 ] [ A00 . . | . . ]
// [ A10 A11 A12 ] [ A10 A11 . | . . ]
// grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ]
// [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ]
// [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ]
//
// See ctranspose_inplace_even for description of threads.
__global__ void ctranspose_inplace_odd( int n, magmaFloatComplex *matrix, int lda )
{
__shared__ magmaFloatComplex sA[ NB ][ NB+1 ];
__shared__ magmaFloatComplex sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x >= blockIdx.y);
int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1));
int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y ));
ii *= NB;
jj *= NB;
magmaFloatComplex *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
magmaFloatComplex *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
////////////////////////////////////////////////////////////////////////////////
// grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even.
// lower indicates blocks in strictly lower triangle of grid, excluding diagonal.
// lower blocks shift up by one to cover left side of matrix including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width
// to cover right side of matrix.
// [ A00 A01 ] [ A10 . | . . ]
// [ A10 A11 ] [ A20 A21 | . . ]
// grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ]
// [ A30 A31 ] [ A40 A41 | A01 A11 ]
// [ A40 A41 ]
//
// Each block is NB x NB threads.
// For non-diagonal block A, block B is symmetric block.
// Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed,
// syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j).
// Threads outside the matrix do not touch memory.
__global__ void ctranspose_inplace_even( int n, magmaFloatComplex *matrix, int lda )
{
__shared__ magmaFloatComplex sA[ NB ][ NB+1 ];
__shared__ magmaFloatComplex sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x > blockIdx.y);
int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y));
int jj = (lower ? (blockIdx.y ) : (blockIdx.x + gridDim.y));
ii *= NB;
jj *= NB;
magmaFloatComplex *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
magmaFloatComplex *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/**
Purpose
-------
ctranspose_inplace_q transposes a square N-by-N matrix in-place.
Same as ctranspose_inplace, but adds queue argument.
Arguments
---------
@param[in]
n INTEGER
The number of rows & columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The N-by-N matrix dA.
On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_ctranspose_inplace_q(
magma_int_t n,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( ldda < n )
info = -3;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
dim3 threads( NB, NB );
int nblock = magma_ceildiv( n, NB );
// need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix.
// block assignment differs depending on whether nblock is odd or even.
if ( nblock % 2 == 1 ) {
dim3 grid( nblock, (nblock+1)/2 );
ctranspose_inplace_odd<<< grid, threads, 0, queue >>>( n, dA, ldda );
}
else {
dim3 grid( nblock+1, nblock/2 );
ctranspose_inplace_even<<< grid, threads, 0, queue >>>( n, dA, ldda );
}
}
/**
@see magmablas_ctranspose_inplace_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_ctranspose_inplace(
magma_int_t n,
magmaFloatComplex_ptr dA, magma_int_t ldda )
{
magmablas_ctranspose_inplace_q( n, dA, ldda, magma_stream );
}
|
9230dfc9a607a28e87ebc2fe7b778145e7101d56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-------------------------------------------------------------------------
*
 * MATLAB MEX functions for TV image denoising. Checks inputs and parses
 * MATLAB data to C++ data.
*
*
* CODE by Imanol Luengo
* PhD student University of Nottingham
* [email protected]
* 2015
* Modified by Ander Biguri for multi-GPU
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
// http://gpu4vision.icg.tugraz.at/papers/2010/knoll.pdf#pub47
#define MAXTREADS 1024
#define MAX_BUFFER 60
#define BLOCK_SIZE 10 // BLOCK_SIZE^3 must be smaller than MAXTREADS
#include "tvdenoising.hpp"
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
hipDeviceReset();\
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:TVdenoising",hipGetErrorString(__err));\
} \
} while (0)
__device__ __inline__
float divergence(const float* pz, const float* py, const float* px,
long z, long y, long x, long depth, long rows, long cols,
float dz, float dy, float dx)
{
long size2d = rows*cols;
long idx = z * size2d + y * cols + x;
float _div = 0.0f;
if ( z - 1 >= 0 ) {
_div += (pz[idx] - pz[(z-1)*size2d + y*cols + x]) / dz;
} else {
_div += pz[idx];
}
if ( y - 1 >= 0 ) {
_div += (py[idx] - py[z*size2d + (y-1)*cols + x]) / dy;
} else {
_div += py[idx];
}
if ( x - 1 >= 0 ) {
_div += (px[idx] - px[z*size2d + y*cols + (x-1)]) / dx;
} else {
_div += px[idx];
}
return _div;
}
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
long size2d = rows*cols;
long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z + 1 < depth ) {
grad[0] = (u[(z+1)*size2d + y*cols + x] - uidx) / dz;
}
if ( y + 1 < rows ) {
grad[1] = (u[z*size2d + (y+1)*cols + x] - uidx) / dy;
}
if ( x + 1 < cols ) {
grad[2] = (u[z*size2d + y*cols + (x+1)] - uidx) / dx;
}
}
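// TV update kernels (see the reference above): update_p takes a dual ascent step
// q = p + tau*grad(u) and projects (pz,py,px) back onto the unit ball, while update_u
// relaxes u towards f + (1/lambda)*div(p).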
__global__
void update_u(const float* f, const float* pz, const float* py, const float* px, float* u,
float tau, float lambda,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float _div = divergence(pz, py, px, z, y, x, depth, rows, cols, dz, dy, dx);
u[idx] = u[idx] * (1.0f - tau) + tau * (f[idx] + (1.0f/lambda) * _div);
}
__global__
void update_p(const float* u, float* pz, float* py, float* px,
float tau, long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float grad[3] = {0,0,0}, q[3];
gradient(u, grad, z, y, x, depth, rows, cols, dz, dy, dx);
q[0] = pz[idx] + tau * grad[0];
q[1] = py[idx] + tau * grad[1];
q[2] = px[idx] + tau * grad[2];
float norm = fmaxf(1.0f, sqrtf(q[0] * q[0] + q[1] * q[1] + q[2] * q[2]));
pz[idx] = q[0] / norm;
py[idx] = q[1] / norm;
px[idx] = q[2] / norm;
}
// Main function
void tvdenoising(float* src, float* dst, float lambda,
const float* spacing, const long* image_size, int maxIter){
// Prepare for MultiGPU
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
    // 2.-All available devices are equal, they are the same machine (warning thrown)
int dev;
const int devicenamelength = 256; // The length 256 is fixed by spec of hipDeviceProp_t::name
char devicename[devicenamelength];
hipDeviceProp_t deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(dev);
hipGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicename,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n POCS_TV.cu line 277.");
break;
}
}
memset(devicename, 0, devicenamelength);
strcpy(devicename, deviceProp.name);
}
    // We don't know if the devices are being used. Let's check that and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
    // 5% of free memory should be enough, we have almost no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
const size_t pixels_per_slice = image_size[0] * image_size[1] ;
const size_t mem_slice_image = sizeof(float)* pixels_per_slice ;
const size_t mem_size_image = sizeof(float)* total_pixels;
    // Decide how we are handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=1;
//Does everything fit in the GPU?
unsigned int slices_per_split;
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
if(mem_GPU_global> 5*mem_size_image+5*mem_slice_image*buffer_length*2){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*( (image_size[2]+deviceCount-1)/deviceCount + buffer_length*2);
}else{
        // As mem_auxiliary is not expected to be a large value (for a 2000^3 image it is around 28Mbytes), let's for now assume we need it all
size_t mem_free=mem_GPU_global;
splits=(unsigned int)(ceil(((float)(5*mem_size_image)/(float)(deviceCount))/mem_free));
        // Now, there is an overhead here, as each split should have 2 more slices, to account for the overlap between images.
        // Let's make sure these 2 slices fit; if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
        // if the new stuff does not fit in the GPU, it means we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 5*mem_img_each_GPU){
            // one more split should do the job, as it's an edge case.
splits++;
//recompute for later
            slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
        // How many EXTRA buffer slices should be able to fit in here?
mem_free=mem_GPU_global-(5*mem_img_each_GPU);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/5; // we need double whatever this results in, rounded down.
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// Assert
if (mem_GPU_global< 5*mem_img_each_GPU){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","Bad assert. Logic behind spliting flawed! Please tell: [email protected]\n");
}
}
    // Let's try to make the host memory pinned:
    // We already queried the GPU and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,0);
if (isHostRegisterSupported & splits>1){
hipHostRegister(src ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
hipHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
    // Let's allocate auxiliary variables.
float* buffer_u, *buffer_px, *buffer_py, *buffer_pz;
float* h_px, *h_py, *h_pz, *h_u;
if(splits>1){
//These take A LOT of memory and A LOT of time to use. If we can avoid using them, better.
if (buffer_length<maxIter){ // if we do only 1 big iter, they are not needed.
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:Memory","TV dneoising requires 5 times the image memory. Your GPU(s) do not have the required memory.\n This memory will be attempted to allocate on the CPU, Whic may fail or slow the computation by a very significant amount.\n If you want to kill the execution: CTRL+C");
hipHostMalloc((void**)&h_px,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
hipHostMalloc((void**)&h_py,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
hipHostMalloc((void**)&h_pz,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
}
h_u=dst;
}else{
hipHostMalloc((void**)&buffer_u, pixels_per_slice*sizeof(float));
hipHostMalloc((void**)&buffer_px, pixels_per_slice*sizeof(float));
hipHostMalloc((void**)&buffer_py, pixels_per_slice*sizeof(float));
hipHostMalloc((void**)&buffer_pz, pixels_per_slice*sizeof(float));
}
    // We should be good to go memory-wise.
float** d_src =(float**)malloc(deviceCount*sizeof(float*));
float** d_u =(float**)malloc(deviceCount*sizeof(float*));
float** d_px =(float**)malloc(deviceCount*sizeof(float*));
float** d_py =(float**)malloc(deviceCount*sizeof(float*));
float** d_pz =(float**)malloc(deviceCount*sizeof(float*));
//Malloc
for(dev=0;dev<deviceCount;dev++){
hipSetDevice(dev);
// F
hipMalloc((void**)&d_src[dev], mem_img_each_GPU);
// U
hipMalloc((void**)&d_u [dev], mem_img_each_GPU);
// PX
hipMalloc((void**)&d_px[dev], mem_img_each_GPU);
// PY
hipMalloc((void**)&d_py[dev], mem_img_each_GPU);
// PZ
hipMalloc((void**)&d_pz[dev], mem_img_each_GPU);
}
hipDeviceSynchronize();
cudaCheckErrors("Malloc error");
// Create streams
int nStream_device=5;
int nStreams=deviceCount*nStream_device;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
hipStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
// Allocate CPU buffer if needed, warn user if not.
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long buffer_pixels=buffer_length*pixels_per_slice;
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
float tau2, tau1;
for(unsigned int i=0;i<maxIter;i+=(buffer_length)){
for(unsigned int sp=0;sp<splits;sp++){
            // For each iteration we need to compute the whole image. The ordering of these loops
            // needs to be like this due to the bounding layers between splits. If more than 1 split is needed
            // for each GPU then there is no other way than taking the entire memory out of the GPU and putting it back.
            // If the memory can be shared between GPUs fully without extra splits, then there is an easy way of synchronizing the memory
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
// Precompute indices and needed bytes
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*pixels_per_slice;
linear_idx_start=pixels_per_slice*slices_per_split*(sp*deviceCount+dev);
                // Check if it's the first or last chunk
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
                // Let's compute where we start copies and how much. This avoids 3 calls to Memcpy
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemcpyAsync(d_src[dev]+offset_device[dev], src+offset_host[dev] , bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
// All these are async
hipMemcpyAsync(d_u[dev] +offset_device[dev], d_src[dev]+offset_device[dev], bytes_device[dev]*sizeof(float), hipMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
hipMemsetAsync(d_px[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
hipMemsetAsync(d_py[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
hipMemsetAsync(d_pz[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
}
                // we need all the streams to finish
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Memcpy failure");
}
            // if we need to split and it's not the first iteration, then we need to copy from Host memory.
// d_src is the original image, with no change.
if (splits>1 & i>0){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+1]);
hipMemcpyAsync(d_u [dev] +offset_device[dev], h_u +offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+2]);
hipMemcpyAsync(d_px[dev]+offset_device[dev], h_px+offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+2]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+3]);
hipMemcpyAsync(d_py[dev] +offset_device[dev], h_py+offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+3]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipStreamSynchronize(stream[dev*nStream_device+4]);
hipMemcpyAsync(d_pz[dev] +offset_device[dev], h_pz+offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+4]);
}
for (dev = 0; dev < deviceCount; dev++){
hipStreamSynchronize(stream[dev*nStream_device+1]);
hipMemcpyAsync(d_src[dev]+offset_device[dev], src +offset_host[dev], bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
cudaCheckErrors("Memcpy failure on multi split");
}
}
            // Internal iterations.
for(unsigned int ib=0; (ib<(buffer_length)) && ((i+ib)<maxIter); ib++){
tau2 = 0.3f + 0.02f * (i+ib);
tau1 = (1.f/tau2) * ((1.f/6.f) - (5.f/(15.f+(i+ib))));
// bdim and gdim
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
hipLaunchKernelGGL(( update_u), dim3(grid), dim3(block),0,stream[dev*nStream_device], d_src[dev], d_pz[dev], d_py[dev], d_px[dev], d_u[dev], tau1, lambda,
(long)(curr_slices+buffer_length*2), image_size[1],image_size[0],
spacing[2], spacing[1], spacing[0]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
hipLaunchKernelGGL(( update_p), dim3(grid), dim3(block),0,stream[dev*nStream_device], d_u[dev], d_pz[dev], d_py[dev], d_px[dev], tau2,
(long)(curr_slices+buffer_length*2), image_size[1], image_size[0],
spacing[2], spacing[1], spacing[0]);
}
}// END internal iter
            // Synchronize mathematics, make sure bounding pixels are correct
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
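            // Exchange boundary data between chunks: if everything stays resident (splits==1),
            // only the buffer_length halo slices at each device boundary are swapped between
            // neighbouring GPUs through the pinned host buffers; otherwise the whole u (and,
            // if more iterations follow, px/py/pz) is staged back to host memory.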
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
if (dev<deviceCount-1){
// U
hipSetDevice(dev+1);
hipMemcpyAsync(buffer_u , d_u[dev+1] , buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+1]);
hipMemcpyAsync(buffer_px, d_px[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+2]);
hipMemcpyAsync(buffer_py, d_py[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+3]);
hipMemcpyAsync(buffer_pz, d_pz[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev+1)*nStream_device+4]);
hipSetDevice(dev);
hipStreamSynchronize(stream[(dev+1)*nStream_device+1]);
hipMemcpyAsync(d_u[dev] +slices_per_split*pixels_per_slice+buffer_pixels, buffer_u , buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
hipStreamSynchronize(stream[(dev+1)*nStream_device+2]);
hipMemcpyAsync(d_px[dev]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_px, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
hipStreamSynchronize(stream[(dev+1)*nStream_device+3]);
hipMemcpyAsync(d_py[dev]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_py, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
hipStreamSynchronize(stream[(dev+1)*nStream_device+4]);
hipMemcpyAsync(d_pz[dev]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pz, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
hipDeviceSynchronize();
if (dev>0){
// U
hipSetDevice(dev-1);
hipMemcpyAsync(buffer_u, d_u[dev-1] +slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+1]);
hipMemcpyAsync(buffer_px, d_px[dev-1]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+2]);
hipMemcpyAsync(buffer_py, d_py[dev-1]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+3]);
hipMemcpyAsync(buffer_pz, d_pz[dev-1]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[(dev-1)*nStream_device+4]);
hipSetDevice(dev);
hipStreamSynchronize(stream[(dev-1)*nStream_device+1]);
hipMemcpyAsync(d_u[dev] ,buffer_u , buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
hipStreamSynchronize(stream[(dev-1)*nStream_device+2]);
hipMemcpyAsync(d_px[dev],buffer_px, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
hipStreamSynchronize(stream[(dev-1)*nStream_device+3]);
hipMemcpyAsync(d_py[dev],buffer_py, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
hipStreamSynchronize(stream[(dev-1)*nStream_device+4]);
hipMemcpyAsync(d_pz[dev],buffer_pz, buffer_pixels*sizeof(float), hipMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices = ((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start = pixels_per_slice*slices_per_split*(sp*deviceCount+dev);
total_pixels = curr_slices*pixels_per_slice;
hipMemcpyAsync(&h_u[linear_idx_start], d_u [dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
                if ((i+buffer_length)<maxIter){ // If it's the last iteration, we don't need to get these out.
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices = ((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start = pixels_per_slice*slices_per_split*(sp*deviceCount+dev);
total_pixels = curr_slices*pixels_per_slice;
hipMemcpyAsync(&h_px[linear_idx_start], d_px[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+2]);
hipMemcpyAsync(&h_py[linear_idx_start], d_py[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+3]);
hipMemcpyAsync(&h_pz[linear_idx_start], d_pz[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+4]);
}
}
}
}//END splits
}//END main iter
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("TV minimization");
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices = ((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels = curr_slices*pixels_per_slice;
hipMemcpyAsync(dst+slices_per_split*pixels_per_slice*dev, d_u[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Copy result back");
for(dev=0; dev<deviceCount;dev++){
hipFree(d_src[dev]);
hipFree(d_u [dev]);
hipFree(d_pz[dev]);
hipFree(d_py[dev]);
hipFree(d_px[dev]);
}
if(splits>1 && buffer_length<maxIter){
hipHostFree(h_px);
hipHostFree(h_py);
hipHostFree(h_pz);
}else if(splits==1){
hipHostFree(buffer_u);
hipHostFree(buffer_px);
hipHostFree(buffer_py);
hipHostFree(buffer_pz);
}
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]) ;
if (isHostRegisterSupported & splits>1){
hipHostUnregister(src);
hipHostUnregister(dst);
}
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Copy free ");
}
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
| 9230dfc9a607a28e87ebc2fe7b778145e7101d56.cu | /*-------------------------------------------------------------------------
*
 * MATLAB MEX functions for TV image denoising. Checks inputs and parses
 * MATLAB data to C++ data.
*
*
* CODE by Imanol Luengo
* PhD student University of Nottingham
* [email protected]
* 2015
* Modified by Ander Biguri for multi-GPU
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
// http://gpu4vision.icg.tugraz.at/papers/2010/knoll.pdf#pub47
#define MAXTREADS 1024
#define MAX_BUFFER 60
#define BLOCK_SIZE 10 // BLOCK_SIZE^3 must be smaller than MAXTREADS
#include "tvdenoising.hpp"
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
cudaDeviceReset();\
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:TVdenoising",cudaGetErrorString(__err));\
} \
} while (0)
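// The device code below implements a projected-gradient (primal/dual) scheme for
// total-variation denoising: divergence() and gradient() are backward/forward finite
// differences, update_u() relaxes the image u towards f + (1/lambda)*div(p), and
// update_p() takes a gradient step on the dual variable p and re-projects it onto the
// unit ball (the fmaxf(1, |q|) normalisation).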
__device__ __inline__
float divergence(const float* pz, const float* py, const float* px,
long z, long y, long x, long depth, long rows, long cols,
float dz, float dy, float dx)
{
long size2d = rows*cols;
long idx = z * size2d + y * cols + x;
float _div = 0.0f;
if ( z - 1 >= 0 ) {
_div += (pz[idx] - pz[(z-1)*size2d + y*cols + x]) / dz;
} else {
_div += pz[idx];
}
if ( y - 1 >= 0 ) {
_div += (py[idx] - py[z*size2d + (y-1)*cols + x]) / dy;
} else {
_div += py[idx];
}
if ( x - 1 >= 0 ) {
_div += (px[idx] - px[z*size2d + y*cols + (x-1)]) / dx;
} else {
_div += px[idx];
}
return _div;
}
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
long size2d = rows*cols;
long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z + 1 < depth ) {
grad[0] = (u[(z+1)*size2d + y*cols + x] - uidx) / dz;
}
if ( y + 1 < rows ) {
grad[1] = (u[z*size2d + (y+1)*cols + x] - uidx) / dy;
}
if ( x + 1 < cols ) {
grad[2] = (u[z*size2d + y*cols + (x+1)] - uidx) / dx;
}
}
__global__
void update_u(const float* f, const float* pz, const float* py, const float* px, float* u,
float tau, float lambda,
long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float _div = divergence(pz, py, px, z, y, x, depth, rows, cols, dz, dy, dx);
u[idx] = u[idx] * (1.0f - tau) + tau * (f[idx] + (1.0f/lambda) * _div);
}
__global__
void update_p(const float* u, float* pz, float* py, float* px,
float tau, long depth, long rows, long cols,
float dz, float dy, float dx)
{
long x = threadIdx.x + blockIdx.x * blockDim.x;
long y = threadIdx.y + blockIdx.y * blockDim.y;
long z = threadIdx.z + blockIdx.z * blockDim.z;
long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float grad[3] = {0,0,0}, q[3];
gradient(u, grad, z, y, x, depth, rows, cols, dz, dy, dx);
q[0] = pz[idx] + tau * grad[0];
q[1] = py[idx] + tau * grad[1];
q[2] = px[idx] + tau * grad[2];
float norm = fmaxf(1.0f, sqrtf(q[0] * q[0] + q[1] * q[1] + q[2] * q[2]));
pz[idx] = q[0] / norm;
py[idx] = q[1] / norm;
px[idx] = q[2] / norm;
}
// Main function
void tvdenoising(float* src, float* dst, float lambda,
const float* spacing, const long* image_size, int maxIter){
// Prepare for MultiGPU
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
* 2.-All available devices are equal, they are the same machine (warning thrown)
int dev;
const int devicenamelength = 256; // The length 256 is fixed by spec of cudaDeviceProp::name
char devicename[devicenamelength];
cudaDeviceProp deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
cudaSetDevice(dev);
cudaGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicename,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n POCS_TV.cu line 277.");
break;
}
}
memset(devicename, 0, devicenamelength);
strcpy(devicename, deviceProp.name);
}
// We don't know if the devices are being used. Let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// Leaving 5% of the free memory as a margin should be enough; we have almost no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
const size_t pixels_per_slice = image_size[0] * image_size[1] ;
const size_t mem_slice_image = sizeof(float)* pixels_per_slice ;
const size_t mem_size_image = sizeof(float)* total_pixels;
// Decide how we are handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=1;
//Does everything fit in the GPU?
unsigned int slices_per_split;
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
if(mem_GPU_global> 5*mem_size_image+5*mem_slice_image*buffer_length*2){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*( (image_size[2]+deviceCount-1)/deviceCount + buffer_length*2);
}else{
// As mem_auxiliary is not expected to be a large value (for a 2000^3 image it is around 28 Mbytes), let's for now assume we need it all
size_t mem_free=mem_GPU_global;
splits=(unsigned int)(ceil(((float)(5*mem_size_image)/(float)(deviceCount))/mem_free));
// Now, there is an overhead here, as each split should have 2 more slices, to account for the overlap of images.
// Let's make sure these 2 slices fit; if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// if the new stuff does not fit in the GPU, it means we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 5*mem_img_each_GPU){
// one more split should do the job, as it's an edge case.
splits++;
//recompute for later
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
// How many EXTRA buffer slices should be able to fit in here?
mem_free=mem_GPU_global-(5*mem_img_each_GPU);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/5; // we need double whatever this results in, rounded down.
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// Assert
if (mem_GPU_global< 5*mem_img_each_GPU){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","Bad assert. Logic behind spliting flawed! Please tell: [email protected]\n");
}
}
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,0);
if (isHostRegisterSupported & splits>1){
cudaHostRegister(src ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
cudaHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
// Lets allocate auxiliary variables.
float* buffer_u, *buffer_px, *buffer_py, *buffer_pz;
float* h_px, *h_py, *h_pz, *h_u;
if(splits>1){
//These take A LOT of memory and A LOT of time to use. If we can avoid using them, better.
if (buffer_length<maxIter){ // if we do only 1 big iter, they are not needed.
mexWarnMsgIdAndTxt("tvDenoise:tvdenoising:Memory","TV dneoising requires 5 times the image memory. Your GPU(s) do not have the required memory.\n This memory will be attempted to allocate on the CPU, Whic may fail or slow the computation by a very significant amount.\n If you want to kill the execution: CTRL+C");
cudaMallocHost((void**)&h_px,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
cudaMallocHost((void**)&h_py,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
cudaMallocHost((void**)&h_pz,image_size[0]*image_size[1]*image_size[2]*sizeof(float));
cudaCheckErrors("Malloc error on auxiliary variables on CPU.\n Your image is too big to use SART_TV or im3Ddenoise in your current machine");
}
h_u=dst;
}else{
cudaMallocHost((void**)&buffer_u, pixels_per_slice*sizeof(float));
cudaMallocHost((void**)&buffer_px, pixels_per_slice*sizeof(float));
cudaMallocHost((void**)&buffer_py, pixels_per_slice*sizeof(float));
cudaMallocHost((void**)&buffer_pz, pixels_per_slice*sizeof(float));
}
// We should be good to go memory-wise.
float** d_src =(float**)malloc(deviceCount*sizeof(float*));
float** d_u =(float**)malloc(deviceCount*sizeof(float*));
float** d_px =(float**)malloc(deviceCount*sizeof(float*));
float** d_py =(float**)malloc(deviceCount*sizeof(float*));
float** d_pz =(float**)malloc(deviceCount*sizeof(float*));
//Malloc
for(dev=0;dev<deviceCount;dev++){
cudaSetDevice(dev);
// F
cudaMalloc((void**)&d_src[dev], mem_img_each_GPU);
// U
cudaMalloc((void**)&d_u [dev], mem_img_each_GPU);
// PX
cudaMalloc((void**)&d_px[dev], mem_img_each_GPU);
// PY
cudaMalloc((void**)&d_py[dev], mem_img_each_GPU);
// PZ
cudaMalloc((void**)&d_pz[dev], mem_img_each_GPU);
}
cudaDeviceSynchronize();
cudaCheckErrors("Malloc error");
// Create streams
int nStream_device=5;
int nStreams=deviceCount*nStream_device;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
cudaStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
// Allocate CPU buffer if needed, warn user if not.
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long buffer_pixels=buffer_length*pixels_per_slice;
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
float tau2, tau1;
for(unsigned int i=0;i<maxIter;i+=(buffer_length)){
for(unsigned int sp=0;sp<splits;sp++){
// For each iteration we need to compute the whole image. The ordering of these loops
// needs to be like this due to the bounding layers between splits. If more than 1 split is needed
// for each GPU then there is no other way than taking the entire memory out of the GPU and putting it back.
// If the memory can be shared between GPUs fully without extra splits, then there is an easy way of synchronizing the memory
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
// Precompute indices and needed bytes
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*pixels_per_slice;
linear_idx_start=pixels_per_slice*slices_per_split*(sp*deviceCount+dev);
// Check if it's the first or last chunk
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
// let's compute where we start copies and how much. This avoids 3 calls to Memcpy
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemcpyAsync(d_src[dev]+offset_device[dev], src+offset_host[dev] , bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
// All these are async
cudaMemcpyAsync(d_u[dev] +offset_device[dev], d_src[dev]+offset_device[dev], bytes_device[dev]*sizeof(float), cudaMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
cudaMemsetAsync(d_px[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
cudaMemsetAsync(d_py[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
cudaMemsetAsync(d_pz[dev], 0, mem_img_each_GPU,stream[dev*nStream_device]);
}
// we need all the streams to finish
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Memcpy failure");
}
// if we need to split and it's not the first iteration, then we need to copy from Host memory.
// d_src is the original image, with no change.
if (splits>1 & i>0){
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+1]);
cudaMemcpyAsync(d_u [dev] +offset_device[dev], h_u +offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+2]);
cudaMemcpyAsync(d_px[dev]+offset_device[dev], h_px+offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+2]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+3]);
cudaMemcpyAsync(d_py[dev] +offset_device[dev], h_py+offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+3]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaStreamSynchronize(stream[dev*nStream_device+4]);
cudaMemcpyAsync(d_pz[dev] +offset_device[dev], h_pz+offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+4]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaStreamSynchronize(stream[dev*nStream_device+1]);
cudaMemcpyAsync(d_src[dev]+offset_device[dev], src +offset_host[dev], bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
cudaCheckErrors("Memcpy failure on multi split");
}
}
// Internal iterations.
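// Run up to buffer_length TV iterations back to back on each GPU before boundary data
// is exchanged again. tau2 (the dual step) grows and tau1 (the primal relaxation weight)
// shrinks as the global iteration count (i+ib) increases.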
for(unsigned int ib=0; (ib<(buffer_length)) && ((i+ib)<maxIter); ib++){
tau2 = 0.3f + 0.02f * (i+ib);
tau1 = (1.f/tau2) * ((1.f/6.f) - (5.f/(15.f+(i+ib))));
// bdim and gdim
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
update_u<<<grid, block,0,stream[dev*nStream_device]>>>(d_src[dev], d_pz[dev], d_py[dev], d_px[dev], d_u[dev], tau1, lambda,
(long)(curr_slices+buffer_length*2), image_size[1],image_size[0],
spacing[2], spacing[1], spacing[0]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((image_size[0]+block.x-1)/block.x, (image_size[1]+block.y-1)/block.y, (curr_slices+buffer_length*2+block.z-1)/block.z);
update_p<<<grid, block,0,stream[dev*nStream_device]>>>(d_u[dev], d_pz[dev], d_py[dev], d_px[dev], tau2,
(long)(curr_slices+buffer_length*2), image_size[1], image_size[0],
spacing[2], spacing[1], spacing[0]);
}
}// END internal iter
// Synchronize mathematics, make sure bounding pixels are correct
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
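// With a single split, exchange the buffer_length halo slices between neighbouring GPUs
// through the small pinned host buffers, so that finite differences across the split
// boundaries use up-to-date values of u and p.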
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
if (dev<deviceCount-1){
// U
cudaSetDevice(dev+1);
cudaMemcpyAsync(buffer_u , d_u[dev+1] , buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+1]);
cudaMemcpyAsync(buffer_px, d_px[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+2]);
cudaMemcpyAsync(buffer_py, d_py[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+3]);
cudaMemcpyAsync(buffer_pz, d_pz[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev+1)*nStream_device+4]);
cudaSetDevice(dev);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+1]);
cudaMemcpyAsync(d_u[dev] +slices_per_split*pixels_per_slice+buffer_pixels, buffer_u , buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+2]);
cudaMemcpyAsync(d_px[dev]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_px, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+3]);
cudaMemcpyAsync(d_py[dev]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_py, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
cudaStreamSynchronize(stream[(dev+1)*nStream_device+4]);
cudaMemcpyAsync(d_pz[dev]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pz, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
cudaDeviceSynchronize();
if (dev>0){
// U
cudaSetDevice(dev-1);
cudaMemcpyAsync(buffer_u, d_u[dev-1] +slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+1]);
cudaMemcpyAsync(buffer_px, d_px[dev-1]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+2]);
cudaMemcpyAsync(buffer_py, d_py[dev-1]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+3]);
cudaMemcpyAsync(buffer_pz, d_pz[dev-1]+slices_per_split*pixels_per_slice+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[(dev-1)*nStream_device+4]);
cudaSetDevice(dev);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+1]);
cudaMemcpyAsync(d_u[dev] ,buffer_u , buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+1]);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+2]);
cudaMemcpyAsync(d_px[dev],buffer_px, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+2]);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+3]);
cudaMemcpyAsync(d_py[dev],buffer_py, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+3]);
cudaStreamSynchronize(stream[(dev-1)*nStream_device+4]);
cudaMemcpyAsync(d_pz[dev],buffer_pz, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice,stream[(dev)*nStream_device+4]);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices = ((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start = pixels_per_slice*slices_per_split*(sp*deviceCount+dev);
total_pixels = curr_slices*pixels_per_slice;
cudaMemcpyAsync(&h_u[linear_idx_start], d_u [dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
if ((i+buffer_length)<maxIter){ // If it's the last iteration, we don't need to get these out.
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices = ((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start = pixels_per_slice*slices_per_split*(sp*deviceCount+dev);
total_pixels = curr_slices*pixels_per_slice;
cudaMemcpyAsync(&h_px[linear_idx_start], d_px[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+2]);
cudaMemcpyAsync(&h_py[linear_idx_start], d_py[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+3]);
cudaMemcpyAsync(&h_pz[linear_idx_start], d_pz[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+4]);
}
}
}
}//END splits
}//END main iter
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("TV minimization");
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices = ((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels = curr_slices*pixels_per_slice;
cudaMemcpyAsync(dst+slices_per_split*pixels_per_slice*dev, d_u[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Copy result back");
for(dev=0; dev<deviceCount;dev++){
cudaFree(d_src[dev]);
cudaFree(d_u [dev]);
cudaFree(d_pz[dev]);
cudaFree(d_py[dev]);
cudaFree(d_px[dev]);
}
if(splits>1 && buffer_length<maxIter){
cudaFreeHost(h_px);
cudaFreeHost(h_py);
cudaFreeHost(h_pz);
}else if(splits==1){
cudaFreeHost(buffer_u);
cudaFreeHost(buffer_px);
cudaFreeHost(buffer_py);
cudaFreeHost(buffer_pz);
}
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]) ;
if (isHostRegisterSupported & splits>1){
cudaHostUnregister(src);
cudaHostUnregister(dst);
}
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Copy free ");
}
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
|
b3cb6ec08e40a8ddd641d9a28eb077472bbbdfab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/prroi_pool_op.h"
namespace paddle {
namespace operators {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaximumNumBlocks);
}
template <typename T>
__global__ void GPUPRROIPoolForward(const int nthreads,
const T* input_data,
const T* input_rois,
const float spatial_scale,
const int input_channels,
const int height,
const int width,
const int output_channels,
const int pooled_height,
const int pooled_width,
const int* rois_batch_id_data,
T* output_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t i = index; i < nthreads; i += offset) {
// The output is in order (n, c, ph, pw)
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % output_channels;
int n = i / pooled_width / pooled_height / output_channels;
// set roi_batch_id
int roi_batch_id = rois_batch_id_data[n];
// [start, end) interval for spatial sampling
const T* offset_input_rois = input_rois + n * 4;
T roi_start_w = static_cast<T>(offset_input_rois[0]) * spatial_scale;
T roi_start_h = static_cast<T>(offset_input_rois[1]) * spatial_scale;
T roi_end_w = static_cast<T>(offset_input_rois[2]) * spatial_scale;
T roi_end_h = static_cast<T>(offset_input_rois[3]) * spatial_scale;
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(0.0));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(0.0));
// Compute w and h at input feature map
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T win_start_w = roi_start_w + bin_size_w * pw;
T win_start_h = roi_start_h + bin_size_h * ph;
T win_end_w = win_start_w + bin_size_w;
T win_end_h = win_start_h + bin_size_h;
T win_size = max(static_cast<T>(0.0), bin_size_w * bin_size_h);
int input_channel = c;
const T* offset_input_data =
input_data +
(roi_batch_id * input_channels + input_channel) * height * width;
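// Precise RoI pooling: integrate the bilinearly interpolated feature map over the
// (possibly fractional) bin window and divide by the window area, instead of
// max/average pooling over a fixed set of sampling points.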
if (win_size > static_cast<T>(0.0)) {
int s_w = floor(win_start_w);
int e_w = ceil(win_end_w);
int s_h = floor(win_start_h);
int e_h = ceil(win_end_h);
T sum_out = 0;
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
sum_out += PrRoIPoolingMatCalculation(
offset_input_data,
h_iter,
w_iter,
h_iter + 1,
w_iter + 1,
max(win_start_h, static_cast<T>(h_iter)),
max(win_start_w, static_cast<T>(w_iter)),
min(win_end_h, static_cast<T>(h_iter) + static_cast<T>(1.0)),
min(win_end_w, static_cast<T>(w_iter) + static_cast<T>(1.0)),
height,
width);
}
}
output_data[i] = sum_out / win_size;
} else {
output_data[i] = 0.;
}
}
}
template <typename T>
__global__ void GPUPRROIPoolBackward(const int nthreads,
const T* in_data,
const T* input_rois,
const T* output_grad_data,
const float spatial_scale,
const int input_channels,
const int height,
const int width,
const int output_channels,
const int pooled_height,
const int pooled_width,
const int* rois_batch_id_data,
T* input_grad_data,
const T* out_data,
T* input_roi_grad_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) {
// The output is in order (n, c, ph, pw)
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % output_channels;
int n = i / pooled_width / pooled_height / output_channels;
// set roi_batch_id
int roi_batch_id = rois_batch_id_data[n];
int input_channel = c;
int input_offset =
(roi_batch_id * input_channels + input_channel) * height * width;
T* offset_input_grad_data = input_grad_data + input_offset;
const T* offset_output_grad_data = output_grad_data + i;
// [start, end) interval for spatial sampling
const T* offset_input_rois = input_rois + n * 4;
T roi_start_w = static_cast<T>(offset_input_rois[0]) * spatial_scale;
T roi_start_h = static_cast<T>(offset_input_rois[1]) * spatial_scale;
T roi_end_w = static_cast<T>(offset_input_rois[2]) * spatial_scale;
T roi_end_h = static_cast<T>(offset_input_rois[3]) * spatial_scale;
T* offset_input_roi_grad_data = input_roi_grad_data + n * 4;
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(0.0));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(0.0));
// Compute w and h at input feature map
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T win_start_w = roi_start_w + bin_size_w * pw;
T win_start_h = roi_start_h + bin_size_h * ph;
T win_end_w = win_start_w + bin_size_w;
T win_end_h = win_start_h + bin_size_h;
T win_size = max(static_cast<T>(0.0), bin_size_w * bin_size_h);
int s_w = floor(win_start_w);
int e_w = ceil(win_end_w);
int s_h = floor(win_start_h);
int e_h = ceil(win_end_h);
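// Spread the upstream gradient uniformly over the bin (hence the division by win_size),
// accumulate it into the input feature-map gradient, and further below also compute the
// analytic gradient with respect to the RoI coordinates.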
T sum_out = win_size == static_cast<T>(0.)
? static_cast<T>(0.)
: *offset_output_grad_data / win_size;
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
PrRoIPoolingMatDistributeDiff<T>(
offset_input_grad_data,
sum_out,
h_iter,
w_iter,
h_iter + 1,
w_iter + 1,
max(win_start_h, static_cast<T>(h_iter)),
max(win_start_w, static_cast<T>(w_iter)),
min(win_end_h, static_cast<T>(h_iter) + static_cast<T>(1.0)),
min(win_end_w, static_cast<T>(w_iter) + static_cast<T>(1.0)),
height,
width);
}
}
const T* offset_out_data = out_data + i;
const T* offset_in_data = in_data + input_offset;
PrRoIPoolingCoorBackward<T>(s_w,
e_w,
s_h,
e_h,
width,
height,
win_start_w,
win_start_h,
win_end_w,
win_end_h,
pw,
ph,
pooled_width,
pooled_height,
win_size,
spatial_scale,
offset_in_data,
offset_out_data,
offset_input_roi_grad_data,
offset_output_grad_data);
}
}
template <typename T, typename DeviceContext>
class GPUPRROIPoolOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<phi::DenseTensor>("X");
auto* rois = ctx.Input<phi::DenseTensor>("ROIs");
auto* out = ctx.Output<phi::DenseTensor>("Out");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int input_channels = in_dims[1];
auto output_channels = input_channels;
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
// set rois batch id
phi::DenseTensor rois_batch_id_list;
rois_batch_id_list.Resize({rois_num});
int* rois_batch_id_data =
rois_batch_id_list.mutable_data<int>(platform::CPUPlace());
if (ctx.HasInput("BatchRoINums") || rois->lod().empty()) {
auto* batchroinum = ctx.Input<phi::DenseTensor>("BatchRoINums");
phi::DenseTensor batch_index_cpu;
framework::TensorCopySync(
*batchroinum, platform::CPUPlace(), &batch_index_cpu);
int rois_batch_size = batchroinum->dims()[0];
auto* batch_index = batch_index_cpu.data<int64_t>();
size_t c = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int64_t k = 0; k < batch_index[n]; ++k) {
rois_batch_id_data[c] = n;
c = c + 1;
}
}
} else {
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size,
batch_size,
platform::errors::InvalidArgument(
"The rois_batch_size and input(X) batch_size must be the same."));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(
rois_num,
rois_num_with_lod,
platform::errors::InvalidArgument(
"The rois_num from input and lod must be the same."));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
rois_batch_id_data[i] = n;
}
}
}
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
auto cplace = platform::CPUPlace();
auto& dev_ctx = ctx.cuda_device_context();
int bytes = rois_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(
dev_ctx.GetPlace(),
bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = ctx.GetPlace();
memory::Copy(gplace,
roi_id_data,
cplace,
rois_batch_id_data,
bytes,
dev_ctx.stream());
// call cuda kernel function
hipLaunchKernelGGL(( GPUPRROIPoolForward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_size,
in->data<T>(),
rois->data<T>(),
spatial_scale,
input_channels,
height,
width,
output_channels,
pooled_height,
pooled_width,
roi_id_data,
out->mutable_data<T>(ctx.GetPlace()));
}
};
template <typename T, typename DeviceContext>
class GPUPRROIPoolGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<phi::DenseTensor>("X");
auto* rois = ctx.Input<phi::DenseTensor>("ROIs");
auto* out = ctx.Input<phi::DenseTensor>("Out");
auto* output_grad =
ctx.Input<phi::DenseTensor>(framework::GradVarName("Out"));
auto* input_grad =
ctx.Output<phi::DenseTensor>(framework::GradVarName("X"));
auto* input_roi_grad =
ctx.Output<phi::DenseTensor>(framework::GradVarName("ROIs"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
int rois_num = rois->dims()[0];
int input_channels = in->dims()[1];
auto output_channels = input_channels;
int height = in->dims()[2];
int width = in->dims()[3];
if (input_grad || input_roi_grad) {
// set roi batch id
phi::DenseTensor rois_batch_id_list;
rois_batch_id_list.Resize({rois_num});
int* rois_batch_id_data =
rois_batch_id_list.mutable_data<int>(platform::CPUPlace());
if (ctx.HasInput("BatchRoINums") || rois->lod().empty()) {
auto* batchroinum = ctx.Input<phi::DenseTensor>("BatchRoINums");
phi::DenseTensor batch_index_cpu;
framework::TensorCopySync(
*batchroinum, platform::CPUPlace(), &batch_index_cpu);
int rois_batch_size = batchroinum->dims()[0];
auto* batch_index = batch_index_cpu.data<int64_t>();
size_t c = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int64_t k = 0; k < batch_index[n]; ++k) {
rois_batch_id_data[c] = n;
c = c + 1;
}
}
} else {
PADDLE_ENFORCE_EQ(rois->lod().empty(),
false,
platform::errors::InvalidArgument(
"the lod of Input ROIs should not be empty when "
"BatchRoINums is None!"));
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
rois_batch_id_data[i] = n;
}
}
}
auto cplace = platform::CPUPlace();
auto& dev_ctx = ctx.cuda_device_context();
int bytes = rois_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(
dev_ctx.GetPlace(),
bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = ctx.GetPlace();
memory::Copy(gplace,
roi_id_data,
cplace,
rois_batch_id_data,
bytes,
dev_ctx.stream());
input_grad->mutable_data<T>(ctx.GetPlace());
phi::funcs::SetConstant<DeviceContext, T> set_zero;
set_zero(ctx.cuda_device_context(), input_grad, static_cast<T>(0));
input_roi_grad->mutable_data<T>(ctx.GetPlace());
set_zero(ctx.cuda_device_context(), input_roi_grad, static_cast<T>(0));
int output_grad_size = output_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
hipLaunchKernelGGL(( GPUPRROIPoolBackward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_grad_size,
in->data<T>(),
rois->data<T>(),
output_grad->data<T>(),
spatial_scale,
input_channels,
height,
width,
output_channels,
pooled_height,
pooled_width,
roi_id_data,
input_grad->mutable_data<T>(ctx.GetPlace()),
out->data<T>(),
input_roi_grad->mutable_data<T>(ctx.GetPlace()));
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
PD_REGISTER_STRUCT_KERNEL(
prroi_pool, GPU, ALL_LAYOUT, ops::GPUPRROIPoolOpKernel, float, double) {}
PD_REGISTER_STRUCT_KERNEL(prroi_pool_grad,
GPU,
ALL_LAYOUT,
ops::GPUPRROIPoolGradOpKernel,
float,
double) {}
| b3cb6ec08e40a8ddd641d9a28eb077472bbbdfab.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/prroi_pool_op.h"
namespace paddle {
namespace operators {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaximumNumBlocks);
}
template <typename T>
__global__ void GPUPRROIPoolForward(const int nthreads,
const T* input_data,
const T* input_rois,
const float spatial_scale,
const int input_channels,
const int height,
const int width,
const int output_channels,
const int pooled_height,
const int pooled_width,
const int* rois_batch_id_data,
T* output_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t i = index; i < nthreads; i += offset) {
// The output is in order (n, c, ph, pw)
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % output_channels;
int n = i / pooled_width / pooled_height / output_channels;
// set roi_batch_id
int roi_batch_id = rois_batch_id_data[n];
// [start, end) interval for spatial sampling
const T* offset_input_rois = input_rois + n * 4;
T roi_start_w = static_cast<T>(offset_input_rois[0]) * spatial_scale;
T roi_start_h = static_cast<T>(offset_input_rois[1]) * spatial_scale;
T roi_end_w = static_cast<T>(offset_input_rois[2]) * spatial_scale;
T roi_end_h = static_cast<T>(offset_input_rois[3]) * spatial_scale;
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(0.0));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(0.0));
// Compute w and h at input feature map
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T win_start_w = roi_start_w + bin_size_w * pw;
T win_start_h = roi_start_h + bin_size_h * ph;
T win_end_w = win_start_w + bin_size_w;
T win_end_h = win_start_h + bin_size_h;
T win_size = max(static_cast<T>(0.0), bin_size_w * bin_size_h);
int input_channel = c;
const T* offset_input_data =
input_data +
(roi_batch_id * input_channels + input_channel) * height * width;
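// Precise RoI pooling: integrate the bilinearly interpolated feature map over the
// (possibly fractional) bin window and divide by the window area, instead of
// max/average pooling over a fixed set of sampling points.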
if (win_size > static_cast<T>(0.0)) {
int s_w = floor(win_start_w);
int e_w = ceil(win_end_w);
int s_h = floor(win_start_h);
int e_h = ceil(win_end_h);
T sum_out = 0;
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
sum_out += PrRoIPoolingMatCalculation(
offset_input_data,
h_iter,
w_iter,
h_iter + 1,
w_iter + 1,
max(win_start_h, static_cast<T>(h_iter)),
max(win_start_w, static_cast<T>(w_iter)),
min(win_end_h, static_cast<T>(h_iter) + static_cast<T>(1.0)),
min(win_end_w, static_cast<T>(w_iter) + static_cast<T>(1.0)),
height,
width);
}
}
output_data[i] = sum_out / win_size;
} else {
output_data[i] = 0.;
}
}
}
template <typename T>
__global__ void GPUPRROIPoolBackward(const int nthreads,
const T* in_data,
const T* input_rois,
const T* output_grad_data,
const float spatial_scale,
const int input_channels,
const int height,
const int width,
const int output_channels,
const int pooled_height,
const int pooled_width,
const int* rois_batch_id_data,
T* input_grad_data,
const T* out_data,
T* input_roi_grad_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) {
// The output is in order (n, c, ph, pw)
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % output_channels;
int n = i / pooled_width / pooled_height / output_channels;
// set roi_batch_id
int roi_batch_id = rois_batch_id_data[n];
int input_channel = c;
int input_offset =
(roi_batch_id * input_channels + input_channel) * height * width;
T* offset_input_grad_data = input_grad_data + input_offset;
const T* offset_output_grad_data = output_grad_data + i;
// [start, end) interval for spatial sampling
const T* offset_input_rois = input_rois + n * 4;
T roi_start_w = static_cast<T>(offset_input_rois[0]) * spatial_scale;
T roi_start_h = static_cast<T>(offset_input_rois[1]) * spatial_scale;
T roi_end_w = static_cast<T>(offset_input_rois[2]) * spatial_scale;
T roi_end_h = static_cast<T>(offset_input_rois[3]) * spatial_scale;
T* offset_input_roi_grad_data = input_roi_grad_data + n * 4;
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(0.0));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(0.0));
// Compute w and h at input feature map
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T win_start_w = roi_start_w + bin_size_w * pw;
T win_start_h = roi_start_h + bin_size_h * ph;
T win_end_w = win_start_w + bin_size_w;
T win_end_h = win_start_h + bin_size_h;
T win_size = max(static_cast<T>(0.0), bin_size_w * bin_size_h);
int s_w = floor(win_start_w);
int e_w = ceil(win_end_w);
int s_h = floor(win_start_h);
int e_h = ceil(win_end_h);
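// Spread the upstream gradient uniformly over the bin (hence the division by win_size),
// accumulate it into the input feature-map gradient, and further below also compute the
// analytic gradient with respect to the RoI coordinates.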
T sum_out = win_size == static_cast<T>(0.)
? static_cast<T>(0.)
: *offset_output_grad_data / win_size;
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
PrRoIPoolingMatDistributeDiff<T>(
offset_input_grad_data,
sum_out,
h_iter,
w_iter,
h_iter + 1,
w_iter + 1,
max(win_start_h, static_cast<T>(h_iter)),
max(win_start_w, static_cast<T>(w_iter)),
min(win_end_h, static_cast<T>(h_iter) + static_cast<T>(1.0)),
min(win_end_w, static_cast<T>(w_iter) + static_cast<T>(1.0)),
height,
width);
}
}
const T* offset_out_data = out_data + i;
const T* offset_in_data = in_data + input_offset;
PrRoIPoolingCoorBackward<T>(s_w,
e_w,
s_h,
e_h,
width,
height,
win_start_w,
win_start_h,
win_end_w,
win_end_h,
pw,
ph,
pooled_width,
pooled_height,
win_size,
spatial_scale,
offset_in_data,
offset_out_data,
offset_input_roi_grad_data,
offset_output_grad_data);
}
}
template <typename T, typename DeviceContext>
class GPUPRROIPoolOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<phi::DenseTensor>("X");
auto* rois = ctx.Input<phi::DenseTensor>("ROIs");
auto* out = ctx.Output<phi::DenseTensor>("Out");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int input_channels = in_dims[1];
auto output_channels = input_channels;
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
// set rois batch id
phi::DenseTensor rois_batch_id_list;
rois_batch_id_list.Resize({rois_num});
int* rois_batch_id_data =
rois_batch_id_list.mutable_data<int>(platform::CPUPlace());
if (ctx.HasInput("BatchRoINums") || rois->lod().empty()) {
auto* batchroinum = ctx.Input<phi::DenseTensor>("BatchRoINums");
phi::DenseTensor batch_index_cpu;
framework::TensorCopySync(
*batchroinum, platform::CPUPlace(), &batch_index_cpu);
int rois_batch_size = batchroinum->dims()[0];
auto* batch_index = batch_index_cpu.data<int64_t>();
size_t c = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int64_t k = 0; k < batch_index[n]; ++k) {
rois_batch_id_data[c] = n;
c = c + 1;
}
}
} else {
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size,
batch_size,
platform::errors::InvalidArgument(
"The rois_batch_size and input(X) batch_size must be the same."));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(
rois_num,
rois_num_with_lod,
platform::errors::InvalidArgument(
"The rois_num from input and lod must be the same."));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
rois_batch_id_data[i] = n;
}
}
}
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
auto cplace = platform::CPUPlace();
auto& dev_ctx = ctx.cuda_device_context();
int bytes = rois_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(
dev_ctx.GetPlace(),
bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = ctx.GetPlace();
memory::Copy(gplace,
roi_id_data,
cplace,
rois_batch_id_data,
bytes,
dev_ctx.stream());
// call cuda kernel function
GPUPRROIPoolForward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_size,
in->data<T>(),
rois->data<T>(),
spatial_scale,
input_channels,
height,
width,
output_channels,
pooled_height,
pooled_width,
roi_id_data,
out->mutable_data<T>(ctx.GetPlace()));
}
};
template <typename T, typename DeviceContext>
class GPUPRROIPoolGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<phi::DenseTensor>("X");
auto* rois = ctx.Input<phi::DenseTensor>("ROIs");
auto* out = ctx.Input<phi::DenseTensor>("Out");
auto* output_grad =
ctx.Input<phi::DenseTensor>(framework::GradVarName("Out"));
auto* input_grad =
ctx.Output<phi::DenseTensor>(framework::GradVarName("X"));
auto* input_roi_grad =
ctx.Output<phi::DenseTensor>(framework::GradVarName("ROIs"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
int rois_num = rois->dims()[0];
int input_channels = in->dims()[1];
auto output_channels = input_channels;
int height = in->dims()[2];
int width = in->dims()[3];
if (input_grad || input_roi_grad) {
// set roi batch id
phi::DenseTensor rois_batch_id_list;
rois_batch_id_list.Resize({rois_num});
int* rois_batch_id_data =
rois_batch_id_list.mutable_data<int>(platform::CPUPlace());
if (ctx.HasInput("BatchRoINums") || rois->lod().empty()) {
auto* batchroinum = ctx.Input<phi::DenseTensor>("BatchRoINums");
phi::DenseTensor batch_index_cpu;
framework::TensorCopySync(
*batchroinum, platform::CPUPlace(), &batch_index_cpu);
int rois_batch_size = batchroinum->dims()[0];
auto* batch_index = batch_index_cpu.data<int64_t>();
size_t c = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int64_t k = 0; k < batch_index[n]; ++k) {
rois_batch_id_data[c] = n;
c = c + 1;
}
}
} else {
PADDLE_ENFORCE_EQ(rois->lod().empty(),
false,
platform::errors::InvalidArgument(
"the lod of Input ROIs should not be empty when "
"BatchRoINums is None!"));
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
rois_batch_id_data[i] = n;
}
}
}
auto cplace = platform::CPUPlace();
auto& dev_ctx = ctx.cuda_device_context();
int bytes = rois_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(
dev_ctx.GetPlace(),
bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = ctx.GetPlace();
memory::Copy(gplace,
roi_id_data,
cplace,
rois_batch_id_data,
bytes,
dev_ctx.stream());
input_grad->mutable_data<T>(ctx.GetPlace());
phi::funcs::SetConstant<DeviceContext, T> set_zero;
set_zero(ctx.cuda_device_context(), input_grad, static_cast<T>(0));
input_roi_grad->mutable_data<T>(ctx.GetPlace());
set_zero(ctx.cuda_device_context(), input_roi_grad, static_cast<T>(0));
int output_grad_size = output_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
GPUPRROIPoolBackward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_grad_size,
in->data<T>(),
rois->data<T>(),
output_grad->data<T>(),
spatial_scale,
input_channels,
height,
width,
output_channels,
pooled_height,
pooled_width,
roi_id_data,
input_grad->mutable_data<T>(ctx.GetPlace()),
out->data<T>(),
input_roi_grad->mutable_data<T>(ctx.GetPlace()));
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
PD_REGISTER_STRUCT_KERNEL(
prroi_pool, GPU, ALL_LAYOUT, ops::GPUPRROIPoolOpKernel, float, double) {}
PD_REGISTER_STRUCT_KERNEL(prroi_pool_grad,
GPU,
ALL_LAYOUT,
ops::GPUPRROIPoolGradOpKernel,
float,
double) {}
|
5dbc302dafd21db4d1bac7b5140079769bf7183c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ float gamma_correction(float f_stop, float gamma, float val)
{
return powf((val*powf(2.0,f_stop)),(1.0/gamma));
}
__device__ float rgb2Lum(float B, float G, float R)
{
return B * 0.0722 + G * 0.7152 + R * 0.2126;
}
__global__ void gamma_tonemap_kernel(float* imageIn, float* imageOut, int width, int height, int channels, float f_stop, float gamma)
{
int Row = blockDim.y * blockIdx.y + threadIdx.y;
int Col = blockDim.x * blockIdx.x + threadIdx.x;
if(Row < height && Col < width) {
float B, G, R, L, nL, scale;
B = imageIn[(Row*width+Col)*3+BLUE];
G = imageIn[(Row*width+Col)*3+GREEN];
R = imageIn[(Row*width+Col)*3+RED];
L = rgb2Lum(B, G, R);
nL = gamma_correction(f_stop, gamma, L);
scale = nL / L;
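// Apply the exposure/gamma curve to the luminance only, then rescale all three channels
// by the same factor so that the pixel's chromaticity (hue) is preserved.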
imageOut[(Row*width+Col)*3+BLUE] = B * scale;
imageOut[(Row*width+Col)*3+GREEN] = G * scale;
imageOut[(Row*width+Col)*3+RED] = R * scale;
}
} | 5dbc302dafd21db4d1bac7b5140079769bf7183c.cu | #include "includes.h"
__device__ float gamma_correction(float f_stop, float gamma, float val)
{
return powf((val*powf(2.0,f_stop)),(1.0/gamma));
}
__device__ float rgb2Lum(float B, float G, float R)
{
return B * 0.0722 + G * 0.7152 + R * 0.2126;
}
__global__ void gamma_tonemap_kernel(float* imageIn, float* imageOut, int width, int height, int channels, float f_stop, float gamma)
{
int Row = blockDim.y * blockIdx.y + threadIdx.y;
int Col = blockDim.x * blockIdx.x + threadIdx.x;
if(Row < height && Col < width) {
float B, G, R, L, nL, scale;
B = imageIn[(Row*width+Col)*3+BLUE];
G = imageIn[(Row*width+Col)*3+GREEN];
R = imageIn[(Row*width+Col)*3+RED];
L = rgb2Lum(B, G, R);
nL = gamma_correction(f_stop, gamma, L);
scale = nL / L;
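// Apply the exposure/gamma curve to the luminance only, then rescale all three channels
// by the same factor so that the pixel's chromaticity (hue) is preserved.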
imageOut[(Row*width+Col)*3+BLUE] = B * scale;
imageOut[(Row*width+Col)*3+GREEN] = G * scale;
imageOut[(Row*width+Col)*3+RED] = R * scale;
}
} |
de40f16b14bd1d9658ba830bec4d4e1c563a36fd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "cuda_kernel.cuh"
#include "cuda_threadindex.cuh"
#include <stdio.h>
/**
* Naive matrix multiplication: each thread computes one element of C = A * B
*/
__global__ void matrix_mul_gpu_element(float *gpuA, float *gpuB, float *gpuC, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < n && row < n) {
float a, b, sum = 0;
for (int k = 0; k < n; ++k) {
a = gpuA[k + row*n];
b = gpuB[col + k*n];
sum += a * b;
}
gpuC[col + row*n] = sum;
}
}
| de40f16b14bd1d9658ba830bec4d4e1c563a36fd.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "cuda_kernel.cuh"
#include "cuda_threadindex.cuh"
#include <stdio.h>
/**
* Naive matrix multiplication: each thread computes one element of C = A * B
*/
__global__ void matrix_mul_gpu_element(float *gpuA, float *gpuB, float *gpuC, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < n && row < n) {
float a, b, sum = 0;
for (int k = 0; k < n; ++k) {
a = gpuA[k + row*n];
b = gpuB[col + k*n];
sum += a * b;
}
gpuC[col + row*n] = sum;
}
}
|
1a09cb2c5aba690e0489e12f7195f9862b3d5225.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "dwt_per_X_O.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_ip = NULL;
hipMalloc(&d_ip, XSIZE*YSIZE);
int rows = XSIZE;
int cols = YSIZE;
int cA_cols = 1;
int filt_len = 1;
int Halo_steps = 1;
float *d_cL = NULL;
hipMalloc(&d_cL, XSIZE*YSIZE);
float *d_cH = NULL;
hipMalloc(&d_cH, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
dwt_per_X_O), dim3(gridBlock),dim3(threadBlock), 0, 0, d_ip,rows,cols,cA_cols,filt_len,Halo_steps,d_cL,d_cH);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
dwt_per_X_O), dim3(gridBlock),dim3(threadBlock), 0, 0, d_ip,rows,cols,cA_cols,filt_len,Halo_steps,d_cL,d_cH);
}
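// Time 1000 back-to-back launches. There is no device synchronisation before `end`,
// so the measurement may not include the completion time of the queued kernels.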
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
dwt_per_X_O), dim3(gridBlock),dim3(threadBlock), 0, 0, d_ip,rows,cols,cA_cols,filt_len,Halo_steps,d_cL,d_cH);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1a09cb2c5aba690e0489e12f7195f9862b3d5225.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "dwt_per_X_O.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_ip = NULL;
cudaMalloc(&d_ip, XSIZE*YSIZE);
int rows = XSIZE;
int cols = YSIZE;
int cA_cols = 1;
int filt_len = 1;
int Halo_steps = 1;
float *d_cL = NULL;
cudaMalloc(&d_cL, XSIZE*YSIZE);
float *d_cH = NULL;
cudaMalloc(&d_cH, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
dwt_per_X_O<<<gridBlock,threadBlock>>>(d_ip,rows,cols,cA_cols,filt_len,Halo_steps,d_cL,d_cH);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
dwt_per_X_O<<<gridBlock,threadBlock>>>(d_ip,rows,cols,cA_cols,filt_len,Halo_steps,d_cL,d_cH);
}
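// Time 1000 back-to-back launches. There is no device synchronisation before `end`,
// so the measurement may not include the completion time of the queued kernels.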
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
dwt_per_X_O<<<gridBlock,threadBlock>>>(d_ip,rows,cols,cA_cols,filt_len,Halo_steps,d_cL,d_cH);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
727996e4ef9e07cfbee192cca2fe898f9dd2e270.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "image_properties.h"
#include "array_properties.h"
#include "shared_functions.h"
#include "serial_functions.h"
__global__ void
parallel_train_network(image_chunk * image_grid, network_chunk * network_grid)
{
char bit_value = image_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) +
(blockIdx.z*VER_ARRAYS*HOR_ARRAYS)].image_data[threadIdx.x%64][threadIdx.x/64];
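// the flat thread index is mapped onto the 64x8 image_data layout as [threadIdx.x % 64][threadIdx.x / 64]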
/*In this case, where there are necessarily more variables than can be included in a single launch
due to needing to specify more dimensions than can be arranged in blocks (limited to three)
and adding thread dimensions would exceed the hardware limit (512^2 = 262,144,
even the best GPU can't exceed 2048) it's a case of choosing between multiple kernel launches,
or including a loop in the device code. In this case, I chose to have just one loop here.*/
for (int i = 0; i < blockDim.x; i++)
{
char relating_bit_value = image_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) +
(blockIdx.z*VER_ARRAYS*HOR_ARRAYS)].image_data[i % 64][i / 64];
if (relating_bit_value == bit_value)
{
network_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) +
(blockIdx.z*VER_ARRAYS*HOR_ARRAYS)].network_weights[threadIdx.x][i] += 1;
}
else
{
network_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) +
(blockIdx.z*VER_ARRAYS*HOR_ARRAYS)].network_weights[threadIdx.x][i] += -1;
}
}
}
__global__ void
parallel_recall_image(image_chunk * image_grid, network_chunk * network_grid, int colour_channel) {
//accessible to all threads within a block.
__shared__ int neuron_output[BITS_PER_SUBIMAGE];
/*Unlike the above code, here there are separate kernels for each colour channel--otherwise there
would be nested for loops in the device code. Because of the need to access a shared array, this
has the potential for leaving lots of threads waiting most of the time, which should be avoided
as it's poor performance.*/
char weighted_value = image_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) +
(colour_channel*VER_ARRAYS*HOR_ARRAYS)].image_data[threadIdx.x % 64][threadIdx.x / 64] * 2 - 1;
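// "* 2 - 1" maps the stored bit {0,1} to a bipolar value {-1,+1} before it is weighted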
neuron_output[threadIdx.x] = network_grid[blockIdx.x + blockIdx.y*VER_ARRAYS +
colour_channel*VER_ARRAYS*HOR_ARRAYS].network_weights[blockIdx.z][threadIdx.x] * weighted_value;
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i >>= 1) { //first half of the array adds the second
//then first half of THAT adds the second, until everything has been added to the 0th element
if (threadIdx.x < i) {
neuron_output[threadIdx.x] += neuron_output[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) //only operate this on one thread
//sets bit values
{
if (neuron_output[0] > 0) {
image_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) + (colour_channel*VER_ARRAYS*HOR_ARRAYS)]
.image_data[blockIdx.z % 64][blockIdx.z / 64] = 1;
}
else {
image_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) + (colour_channel*VER_ARRAYS*HOR_ARRAYS)]
.image_data[blockIdx.z % 64][blockIdx.z / 64] = 0;
}
}
}
__host__
void
parallel_trainer(image_chunk ***image_grid, network_chunk ***network_grid) {
//convert the multidimensional array to a single array--can be transferred in one go to the GPU
//rather than specifying the dimensions in the method header.
//as we can see here, using the Cuda version 6.0+ unified memory allows for transferring structs
//rather than having to allocate memory for each sub-array individually and update the device pointers
image_chunk * parallel_image;
hipMallocManaged(¶llel_image, sizeof(image_chunk)*VER_ARRAYS*HOR_ARRAYS*COLOUR_CHANNELS);
network_chunk * parallel_network;
hipMallocManaged(¶llel_network, sizeof(network_chunk)*VER_ARRAYS*HOR_ARRAYS*COLOUR_CHANNELS);
for (int chunk_x = 0; chunk_x < VER_ARRAYS; chunk_x++)
{
for (int chunk_y = 0; chunk_y < HOR_ARRAYS; chunk_y++)
{
for (int colour = 0; colour < COLOUR_CHANNELS; colour++)
{
for (int image_y = 0; image_y < 8; image_y++)
{
for (int image_x = 0; image_x < 64; image_x++)
{
parallel_image[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.image_data[image_x][image_y]
= image_grid[chunk_x][chunk_y][colour].image_data[image_x][image_y];
}
}
for (int relationship_x = 0; relationship_x < BITS_PER_SUBIMAGE; relationship_x++)
{
for (int relationship_y = 0; relationship_y < BITS_PER_SUBIMAGE; relationship_y++)
{
parallel_network[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.network_weights[relationship_x][relationship_y]
= network_grid[chunk_x][chunk_y][colour].network_weights[relationship_x][relationship_y];
}
}
}
}
}
dim3 grid_dimensions(VER_ARRAYS, HOR_ARRAYS, COLOUR_CHANNELS); //dimensions of the block grid
//e.g., how many blocks to have
dim3 block_dimensions(BITS_PER_SUBIMAGE); //number of threads per block. With one value, the Y and Z dimensions
//are initialised to 1.
hipLaunchKernelGGL(( parallel_train_network), dim3(grid_dimensions), dim3(block_dimensions), 0, 0, parallel_image, parallel_network);
hipDeviceSynchronize();
//copy back over to the original arrays
for (int chunk_x = 0; chunk_x < VER_ARRAYS; chunk_x++)
{
for (int chunk_y = 0; chunk_y < HOR_ARRAYS; chunk_y++)
{
for (int colour = 0; colour < COLOUR_CHANNELS; colour++)
{
for (int image_y = 0; image_y < 8; image_y++)
{
for (int image_x = 0; image_x < 64; image_x++)
{
image_grid[chunk_x][chunk_y][colour].image_data[image_x][image_y] =
parallel_image[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.image_data[image_x][image_y];
}
}
for (int relationship_x = 0; relationship_x < BITS_PER_SUBIMAGE; relationship_x++)
{
for (int relationship_y = 0; relationship_y < BITS_PER_SUBIMAGE; relationship_y++)
{
network_grid[chunk_x][chunk_y][colour].network_weights[relationship_x][relationship_y] =
parallel_network[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.network_weights[relationship_x][relationship_y];
}
}
}
}
}
hipFree(parallel_image);
hipFree(parallel_network);
}
__host__
void
parallel_recall(image_chunk ***image_grid, network_chunk ***network_grid, int loops) {
//as above, copy the data into linear arrays
image_chunk * parallel_image;
hipMallocManaged(¶llel_image, sizeof(image_chunk)*VER_ARRAYS*HOR_ARRAYS*COLOUR_CHANNELS);
network_chunk * parallel_network;
hipMallocManaged(¶llel_network, sizeof(network_chunk)*VER_ARRAYS*HOR_ARRAYS*COLOUR_CHANNELS);
for (int chunk_x = 0; chunk_x < VER_ARRAYS; chunk_x++)
{
for (int chunk_y = 0; chunk_y < HOR_ARRAYS; chunk_y++)
{
for (int colour = 0; colour < COLOUR_CHANNELS; colour++)
{
for (int image_y = 0; image_y < 8; image_y++)
{
for (int image_x = 0; image_x < 64; image_x++)
{
parallel_image[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.image_data[image_x][image_y]
= image_grid[chunk_x][chunk_y][colour].image_data[image_x][image_y];
}
}
for (int relationship_x = 0; relationship_x < BITS_PER_SUBIMAGE; relationship_x++)
{
for (int relationship_y = 0; relationship_y < BITS_PER_SUBIMAGE; relationship_y++)
{
parallel_network[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.network_weights[relationship_x][relationship_y]
= network_grid[chunk_x][chunk_y][colour].network_weights[relationship_x][relationship_y];
}
}
}
}
}
dim3 grid_dimensions(VER_ARRAYS, HOR_ARRAYS, BITS_PER_SUBIMAGE);
dim3 block_dimensions(BITS_PER_SUBIMAGE);
for (int i = 0; i < loops; i++)
{
for (int colour = 0; colour < COLOUR_CHANNELS; colour++)
{
hipLaunchKernelGGL(( parallel_recall_image), dim3(grid_dimensions), dim3(block_dimensions), 0, 0, parallel_image, parallel_network, colour);
hipDeviceSynchronize(); //makes sure that the program doesn't proceed until the device code is finished executing
//all data is already on the device, so it's just a matter of launching another kernel
}
}
//copy back over to the original arrays
for (int chunk_x = 0; chunk_x < VER_ARRAYS; chunk_x++)
{
for (int chunk_y = 0; chunk_y < HOR_ARRAYS; chunk_y++)
{
for (int colour = 0; colour < COLOUR_CHANNELS; colour++)
{
for (int image_y = 0; image_y < 8; image_y++)
{
for (int image_x = 0; image_x < 64; image_x++)
{
image_grid[chunk_x][chunk_y][colour].image_data[image_x][image_y] =
parallel_image[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.image_data[image_x][image_y];
}
}
for (int relationship_x = 0; relationship_x < BITS_PER_SUBIMAGE; relationship_x++)
{
for (int relationship_y = 0; relationship_y < BITS_PER_SUBIMAGE; relationship_y++)
{
network_grid[chunk_x][chunk_y][colour].network_weights[relationship_x][relationship_y] =
parallel_network[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.network_weights[relationship_x][relationship_y];
}
}
}
}
}
hipFree(parallel_image);
hipFree(parallel_network);
} | 727996e4ef9e07cfbee192cca2fe898f9dd2e270.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "image_properties.h"
#include "array_properties.h"
#include "shared_functions.h"
#include "serial_functions.h"
__global__ void
parallel_train_network(image_chunk * image_grid, network_chunk * network_grid)
{
char bit_value = image_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) +
(blockIdx.z*VER_ARRAYS*HOR_ARRAYS)].image_data[threadIdx.x%64][threadIdx.x/64];
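// the flat thread index is mapped onto the 64x8 image_data layout as [threadIdx.x % 64][threadIdx.x / 64]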
/*In this case, where there are necessarily more variables than can be included in a single launch
due to needing to specify more dimensions than can be arranged in blocks (limited to three)
and adding thread dimensions would exceed the hardware limit (512^2 = 262,144,
even the best GPU can't exceed 2048) it's a case of choosing between multiple kernel launches,
or including a loop in the device code. In this case, I chose to have just one loop here.*/
for (int i = 0; i < blockDim.x; i++)
{
char relating_bit_value = image_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) +
(blockIdx.z*VER_ARRAYS*HOR_ARRAYS)].image_data[i % 64][i / 64];
if (relating_bit_value == bit_value)
{
network_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) +
(blockIdx.z*VER_ARRAYS*HOR_ARRAYS)].network_weights[threadIdx.x][i] += 1;
}
else
{
network_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) +
(blockIdx.z*VER_ARRAYS*HOR_ARRAYS)].network_weights[threadIdx.x][i] += -1;
}
}
}
__global__ void
parallel_recall_image(image_chunk * image_grid, network_chunk * network_grid, int colour_channel) {
//accessible to all threads within a block.
__shared__ int neuron_output[BITS_PER_SUBIMAGE];
/*Unlike the above code, here there are separate kernels for each colour channel--otherwise there
would be nested for loops in the device code. Because of the need to access a shared array, this
has the potential for leaving lots of threads waiting most of the time, which should be avoided
as it's poor performance.*/
char weighted_value = image_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) +
(colour_channel*VER_ARRAYS*HOR_ARRAYS)].image_data[threadIdx.x % 64][threadIdx.x / 64] * 2 - 1;
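// "* 2 - 1" maps the stored bit {0,1} to a bipolar value {-1,+1} before it is weighted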
neuron_output[threadIdx.x] = network_grid[blockIdx.x + blockIdx.y*VER_ARRAYS +
colour_channel*VER_ARRAYS*HOR_ARRAYS].network_weights[blockIdx.z][threadIdx.x] * weighted_value;
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i >>= 1) { //first half of the array adds the second
//then first half of THAT adds the second, until everything has been added to the 0th element
if (threadIdx.x < i) {
neuron_output[threadIdx.x] += neuron_output[threadIdx.x + i];
}
__syncthreads();
}
if (threadIdx.x == 0) //only operate this on one thread
//sets bit values
{
if (neuron_output[0] > 0) {
image_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) + (colour_channel*VER_ARRAYS*HOR_ARRAYS)]
.image_data[blockIdx.z % 64][blockIdx.z / 64] = 1;
}
else {
image_grid[blockIdx.x + (blockIdx.y*VER_ARRAYS) + (colour_channel*VER_ARRAYS*HOR_ARRAYS)]
.image_data[blockIdx.z % 64][blockIdx.z / 64] = 0;
}
}
}
__host__
void
parallel_trainer(image_chunk ***image_grid, network_chunk ***network_grid) {
//convert the multidimensional array to a single array--can be transferred in one go to the GPU
//rather than specifying the dimensions in the method header.
//as we can see here, using the Cuda version 6.0+ unified memory allows for transferring structs
//rather than having to allocate memory for each sub-array individually and update the device pointers
image_chunk * parallel_image;
cudaMallocManaged(¶llel_image, sizeof(image_chunk)*VER_ARRAYS*HOR_ARRAYS*COLOUR_CHANNELS);
network_chunk * parallel_network;
cudaMallocManaged(¶llel_network, sizeof(network_chunk)*VER_ARRAYS*HOR_ARRAYS*COLOUR_CHANNELS);
for (int chunk_x = 0; chunk_x < VER_ARRAYS; chunk_x++)
{
for (int chunk_y = 0; chunk_y < HOR_ARRAYS; chunk_y++)
{
for (int colour = 0; colour < COLOUR_CHANNELS; colour++)
{
for (int image_y = 0; image_y < 8; image_y++)
{
for (int image_x = 0; image_x < 64; image_x++)
{
parallel_image[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.image_data[image_x][image_y]
= image_grid[chunk_x][chunk_y][colour].image_data[image_x][image_y];
}
}
for (int relationship_x = 0; relationship_x < BITS_PER_SUBIMAGE; relationship_x++)
{
for (int relationship_y = 0; relationship_y < BITS_PER_SUBIMAGE; relationship_y++)
{
parallel_network[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.network_weights[relationship_x][relationship_y]
= network_grid[chunk_x][chunk_y][colour].network_weights[relationship_x][relationship_y];
}
}
}
}
}
dim3 grid_dimensions(VER_ARRAYS, HOR_ARRAYS, COLOUR_CHANNELS); //dimensions of the block grid
//e.g., how many blocks to have
dim3 block_dimensions(BITS_PER_SUBIMAGE); //number of threads per block. With one value, the Y and Z dimensions
//are initialised to 1.
parallel_train_network<<<grid_dimensions, block_dimensions>>>(parallel_image, parallel_network);
cudaDeviceSynchronize();
//copy back over to the original arrays
for (int chunk_x = 0; chunk_x < VER_ARRAYS; chunk_x++)
{
for (int chunk_y = 0; chunk_y < HOR_ARRAYS; chunk_y++)
{
for (int colour = 0; colour < COLOUR_CHANNELS; colour++)
{
for (int image_y = 0; image_y < 8; image_y++)
{
for (int image_x = 0; image_x < 64; image_x++)
{
image_grid[chunk_x][chunk_y][colour].image_data[image_x][image_y] =
parallel_image[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.image_data[image_x][image_y];
}
}
for (int relationship_x = 0; relationship_x < BITS_PER_SUBIMAGE; relationship_x++)
{
for (int relationship_y = 0; relationship_y < BITS_PER_SUBIMAGE; relationship_y++)
{
network_grid[chunk_x][chunk_y][colour].network_weights[relationship_x][relationship_y] =
parallel_network[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.network_weights[relationship_x][relationship_y];
}
}
}
}
}
cudaFree(parallel_image);
cudaFree(parallel_network);
}
__host__
void
parallel_recall(image_chunk ***image_grid, network_chunk ***network_grid, int loops) {
//as above, copy the data into linear arrays
image_chunk * parallel_image;
cudaMallocManaged(¶llel_image, sizeof(image_chunk)*VER_ARRAYS*HOR_ARRAYS*COLOUR_CHANNELS);
network_chunk * parallel_network;
cudaMallocManaged(¶llel_network, sizeof(network_chunk)*VER_ARRAYS*HOR_ARRAYS*COLOUR_CHANNELS);
for (int chunk_x = 0; chunk_x < VER_ARRAYS; chunk_x++)
{
for (int chunk_y = 0; chunk_y < HOR_ARRAYS; chunk_y++)
{
for (int colour = 0; colour < COLOUR_CHANNELS; colour++)
{
for (int image_y = 0; image_y < 8; image_y++)
{
for (int image_x = 0; image_x < 64; image_x++)
{
parallel_image[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.image_data[image_x][image_y]
= image_grid[chunk_x][chunk_y][colour].image_data[image_x][image_y];
}
}
for (int relationship_x = 0; relationship_x < BITS_PER_SUBIMAGE; relationship_x++)
{
for (int relationship_y = 0; relationship_y < BITS_PER_SUBIMAGE; relationship_y++)
{
parallel_network[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.network_weights[relationship_x][relationship_y]
= network_grid[chunk_x][chunk_y][colour].network_weights[relationship_x][relationship_y];
}
}
}
}
}
dim3 grid_dimensions(VER_ARRAYS, HOR_ARRAYS, BITS_PER_SUBIMAGE);
dim3 block_dimensions(BITS_PER_SUBIMAGE);
for (int i = 0; i < loops; i++)
{
for (int colour = 0; colour < COLOUR_CHANNELS; colour++)
{
parallel_recall_image<<<grid_dimensions, block_dimensions>>>(parallel_image, parallel_network, colour);
cudaDeviceSynchronize(); //makes sure that the program doesn't proceed until the device code is finished executing
//all data is already on the device, so it's just a matter of launching another kernel
}
}
//copy back over to the original arrays
for (int chunk_x = 0; chunk_x < VER_ARRAYS; chunk_x++)
{
for (int chunk_y = 0; chunk_y < HOR_ARRAYS; chunk_y++)
{
for (int colour = 0; colour < COLOUR_CHANNELS; colour++)
{
for (int image_y = 0; image_y < 8; image_y++)
{
for (int image_x = 0; image_x < 64; image_x++)
{
image_grid[chunk_x][chunk_y][colour].image_data[image_x][image_y] =
parallel_image[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.image_data[image_x][image_y];
}
}
for (int relationship_x = 0; relationship_x < BITS_PER_SUBIMAGE; relationship_x++)
{
for (int relationship_y = 0; relationship_y < BITS_PER_SUBIMAGE; relationship_y++)
{
network_grid[chunk_x][chunk_y][colour].network_weights[relationship_x][relationship_y] =
parallel_network[chunk_x + (chunk_y * VER_ARRAYS) + (colour * VER_ARRAYS * HOR_ARRAYS)]
.network_weights[relationship_x][relationship_y];
}
}
}
}
}
cudaFree(parallel_image);
cudaFree(parallel_network);
} |
54beeccc68794622e7832e90dfb36c6d1288ee29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample calculates scalar products of a
* given set of input vector pairs
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <helper_functions.h>
#include <helper_cuda.h>
///////////////////////////////////////////////////////////////////////////////
// Calculate scalar products of VectorN vectors of ElementN elements on CPU
///////////////////////////////////////////////////////////////////////////////
extern "C"
void scalarProdCPU(
float *h_C,
float *h_A,
float *h_B,
int vectorN,
int elementN
);
///////////////////////////////////////////////////////////////////////////////
// Calculate scalar products of VectorN vectors of ElementN elements on GPU
///////////////////////////////////////////////////////////////////////////////
#include "scalarProd_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high)
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
///////////////////////////////////////////////////////////////////////////////
// Data configuration
///////////////////////////////////////////////////////////////////////////////
//Total number of input vector pairs; arbitrary
const int VECTOR_N = 16;//256;
//Number of elements per vector; arbitrary,
//but strongly preferred to be a multiple of warp size
//to meet memory coalescing constraints
const int ELEMENT_N = 16 * 1024;
//Total number of data elements
const int DATA_N = VECTOR_N * ELEMENT_N;
const int DATA_SZ = DATA_N * sizeof(float);
const int RESULT_SZ = VECTOR_N * sizeof(float);
///////////////////////////////////////////////////////////////////////////////
// Main program
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
float *h_A, *h_B, *h_C_CPU, *h_C_GPU;
float *d_A, *d_B, *d_C;
double delta, ref, sum_delta, sum_ref, L1norm;
StopWatchInterface *hTimer = NULL;
int i;
printf("%s Starting...\n\n", argv[0]);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaDevice(argc, (const char **)argv);
sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory.\n");
h_A = (float *)malloc(DATA_SZ);
h_B = (float *)malloc(DATA_SZ);
h_C_CPU = (float *)malloc(RESULT_SZ);
h_C_GPU = (float *)malloc(RESULT_SZ);
printf("...allocating GPU memory.\n");
checkCudaErrors(hipMalloc((void **)&d_A, DATA_SZ));
checkCudaErrors(hipMalloc((void **)&d_B, DATA_SZ));
checkCudaErrors(hipMalloc((void **)&d_C, RESULT_SZ));
printf("...generating input data in CPU mem.\n");
srand(123);
//Generating input data on CPU
for (i = 0; i < DATA_N; i++)
{
h_A[i] = RandFloat(0.0f, 1.0f);
h_B[i] = RandFloat(0.0f, 1.0f);
}
printf("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
checkCudaErrors(hipMemcpy(d_A, h_A, DATA_SZ, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_B, h_B, DATA_SZ, hipMemcpyHostToDevice));
printf("Data init done.\n");
printf("Executing GPU kernel...\n");
checkCudaErrors(hipDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
hipLaunchKernelGGL(( scalarProdGPU), dim3(128), dim3(256), 0, 0, d_C, d_A, d_B, VECTOR_N, ELEMENT_N);
getLastCudaError("scalarProdGPU() execution failed\n");
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&hTimer);
printf("GPU time: %f msecs.\n", sdkGetTimerValue(&hTimer));
printf("Reading back GPU result...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(hipMemcpy(h_C_GPU, d_C, RESULT_SZ, hipMemcpyDeviceToHost));
printf("Checking GPU results...\n");
printf("..running CPU scalar product calculation\n");
scalarProdCPU(h_C_CPU, h_A, h_B, VECTOR_N, ELEMENT_N);
printf("...comparing the results\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
for (i = 0; i < VECTOR_N; i++)
{
delta = fabs(h_C_GPU[i] - h_C_CPU[i]);
ref = h_C_CPU[i];
sum_delta += delta;
sum_ref += ref;
}
L1norm = sum_delta / sum_ref;
printf("Shutting down...\n");
checkCudaErrors(hipFree(d_C));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_A));
free(h_C_GPU);
free(h_C_CPU);
free(h_B);
free(h_A);
sdkDeleteTimer(&hTimer);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
printf("L1 error: %E\n", L1norm);
printf((L1norm < 1e-6) ? "Test passed\n" : "Test failed!\n");
exit(L1norm < 1e-6 ? EXIT_SUCCESS : EXIT_FAILURE);
}
| 54beeccc68794622e7832e90dfb36c6d1288ee29.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample calculates scalar products of a
* given set of input vector pairs
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <helper_functions.h>
#include <helper_cuda.h>
///////////////////////////////////////////////////////////////////////////////
// Calculate scalar products of VectorN vectors of ElementN elements on CPU
///////////////////////////////////////////////////////////////////////////////
extern "C"
void scalarProdCPU(
float *h_C,
float *h_A,
float *h_B,
int vectorN,
int elementN
);
///////////////////////////////////////////////////////////////////////////////
// Calculate scalar products of VectorN vectors of ElementN elements on GPU
///////////////////////////////////////////////////////////////////////////////
#include "scalarProd_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high)
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
///////////////////////////////////////////////////////////////////////////////
// Data configuration
///////////////////////////////////////////////////////////////////////////////
//Total number of input vector pairs; arbitrary
const int VECTOR_N = 16;//256;
//Number of elements per vector; arbitrary,
//but strongly preferred to be a multiple of warp size
//to meet memory coalescing constraints
const int ELEMENT_N = 16 * 1024;
//Total number of data elements
const int DATA_N = VECTOR_N * ELEMENT_N;
const int DATA_SZ = DATA_N * sizeof(float);
const int RESULT_SZ = VECTOR_N * sizeof(float);
///////////////////////////////////////////////////////////////////////////////
// Main program
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
float *h_A, *h_B, *h_C_CPU, *h_C_GPU;
float *d_A, *d_B, *d_C;
double delta, ref, sum_delta, sum_ref, L1norm;
StopWatchInterface *hTimer = NULL;
int i;
printf("%s Starting...\n\n", argv[0]);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaDevice(argc, (const char **)argv);
sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory.\n");
h_A = (float *)malloc(DATA_SZ);
h_B = (float *)malloc(DATA_SZ);
h_C_CPU = (float *)malloc(RESULT_SZ);
h_C_GPU = (float *)malloc(RESULT_SZ);
printf("...allocating GPU memory.\n");
checkCudaErrors(cudaMalloc((void **)&d_A, DATA_SZ));
checkCudaErrors(cudaMalloc((void **)&d_B, DATA_SZ));
checkCudaErrors(cudaMalloc((void **)&d_C, RESULT_SZ));
printf("...generating input data in CPU mem.\n");
srand(123);
//Generating input data on CPU
for (i = 0; i < DATA_N; i++)
{
h_A[i] = RandFloat(0.0f, 1.0f);
h_B[i] = RandFloat(0.0f, 1.0f);
}
printf("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
checkCudaErrors(cudaMemcpy(d_A, h_A, DATA_SZ, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_B, h_B, DATA_SZ, cudaMemcpyHostToDevice));
printf("Data init done.\n");
printf("Executing GPU kernel...\n");
checkCudaErrors(cudaDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
scalarProdGPU<<<128, 256>>>(d_C, d_A, d_B, VECTOR_N, ELEMENT_N);
getLastCudaError("scalarProdGPU() execution failed\n");
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
printf("GPU time: %f msecs.\n", sdkGetTimerValue(&hTimer));
printf("Reading back GPU result...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(cudaMemcpy(h_C_GPU, d_C, RESULT_SZ, cudaMemcpyDeviceToHost));
printf("Checking GPU results...\n");
printf("..running CPU scalar product calculation\n");
scalarProdCPU(h_C_CPU, h_A, h_B, VECTOR_N, ELEMENT_N);
printf("...comparing the results\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
for (i = 0; i < VECTOR_N; i++)
{
delta = fabs(h_C_GPU[i] - h_C_CPU[i]);
ref = h_C_CPU[i];
sum_delta += delta;
sum_ref += ref;
}
L1norm = sum_delta / sum_ref;
printf("Shutting down...\n");
checkCudaErrors(cudaFree(d_C));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_A));
free(h_C_GPU);
free(h_C_CPU);
free(h_B);
free(h_A);
sdkDeleteTimer(&hTimer);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
printf("L1 error: %E\n", L1norm);
printf((L1norm < 1e-6) ? "Test passed\n" : "Test failed!\n");
exit(L1norm < 1e-6 ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
1abd15d986993fd3aca5a2986bd22759f0761c47.hip | // !!! This is a file automatically generated by hipify!!!
/**
https://blog.csdn.net/Bruce_0712/article/details/64928442
hipDeviceSynchronize(): blocks the CPU thread until the GPU has finished all previously issued work (kernels, data copies, etc.).
hipDeviceSynchronize() also stands in for cudaThreadSynchronize(), which behaves like cudaDeviceSynchronize() but is deprecated and may be removed in a later CUDA release.
hipStreamSynchronize(): takes a stream ID and blocks the CPU until the GPU has finished all work in that stream; work in other streams may or may not have completed.
To synchronize across warps, use __syncthreads().
(1) Threads within the same warp reading/writing shared/global memory need no explicit synchronization;
such writes to global and shared memory are immediately visible to the other threads of that warp.
(2) Threads in different warps of the same block reading/writing shared/global memory must synchronize;
use __syncthreads() or __threadfence() to obtain the required visibility.
(3) Threads in different blocks of the same grid reading/writing shared/global memory must synchronize;
use __threadfence*() to obtain the required visibility.
// Sample output
[vec_add2.cu:63] GPU vector addition
allocated 76.29 MB on GPU
GPU cost time=0.06606s last_num=4e+07
CPU cost time=2.53995s last_num=4e+07
*/
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <ctime>
#include "common/book.h"
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
using namespace std;
typedef float FLOAT;
// CPU vector addition
// __host__ void vec_add_host(FLOAT *x,FLOAT* y,FLOAT *z,int N);
// alternatively, the __host__ qualifier on a host function can be omitted
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N);
// GPU kernel
__global__ void vec_add(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
// compute the global thread id (many threads run this kernel concurrently; the id tells them apart)
/**
* <<<(256,256),256>>> grid: 2-D block: 1-D tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y
* <<<256,256>>> grid: 1-D block: 1-D tid=threadIdx.x+blockDim.x*blockIdx.x
* <<<1,256>>> grid: 1-D block: 1-D tid=threadIdx.x
* <<<256,1>>> grid: 1-D block: 1-D tid=blockDim.x*blockIdx.x
*/
int tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y;
if (tid<N) z[tid]=x[tid]+y[tid]; // the number of threads launched must be at least the data count so every element is processed
// __syncthreads(); // thread synchronization
}
int main(int argc, char *argv[])
{
mycout<<"GPU "<<endl;
int N = 20000000;
int nbytes = N * sizeof(FLOAT);
/* 1D block */
int bs = 256;
/* 2D grid */
// int s = ceil(sqrt((N + bs - 1.) / bs));
int s = ceil(sqrt(1.0*N / bs));
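// an s x s grid of bs-thread blocks gives s*s*bs >= N threads, enough to cover every element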
dim3 grid = dim3(s, s);
// dx, dy, dz are device (GPU) pointers; hx, hy, hz are host (CPU) pointers
FLOAT *dx = NULL, *hx = NULL;
FLOAT *dy = NULL, *hy = NULL;
FLOAT *dz = NULL, *hz = NULL;
int itr = 30;
int i;
// double th, td;
/**====== 1. Allocate and initialize host (CPU) memory ==========*/
/* allocate CPU mem */
// hx = (FLOAT *) malloc(nbytes);
// hy = (FLOAT *) malloc(nbytes);
// hz = (FLOAT *) malloc(nbytes);
// plain malloc is slower for this purpose than pinned allocation with cudaMallocHost (hipHostMalloc here)
HANDLE_ERROR(hipHostMalloc((void **)&hx, nbytes));
HANDLE_ERROR(hipHostMalloc((void **)&hy, nbytes));
HANDLE_ERROR(hipHostMalloc((void **)&hz, nbytes));
if (hx == NULL || hy == NULL || hz == NULL) {
// printf("couldn't allocate CPU memory\n");
mycout<<"couldn't allocate CPU memory"<<endl;
return -2;
}
/* initialize the host arrays */
// fill the arrays 'hx' and 'hy' on the CPU
for (int i=0; i<N; i++) {
hx[i] = i;
hy[i] = i ;
}
/**====== 2. Allocate device (GPU) memory ======*/
/* allocate GPU mem */
HANDLE_ERROR(hipMalloc((void **)&dx, nbytes));
HANDLE_ERROR(hipMalloc((void **)&dy, nbytes));
HANDLE_ERROR(hipMalloc((void **)&dz, nbytes));
if (dx == NULL || dy == NULL || dz == NULL) {
// printf("couldn't allocate GPU memory\n");
mycout<<"couldn't allocate GPU memory"<<endl;
return -1;
}
printf("allocated %.2f MB on GPU\n", nbytes / (1024.f * 1024.f));
/**====== 3. Copy the input data from the CPU to the GPU ======*/
/** copy data to GPU */
HANDLE_ERROR(hipMemcpy(dx, hx, nbytes, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dy, hy, nbytes, hipMemcpyHostToDevice));
/**====== 4. Launch the GPU computation ======*/
/* call GPU */
// hipDeviceSynchronize(); // wait for the preceding GPU data copies to finish
HANDLE_ERROR(hipDeviceSynchronize()); // hipDeviceSynchronize() replaces the deprecated cudaThreadSynchronize()
clock_t start = clock();
for (i = 0; i < itr; i++) hipLaunchKernelGGL(vec_add, dim3(grid), dim3(bs), 0, 0, dx, dy, dz, N);
// hipDeviceSynchronize(); // wait for all kernel launches to finish
HANDLE_ERROR(hipDeviceSynchronize()); // hipDeviceSynchronize() replaces the deprecated cudaThreadSynchronize()
/**====== 5. Copy the GPU results back to the CPU ======*/
HANDLE_ERROR(hipMemcpy(hz,dz, nbytes, hipMemcpyDeviceToHost));
cout<<"GPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// time the CPU implementation
start = clock();
for (i = 0; i < itr; i++) vec_add_host(hx, hy, hz, N);
cout<<"CPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// free device memory
HANDLE_ERROR(hipFree(dx));
HANDLE_ERROR(hipFree(dy));
HANDLE_ERROR(hipFree(dz));
// free() would be the matching release for plain malloc
//free(hx);
//free(hy);
//free(hz);
// matching release for hipHostMalloc
HANDLE_ERROR(hipHostFree(hx));
HANDLE_ERROR(hipHostFree(hy));
HANDLE_ERROR(hipHostFree(hz));
return 0;
}
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
for(int i=0;i<N;i++)
{
z[i]=x[i]+y[i];
}
}
| 1abd15d986993fd3aca5a2986bd22759f0761c47.cu | /**
https://blog.csdn.net/Bruce_0712/article/details/64928442
cudaDeviceSynchronize(): blocks the CPU thread until the GPU has finished all previously issued CUDA work (kernels, data copies, etc.).
cudaThreadSynchronize(): behaves essentially the same as cudaDeviceSynchronize(), but it is deprecated and may be removed in a later CUDA release.
cudaStreamSynchronize(): takes a stream ID and blocks the CPU until the GPU has finished all CUDA work in that stream; work in other streams may or may not have completed.
To synchronize across warps, use __syncthreads().
(1) Threads within the same warp reading/writing shared/global memory need no explicit synchronization;
such writes to global and shared memory are immediately visible to the other threads of that warp.
(2) Threads in different warps of the same block reading/writing shared/global memory must synchronize;
use __syncthreads() or __threadfence() to obtain the required visibility.
(3) Threads in different blocks of the same grid reading/writing shared/global memory must synchronize;
use __threadfence*() to obtain the required visibility.
// Sample output
[vec_add2.cu:63] GPU vector addition
allocated 76.29 MB on GPU
GPU cost time=0.06606s last_num=4e+07
CPU cost time=2.53995s last_num=4e+07
*/
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <ctime>
#include "common/book.h"
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
using namespace std;
typedef float FLOAT;
// CPU vector addition
// __host__ void vec_add_host(FLOAT *x,FLOAT* y,FLOAT *z,int N);
// alternatively, the __host__ qualifier on a host function can be omitted
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N);
// GPU kernel
__global__ void vec_add(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
// compute the global thread id (many threads run this kernel concurrently; the id tells them apart)
/**
* <<<(256,256),256>>> grid: 2-D block: 1-D tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y
* <<<256,256>>> grid: 1-D block: 1-D tid=threadIdx.x+blockDim.x*blockIdx.x
* <<<1,256>>> grid: 1-D block: 1-D tid=threadIdx.x
* <<<256,1>>> grid: 1-D block: 1-D tid=blockDim.x*blockIdx.x
*/
int tid=threadIdx.x+blockDim.x*blockIdx.x+blockDim.x*gridDim.x*blockIdx.y;
if (tid<N) z[tid]=x[tid]+y[tid]; // the number of threads launched must be at least the data count so every element is processed
// __syncthreads(); // thread synchronization
}
int main(int argc, char *argv[])
{
mycout<<"GPU 实现向量的加法"<<endl;
int N = 20000000;
int nbytes = N * sizeof(FLOAT);
/* 1D block */
int bs = 256;
/* 2D grid */
// int s = ceil(sqrt((N + bs - 1.) / bs));
int s = ceil(sqrt(1.0*N / bs));
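// an s x s grid of bs-thread blocks gives s*s*bs >= N threads, enough to cover every element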
dim3 grid = dim3(s, s);
// dx, dy, dz are device (GPU) pointers; hx, hy, hz are host (CPU) pointers
FLOAT *dx = NULL, *hx = NULL;
FLOAT *dy = NULL, *hy = NULL;
FLOAT *dz = NULL, *hz = NULL;
int itr = 30;
int i;
// double th, td;
/**====== 1. Allocate and initialize host (CPU) memory ==========*/
/* allocate CPU mem */
// hx = (FLOAT *) malloc(nbytes);
// hy = (FLOAT *) malloc(nbytes);
// hz = (FLOAT *) malloc(nbytes);
// plain malloc is slower for this purpose than pinned allocation with cudaMallocHost
HANDLE_ERROR(cudaMallocHost((void **)&hx, nbytes));
HANDLE_ERROR(cudaMallocHost((void **)&hy, nbytes));
HANDLE_ERROR(cudaMallocHost((void **)&hz, nbytes));
if (hx == NULL || hy == NULL || hz == NULL) {
// printf("couldn't allocate CPU memory\n");
mycout<<"couldn't allocate CPU memory"<<endl;
return -2;
}
/* initialize the host arrays */
// fill the arrays 'hx' and 'hy' on the CPU
for (int i=0; i<N; i++) {
hx[i] = i;
hy[i] = i ;
}
/**====== 2. Allocate device (GPU) memory ======*/
/* allocate GPU mem */
HANDLE_ERROR(cudaMalloc((void **)&dx, nbytes));
HANDLE_ERROR(cudaMalloc((void **)&dy, nbytes));
HANDLE_ERROR(cudaMalloc((void **)&dz, nbytes));
if (dx == NULL || dy == NULL || dz == NULL) {
// printf("couldn't allocate GPU memory\n");
mycout<<"couldn't allocate GPU memory"<<endl;
return -1;
}
printf("allocated %.2f MB on GPU\n", nbytes / (1024.f * 1024.f));
/**====== 3. Copy the input data from the CPU to the GPU ======*/
/** copy data to GPU */
HANDLE_ERROR(cudaMemcpy(dx, hx, nbytes, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dy, hy, nbytes, cudaMemcpyHostToDevice));
/**====== 4. Launch the GPU computation ======*/
/* call GPU */
// cudaThreadSynchronize(); // wait for the preceding GPU data copies to finish
HANDLE_ERROR(cudaDeviceSynchronize()); // cudaThreadSynchronize() is deprecated
clock_t start = clock();
for (i = 0; i < itr; i++) vec_add<<<grid, bs>>>(dx, dy, dz, N);
// cudaThreadSynchronize(); // wait for all kernel launches to finish
HANDLE_ERROR(cudaDeviceSynchronize()); // cudaThreadSynchronize() is deprecated
/**====== 5. Copy the GPU results back to the CPU ======*/
HANDLE_ERROR(cudaMemcpy(hz,dz, nbytes, cudaMemcpyDeviceToHost));
cout<<"GPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// time the CPU implementation
start = clock();
for (i = 0; i < itr; i++) vec_add_host(hx, hy, hz, N);
cout<<"CPU cost time="<<(double)(clock()-start)/CLOCKS_PER_SEC <<"s\t"<<
"last_num="<<hz[N-1]<<endl;
// free device memory
HANDLE_ERROR(cudaFree(dx));
HANDLE_ERROR(cudaFree(dy));
HANDLE_ERROR(cudaFree(dz));
// free() would be the matching release for plain malloc
//free(hx);
//free(hy);
//free(hz);
// matching release for cudaMallocHost
HANDLE_ERROR(cudaFreeHost(hx));
HANDLE_ERROR(cudaFreeHost(hy));
HANDLE_ERROR(cudaFreeHost(hz));
return 0;
}
void vec_add_host(FLOAT *x,FLOAT *y,FLOAT *z,int N)
{
for(int i=0;i<N;i++)
{
z[i]=x[i]+y[i];
}
}
|
f377b5a1141f552e495266dac94bb927b36736c1.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "hip/hip_runtime.h"
#include "common.h"
void print_header() {
PRINT("# %10s %12s %6s %6s out-of-place in-place \n", "", "", "", "");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type", "redop",
"time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "", "",
"(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
PRINT("%12li %12li %8s %6s", size, count, typeName, opName);
}
void AllReduceGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
*sendcount = count;
*recvcount = count;
*sendInplaceOffset = 0;
*recvInplaceOffset = 0;
*paramcount = *sendcount;
}
testResult_t AllReduceInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
size_t sendcount = args->sendBytes / wordSize(type);
size_t recvcount = args->expectedBytes / wordSize(type);
int nranks = args->nProcs*args->nThreads*args->nGpus;
for (int i=0; i<args->nGpus; i++) {
int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
CUDACHECK(hipSetDevice(gpuid));
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
CUDACHECK(hipMemset(args->recvbuffs[i], 0, args->expectedBytes));
void* data = in_place ? args->recvbuffs[i] : args->sendbuffs[i];
TESTCHECK(InitData(data, sendcount, type, rep, rank));
TESTCHECK(InitDataReduce(args->expected[i], recvcount, 0, type, op, rep, nranks));
CUDACHECK(hipDeviceSynchronize());
}
return testSuccess;
}
void AllReduceGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
double baseBw = (double)(count * typesize) / 1.0E9 / sec;
*algBw = baseBw;
double factor = ((double)(2*(nranks - 1)))/((double)nranks);
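// bus bandwidth scales algorithm bandwidth by 2*(n-1)/n, the fraction of the data each rank transfers in a ring all-reduce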
*busBw = baseBw * factor;
}
testResult_t AllReduceRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, hipStream_t stream) {
NCCLCHECK(ncclAllReduce(sendbuff, recvbuff, count, type, op, comm, stream));
return testSuccess;
}
struct testColl allReduceTest = {
"AllReduce",
AllReduceGetCollByteCount,
AllReduceInitData,
AllReduceGetBw,
AllReduceRunColl
};
void AllReduceGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
size_t paramcount, sendInplaceOffset, recvInplaceOffset;
AllReduceGetCollByteCount(sendcount, recvcount, ¶mcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
testResult_t AllReduceRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
args->collTest = &allReduceTest;
ncclDataType_t *run_types;
ncclRedOp_t *run_ops;
const char **run_typenames, **run_opnames;
int type_count, op_count;
if ((int)type != -1) {
type_count = 1;
run_types = &type;
run_typenames = &typeName;
} else {
type_count = ncclNumTypes;
run_types = test_types;
run_typenames = test_typenames;
}
if ((int)op != -1) {
op_count = 1;
run_ops = &op;
run_opnames = &opName;
} else {
op_count = ncclNumOps;
run_ops = test_ops;
run_opnames = test_opnames;
}
for (int i=0; i<type_count; i++) {
for (int j=0; j<op_count; j++) {
TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], run_ops[j], run_opnames[j], -1));
}
}
return testSuccess;
}
// refer to https://github.com/NVIDIA/nccl-tests/issues/50
#if defined(__APPLE__) && defined(__MACH__)
struct testEngine ncclTestEngine = {
AllReduceGetBuffSize,
AllReduceRunTest
};
#else
struct testEngine allReduceEngine = {
AllReduceGetBuffSize,
AllReduceRunTest
};
#pragma weak ncclTestEngine=allReduceEngine
#endif
| f377b5a1141f552e495266dac94bb927b36736c1.cu | /*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "cuda_runtime.h"
#include "common.h"
void print_header() {
PRINT("# %10s %12s %6s %6s out-of-place in-place \n", "", "", "", "");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type", "redop",
"time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "", "",
"(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
PRINT("%12li %12li %8s %6s", size, count, typeName, opName);
}
void AllReduceGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
*sendcount = count;
*recvcount = count;
*sendInplaceOffset = 0;
*recvInplaceOffset = 0;
*paramcount = *sendcount;
}
testResult_t AllReduceInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
size_t sendcount = args->sendBytes / wordSize(type);
size_t recvcount = args->expectedBytes / wordSize(type);
int nranks = args->nProcs*args->nThreads*args->nGpus;
for (int i=0; i<args->nGpus; i++) {
int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
CUDACHECK(cudaSetDevice(gpuid));
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
CUDACHECK(cudaMemset(args->recvbuffs[i], 0, args->expectedBytes));
void* data = in_place ? args->recvbuffs[i] : args->sendbuffs[i];
TESTCHECK(InitData(data, sendcount, type, rep, rank));
TESTCHECK(InitDataReduce(args->expected[i], recvcount, 0, type, op, rep, nranks));
CUDACHECK(cudaDeviceSynchronize());
}
return testSuccess;
}
void AllReduceGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
double baseBw = (double)(count * typesize) / 1.0E9 / sec;
*algBw = baseBw;
double factor = ((double)(2*(nranks - 1)))/((double)nranks);
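// bus bandwidth scales algorithm bandwidth by 2*(n-1)/n, the fraction of the data each rank transfers in a ring all-reduce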
*busBw = baseBw * factor;
}
testResult_t AllReduceRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, cudaStream_t stream) {
NCCLCHECK(ncclAllReduce(sendbuff, recvbuff, count, type, op, comm, stream));
return testSuccess;
}
struct testColl allReduceTest = {
"AllReduce",
AllReduceGetCollByteCount,
AllReduceInitData,
AllReduceGetBw,
AllReduceRunColl
};
void AllReduceGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
size_t paramcount, sendInplaceOffset, recvInplaceOffset;
AllReduceGetCollByteCount(sendcount, recvcount, ¶mcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
testResult_t AllReduceRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
args->collTest = &allReduceTest;
ncclDataType_t *run_types;
ncclRedOp_t *run_ops;
const char **run_typenames, **run_opnames;
int type_count, op_count;
if ((int)type != -1) {
type_count = 1;
run_types = &type;
run_typenames = &typeName;
} else {
type_count = ncclNumTypes;
run_types = test_types;
run_typenames = test_typenames;
}
if ((int)op != -1) {
op_count = 1;
run_ops = &op;
run_opnames = &opName;
} else {
op_count = ncclNumOps;
run_ops = test_ops;
run_opnames = test_opnames;
}
for (int i=0; i<type_count; i++) {
for (int j=0; j<op_count; j++) {
TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], run_ops[j], run_opnames[j], -1));
}
}
return testSuccess;
}
// refer to https://github.com/NVIDIA/nccl-tests/issues/50
#if defined(__APPLE__) && defined(__MACH__)
struct testEngine ncclTestEngine = {
AllReduceGetBuffSize,
AllReduceRunTest
};
#else
struct testEngine allReduceEngine = {
AllReduceGetBuffSize,
AllReduceRunTest
};
#pragma weak ncclTestEngine=allReduceEngine
#endif
|
890f52f55341d843ccd46f482d19a09e5f6002d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
% Function: generate_dmrs_pusch
% Description: Generates LTE demodulation reference signal for PUSCH
% Inputs: N_subfr - Subframe number within a radio frame
% N_id_cell - Physical layer cell identity
% delta_ss - Configurable portion of the sequence-shift pattern for PUSCH (sib2 groupAssignmentPUSCH)
% group_hopping_enabled - Boolean value determining if group hopping is enabled (sib2 groupHoppingEnabled)
% sequence_hopping_enabled - Boolean value determining if sequence hopping is enabled (sib2 sequenceHoppingEnabled)
% cyclic_shift - Broadcast cyclic shift to apply to base reference signal (sib2 cyclicShift)
% cyclic_shift_dci - Scheduled cyclic shift to apply to base reference signal
% w_config - fixed or table
% N_prbs - Number of PRBs used for the uplink grant
% layer - Which diversity layer to generate reference signals for
% Outputs: *dmrs1_h - Demodulation reference signal for PUSCH
*dmrs2_h - Demodulation reference signal for PUSCH
By: Mohammed Mostafa
Modified by: Ahmad Nour & Mohammed Osama
*/
#include "generate_dmrs_pusch_hip.cuh"
#include "generate_ul_rs.cuh"
#include "generate_psuedo_random_seq.cuh"
__global__ void generate_reference_signal(hipfftComplex* dmrs2_d, int w_vector, int M_sc_rb) {
int x_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (x_idx >= M_sc_rb)
return;
dmrs2_d[x_idx] = w_vector * dmrs2_d[x_idx];
}
void generate_dmrs_pusch(int N_subfr, int N_id_cell, int delta_ss, bool group_hopping_enabled, bool sequence_hopping_enabled, int cyclic_shift, int cyclic_shift_dci, char* w_config, int N_prbs, int layer, hipfftComplex** dmrs1_d, hipfftComplex** dmrs2_d, hipfftComplex* x_q_d, hipStream_t stream_dmrs)
{
//Calculate M_sc_rb (called in generate_ul_rs M_sc_rs)
int M_sc_rb = N_prbs*N_sc_rb;
//Calculate N_s
int N_s = N_subfr * 2;
//Set lambda
int lambda = layer;
//Calculate f_ss_pusch
int f_ss_pusch = ((N_id_cell % 30) + delta_ss) % 30;
//Generate c
Byte* c = (Byte*)malloc(sizeof(Byte)* 8 * N_ul_symb * 20);
int c_init = floor(N_id_cell / 30) * 32 + f_ss_pusch;
generate_psuedo_random_seq(&c, 8 * N_ul_symb * 20, 0, 0, c_init); //added c_init in N_id_cell according to ahmed nour
//Calculate n_pn_ns
int n_pn_ns_1 = c[8 * N_ul_symb*N_s + 0] + c[8 * N_ul_symb*N_s + 1] * 2 + c[8 * N_ul_symb*N_s + 2] * 4 + c[8 * N_ul_symb*N_s + 3] * 8 + c[8 * N_ul_symb*N_s + 4] * 16 + c[8 * N_ul_symb*N_s + 5] * 32 + c[8 * N_ul_symb*N_s + 6] * 64 + c[8 * N_ul_symb*N_s + 7] * 128;
int n_pn_ns_2 = c[8 * N_ul_symb*(N_s + 1) + 0] + c[8 * N_ul_symb*(N_s + 1) + 1]*2 + c[8 * N_ul_symb*(N_s + 1) + 2]*4 + c[8 * N_ul_symb*(N_s + 1) + 3]*8 + c[8 * N_ul_symb*(N_s + 1) + 4]*16 + c[8 * N_ul_symb*(N_s + 1) + 5]*32 + c[8 * N_ul_symb*(N_s + 1) + 6]*64 + c[8 * N_ul_symb*(N_s + 1) + 7]*128;
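// each n_pn_ns packs 8 consecutive bits of the pseudo-random sequence c into an integer, least-significant bit first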
//Determine n_1_dmrs
int n_1_dmrs = N_1_DMRS[cyclic_shift];
//Determine n_2_dmrs_lambda
int n_2_dmrs_lambda = N_2_DMRS_LAMBDA[cyclic_shift_dci][lambda];
//Calculate n_cs_lambda
int n_cs_lambda_1 = (n_1_dmrs + n_2_dmrs_lambda + n_pn_ns_1) % 12;
int n_cs_lambda_2 = (n_1_dmrs + n_2_dmrs_lambda + n_pn_ns_2) % 12;
//Calculate alpha_lambda
float alpha_lambda_1 = 2 * PI *n_cs_lambda_1 / (float)12;
float alpha_lambda_2 = 2 * PI *n_cs_lambda_2 / (float)12;
//Generate the base reference signal
generate_ul_rs(N_s, N_id_cell, "pusch", delta_ss, group_hopping_enabled, sequence_hopping_enabled, alpha_lambda_1, N_prbs, &*dmrs1_d, x_q_d, &stream_dmrs);
generate_ul_rs(N_s+1, N_id_cell, "pusch", delta_ss, group_hopping_enabled, sequence_hopping_enabled, alpha_lambda_2, N_prbs, &*dmrs2_d, x_q_d, &stream_dmrs);
//Determine w vector
int w_vector;
if (!strcmp(w_config, "fixed"))
{
w_vector = 1;
}
else
{
w_vector = W_VECTOR[cyclic_shift_dci*4 + lambda];
}
//Generate the PUSCH demodulation reference signal sequence
generate_reference_signal << < 2, 1024, 0, stream_dmrs >> >(*dmrs2_d, w_vector, M_sc_rb);
}
| 890f52f55341d843ccd46f482d19a09e5f6002d3.cu | /*
% Function: generate_dmrs_pusch
% Description: Generates LTE demodulation reference signal for PUSCH
% Inputs: N_subfr - Subframe number within a radio frame
% N_id_cell - Physical layer cell identity
% delta_ss - Configurable portion of the sequence-shift pattern for PUSCH (sib2 groupAssignmentPUSCH)
% group_hopping_enabled - Boolean value determining if group hopping is enabled (sib2 groupHoppingEnabled)
% sequence_hopping_enabled - Boolean value determining if sequence hopping is enabled (sib2 sequenceHoppingEnabled)
% cyclic_shift - Broadcast cyclic shift to apply to base reference signal (sib2 cyclicShift)
% cyclic_shift_dci - Scheduled cyclic shift to apply to base reference signal
% w_config - fixed or table
% N_prbs - Number of PRBs used for the uplink grant
% layer - Which diversity layer to generate reference signals for
% Outputs: *dmrs1_h - Demodulation reference signal for PUSCH
*dmrs2_h - Demodulation reference signal for PUSCH
By: Mohammed Mostafa
Modified by: Ahmad Nour & Mohammed Osama
*/
#include "generate_dmrs_pusch.cuh"
#include "generate_ul_rs.cuh"
#include "generate_psuedo_random_seq.cuh"
__global__ void generate_reference_signal(cufftComplex* dmrs2_d, int w_vector, int M_sc_rb) {
int x_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (x_idx >= M_sc_rb)
return;
dmrs2_d[x_idx] = w_vector * dmrs2_d[x_idx];
}
void generate_dmrs_pusch(int N_subfr, int N_id_cell, int delta_ss, bool group_hopping_enabled, bool sequence_hopping_enabled, int cyclic_shift, int cyclic_shift_dci, char* w_config, int N_prbs, int layer, cufftComplex** dmrs1_d, cufftComplex** dmrs2_d, cufftComplex* x_q_d, cudaStream_t stream_dmrs)
{
//Calculate M_sc_rb (called in generate_ul_rs M_sc_rs)
int M_sc_rb = N_prbs*N_sc_rb;
//Calculate N_s
int N_s = N_subfr * 2;
//Set lambda
int lambda = layer;
//Calculate f_ss_pusch
int f_ss_pusch = ((N_id_cell % 30) + delta_ss) % 30;
//Generate c
Byte* c = (Byte*)malloc(sizeof(Byte)* 8 * N_ul_symb * 20);
int c_init = floor(N_id_cell / 30) * 32 + f_ss_pusch;
generate_psuedo_random_seq(&c, 8 * N_ul_symb * 20, 0, 0, c_init); //added c_init in N_id_cell according to ahmed nour
//Calculate n_pn_ns
int n_pn_ns_1 = c[8 * N_ul_symb*N_s + 0] + c[8 * N_ul_symb*N_s + 1] * 2 + c[8 * N_ul_symb*N_s + 2] * 4 + c[8 * N_ul_symb*N_s + 3] * 8 + c[8 * N_ul_symb*N_s + 4] * 16 + c[8 * N_ul_symb*N_s + 5] * 32 + c[8 * N_ul_symb*N_s + 6] * 64 + c[8 * N_ul_symb*N_s + 7] * 128;
int n_pn_ns_2 = c[8 * N_ul_symb*(N_s + 1) + 0] + c[8 * N_ul_symb*(N_s + 1) + 1]*2 + c[8 * N_ul_symb*(N_s + 1) + 2]*4 + c[8 * N_ul_symb*(N_s + 1) + 3]*8 + c[8 * N_ul_symb*(N_s + 1) + 4]*16 + c[8 * N_ul_symb*(N_s + 1) + 5]*32 + c[8 * N_ul_symb*(N_s + 1) + 6]*64 + c[8 * N_ul_symb*(N_s + 1) + 7]*128;
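// each n_pn_ns packs 8 consecutive bits of the pseudo-random sequence c into an integer, least-significant bit first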
//Determine n_1_dmrs
int n_1_dmrs = N_1_DMRS[cyclic_shift];
//Determine n_2_dmrs_lambda
int n_2_dmrs_lambda = N_2_DMRS_LAMBDA[cyclic_shift_dci][lambda];
//Calculate n_cs_lambda
int n_cs_lambda_1 = (n_1_dmrs + n_2_dmrs_lambda + n_pn_ns_1) % 12;
int n_cs_lambda_2 = (n_1_dmrs + n_2_dmrs_lambda + n_pn_ns_2) % 12;
//Calculate alpha_lambda
float alpha_lambda_1 = 2 * PI *n_cs_lambda_1 / (float)12;
float alpha_lambda_2 = 2 * PI *n_cs_lambda_2 / (float)12;
//Generate the base reference signal
generate_ul_rs(N_s, N_id_cell, "pusch", delta_ss, group_hopping_enabled, sequence_hopping_enabled, alpha_lambda_1, N_prbs, &*dmrs1_d, x_q_d, &stream_dmrs);
generate_ul_rs(N_s+1, N_id_cell, "pusch", delta_ss, group_hopping_enabled, sequence_hopping_enabled, alpha_lambda_2, N_prbs, &*dmrs2_d, x_q_d, &stream_dmrs);
//Determine w vector
int w_vector;
if (!strcmp(w_config, "fixed"))
{
w_vector = 1;
}
else
{
w_vector = W_VECTOR[cyclic_shift_dci*4 + lambda];
}
//Generate the PUSCH demodulation reference signal sequence
generate_reference_signal << < 2, 1024, 0, stream_dmrs >> >(*dmrs2_d, w_vector, M_sc_rb);
}
|
0bb265083258c67ecfc1f1c40c2e6d2b6b9d3f63.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/strings/convert/convert_booleans.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <strings/utilities.hpp>
#include <strings/utilities.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
namespace cudf
{
namespace strings
{
namespace detail
{
// Convert strings column to boolean column
std::unique_ptr<column> to_booleans( strings_column_view const& strings,
string_scalar const& true_string = string_scalar("true"),
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
size_type strings_count = strings.size();
if( strings_count == 0 )
return make_numeric_column( data_type{BOOL8}, 0 );
CUDF_EXPECTS( true_string.is_valid() && true_string.size()>0, "Parameter true_string must not be empty.");
auto d_true = string_view( true_string.data(), true_string.size());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create output column copying the strings' null-mask
auto results = make_numeric_column( data_type{BOOL8}, strings_count,
copy_bitmask( strings.parent(), stream, mr), strings.null_count(), stream, mr);
auto results_view = results->mutable_view();
auto d_results = results_view.data<bool>();
thrust::transform( rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results,
[d_strings, d_true] __device__ (size_type idx) {
bool result = false;
if( !d_strings.is_null(idx) )
result = d_strings.element<string_view>(idx).compare(d_true)==0;
return result;
});
results->set_null_count(strings.null_count());
return results;
}
} // namespace detail
// external API
std::unique_ptr<column> to_booleans( strings_column_view const& strings,
string_scalar const& true_string,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::to_booleans(strings, true_string, mr );
}
namespace detail
{
// Convert boolean column to strings column
std::unique_ptr<column> from_booleans( column_view const& booleans,
string_scalar const& true_string = string_scalar("true"),
string_scalar const& false_string = string_scalar("false"),
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
size_type strings_count = booleans.size();
if( strings_count == 0 )
return make_empty_strings_column(mr,stream);
CUDF_EXPECTS( booleans.type().id()==BOOL8, "Input column must be boolean type" );
CUDF_EXPECTS( true_string.is_valid() && true_string.size()>0, "Parameter true_string must not be empty.");
auto d_true = string_view( true_string.data(), true_string.size());
CUDF_EXPECTS( false_string.is_valid() && false_string.size()>0, "Parameter false_string must not be empty.");
auto d_false = string_view( false_string.data(), false_string.size());
auto column = column_device_view::create(booleans, stream);
auto d_column = *column;
// copy null mask
rmm::device_buffer null_mask = copy_bitmask(booleans,stream,mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
[d_column, d_true, d_false] __device__ (size_type idx) {
if( d_column.is_null(idx) )
return 0;
size_type bytes = 0;
if( d_column.element<bool>(idx) )
bytes = d_true.size_bytes();
else
bytes = d_false.size_bytes();
return bytes;
});
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto offsets_view = offsets_column->view();
auto d_offsets = offsets_view.data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = create_chars_child_column( strings_count, booleans.null_count(), bytes, mr, stream );
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
[d_column, d_true, d_false, d_offsets, d_chars] __device__ (size_type idx) {
if( d_column.is_null(idx) )
return;
string_view result = ( d_column.element<bool>(idx) ? d_true : d_false );
memcpy( d_chars + d_offsets[idx], result.data(), result.size_bytes() );
});
//
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
booleans.null_count(), std::move(null_mask), stream, mr);
}
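// The function above follows the usual two-pass construction for a strings
// column: the transform iterator yields each row's byte count (the length of
// d_true or d_false, or 0 for nulls), make_offsets_child_column accumulates
// those counts into the offsets child, and the for_each_n pass memcpy's the
// chosen literal into the chars child at each row's offset; null rows end up
// as zero-length entries flagged through the copied bitmask.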
} // namespace detail
// external API
std::unique_ptr<column> from_booleans( column_view const& booleans,
string_scalar const& true_string,
string_scalar const& false_string,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::from_booleans(booleans,true_string,false_string,mr);
}
} // namespace strings
} // namespace cudf
| 0bb265083258c67ecfc1f1c40c2e6d2b6b9d3f63.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/strings/convert/convert_booleans.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <strings/utilities.hpp>
#include <strings/utilities.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
namespace cudf
{
namespace strings
{
namespace detail
{
// Convert strings column to boolean column
std::unique_ptr<column> to_booleans( strings_column_view const& strings,
string_scalar const& true_string = string_scalar("true"),
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
size_type strings_count = strings.size();
if( strings_count == 0 )
return make_numeric_column( data_type{BOOL8}, 0 );
CUDF_EXPECTS( true_string.is_valid() && true_string.size()>0, "Parameter true_string must not be empty.");
auto d_true = string_view( true_string.data(), true_string.size());
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create output column copying the strings' null-mask
auto results = make_numeric_column( data_type{BOOL8}, strings_count,
copy_bitmask( strings.parent(), stream, mr), strings.null_count(), stream, mr);
auto results_view = results->mutable_view();
auto d_results = results_view.data<bool>();
thrust::transform( rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results,
[d_strings, d_true] __device__ (size_type idx) {
bool result = false;
if( !d_strings.is_null(idx) )
result = d_strings.element<string_view>(idx).compare(d_true)==0;
return result;
});
results->set_null_count(strings.null_count());
return results;
}
} // namespace detail
// external API
std::unique_ptr<column> to_booleans( strings_column_view const& strings,
string_scalar const& true_string,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::to_booleans(strings, true_string, mr );
}
namespace detail
{
// Convert boolean column to strings column
std::unique_ptr<column> from_booleans( column_view const& booleans,
string_scalar const& true_string = string_scalar("true"),
string_scalar const& false_string = string_scalar("false"),
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
size_type strings_count = booleans.size();
if( strings_count == 0 )
return make_empty_strings_column(mr,stream);
CUDF_EXPECTS( booleans.type().id()==BOOL8, "Input column must be boolean type" );
CUDF_EXPECTS( true_string.is_valid() && true_string.size()>0, "Parameter true_string must not be empty.");
auto d_true = string_view( true_string.data(), true_string.size());
CUDF_EXPECTS( false_string.is_valid() && false_string.size()>0, "Parameter false_string must not be empty.");
auto d_false = string_view( false_string.data(), false_string.size());
auto column = column_device_view::create(booleans, stream);
auto d_column = *column;
// copy null mask
rmm::device_buffer null_mask = copy_bitmask(booleans,stream,mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
[d_column, d_true, d_false] __device__ (size_type idx) {
if( d_column.is_null(idx) )
return 0;
size_type bytes = 0;
if( d_column.element<bool>(idx) )
bytes = d_true.size_bytes();
else
bytes = d_false.size_bytes();
return bytes;
});
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto offsets_view = offsets_column->view();
auto d_offsets = offsets_view.data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = create_chars_child_column( strings_count, booleans.null_count(), bytes, mr, stream );
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
[d_column, d_true, d_false, d_offsets, d_chars] __device__ (size_type idx) {
if( d_column.is_null(idx) )
return;
string_view result = ( d_column.element<bool>(idx) ? d_true : d_false );
memcpy( d_chars + d_offsets[idx], result.data(), result.size_bytes() );
});
//
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
booleans.null_count(), std::move(null_mask), stream, mr);
}
} // namespace detail
// external API
std::unique_ptr<column> from_booleans( column_view const& booleans,
string_scalar const& true_string,
string_scalar const& false_string,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::from_booleans(booleans,true_string,false_string,mr);
}
} // namespace strings
} // namespace cudf
|
1d9b7fcc6cbaa1d63489b94c088a9f85b969c2b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Driver for routine EIGSRT */
#include <stdio.h>
//#include "nr.h"
//#include "nrutil.h"
#include <sys/time.h>
#include <stdlib.h>
//#include"jacobiMultipCUDA.h"
//#define NP 128
#include <stdbool.h>
#define NR_END 1
#define FREE_ARG char*
void get_walltime_(double* wcTime) {
struct timeval tp;
gettimeofday(&tp, NULL);
*wcTime = (double)(tp.tv_sec + tp.tv_usec/1000000.0);
}
void get_walltime(double* wcTime) {
get_walltime_(wcTime);
}
void nrerror(char error_text[])
/* Numerical Recipes standard error handler */
{
fprintf(stderr,"Numerical Recipes run-time error...\n");
fprintf(stderr,"%s\n",error_text);
fprintf(stderr,"...now exiting to system...\n");
exit(1);
}
float *vector(long nl, long nh)
/* allocate a float vector with subscript range v[nl..nh] */
{
float *v;
v=(float *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(float)));
if (!v) nrerror("allocation failure in vector()");
return v-nl+NR_END;
}
float *matrix(long nrl, long nrh, long ncl, long nch)
/* allocate a float matrix with subscript range m[nrl..nrh][ncl..nch] */
{
long N,
nrow=nrh-nrl+1,
ncol=nch-ncl+1;
N = (nrow+1)*(ncol+1);
float *m;
m=(float *) malloc((size_t)(N*sizeof(float)));
if (!m) nrerror("allocation failure in matrix()");
return m;
}
float* convert_matrix( float *a, long nrl, long nrh, long ncl, long nch)
{
/* copy the flat (row-major, 0-based) array a into a newly allocated flat array m addressed with 1-based indices m[i*ncol+j], i=1..nrow, j=1..ncol */
long i,j,k,N,
nrow=nrh-nrl+1, ncol=nch-ncl+1;
N = (nrow+1)*(ncol+1);
float *m;
m=(float *) malloc((size_t)(N*sizeof(float)));
if (!m) nrerror("allocation failure in convert_matrix()");
for (i = 1,k=0; i <= nrow; ++i)
for (j = 1; j <= ncol; ++j,k++)
m[i*(ncol)+j]=a[k];
// for (i = 1; i <= nrow; ++i){
// printf("\n");
// for (j = 1; j <= ncol; ++j)
// printf("[%f]",m[i*ncol+j]);
// }
return m;
}
void free_vector(float *v,long nl,long nh){
/* free a float vector allocated with vector() */
free((FREE_ARG) (v+nl-NR_END));
}
//void jacobiMultip (float *mat, int n, int ndm, float *eigvec, float eigval[], int *nrot);
void max_elem(int *piv_elem,int n,float *mat){
int r,c;
int max_i = 1; //first coordinate i
int max_j = 2; //first coordinate j
//#pragma acc loop
for (r = 1; r <= n-1; r++)
for (c = r+1; c <= n; c++)
if(fabs(mat[r*n+c]) > fabs(mat[max_i*n+max_j])){ //if a larger off-diagonal element exists
max_i = r; //record the new coordinates
max_j = c;
}
piv_elem[0] = max_i; //store the new coordinates
piv_elem[1] = max_j;
}
float cal_tan(int max_i,int max_j,float *mat, int n){
float num;
float den;
float a1;
float a2;
float a3;
num = 2 * (mat[max_i*n+max_j]);
if(mat[max_i*n+max_i] < mat[max_j*n+max_j]) //the numerator takes the sign of (a_ii - a_jj)
num = -num;
a1 = mat[max_i*n+max_i] - mat[max_j*n+max_j];
a2 = a1*a1;
a3 = 4 * mat[max_i*n+max_j]*mat[max_i*n+max_j];
den = a2 + a3;
den = sqrt(den);
den = abs(a1) + den;
return num/den;
}
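// The quotient returned above is a common form of the Jacobi rotation tangent
// for the pivot (i,j):
// tan(phi) = s * 2*a_ij / ( |a_ii - a_jj| + sqrt( (a_ii - a_jj)^2 + 4*a_ij^2 ) )
// where s flips the sign when a_ii < a_jj, so the smaller of the two possible
// rotation angles is chosen and the off-diagonal pivot is driven to zero.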
float cal_cos(float tang){ //cos = 1/sqrt(1+tan^2)
float cose;
cose = 1 + (tang * tang);
cose = sqrt(cose);
cose = 1 / cose;
return cose;
}
float cal_sin(float cose, float tang){ //sin = cos*tan
float sino;
sino = cose*tang;
return sino;
}
void mat_mult(int n,float *mat, float *T,float *mat_temp){
int i,j,k;
//#pragma acc loop
for (i = 1 ; i <= n ; i++ ){ //Premultiplication
for (j = 1 ; j <= n ; j++ ){
mat_temp[i*n+j] = 0;
//#pragma acc loop
for (k = 1 ; k <= n ; k++ ){
mat_temp[i*n+j] += T[k*n+i] * mat[k*n+j];
}
}
}
//#pragma acc loop
for (i = 1 ; i <= n ; i++ ){ //Postmultiplication
for (j = 1 ; j <= n ; j++ ){
mat[i*n+j] = 0;
//#pragma acc loop
for (k = 1 ; k <= n ; k++ ){
mat[i*n+j] += mat_temp[i*n+k] * T[k*n+j];
}
}
}
}
void mat_mult2(int n,float *A, float *B,float *C){
int i,j,k;
for (i = 1 ; i <= n ; i++ ){
for (j = 1 ; j <= n ; j++ ){
C[i*n+j] = 0.0;
//#pragma acc loop
for (k = 1 ; k <= n ; k++ ){
C[i*n+j] += A[i*n+k] * B[k*n+j];
}
}
}
}
void mat_mult_inv(int n,float *A, float *B,float *C){
int i,j,k;
for (i = 1 ; i <= n ; i++ ){
for (j = 1 ; j <= n ; j++ ){
C[i*n+j] = 0.0;
//#pragma acc loop
for (k = 1 ; k <= n ; k++ ){
C[i*n+j] += A[k*n+i] * B[k*n+j];
}
}
}
}
void mult_eigenvec(int n,float *T,float *eigvec,float *mat_temp){
int i,j,k;
//#pragma acc loop
for (i = 1 ; i <= n ; i++ ){ //eigenvec = eigenvec * T
for (j = 1 ; j <= n ; j++ ){
mat_temp[i*n+j] = 0;
//#pragma acc loop
for (k = 1 ; k <= n ; k++ ){
mat_temp[i*n+j] += eigvec[k*n+i] * T[k*n+j];
}
}
}
//#pragma acc loop
for (i = 1 ; i <= n ; i++ ){
for (j = 1 ; j <= n ; j++ ){
//#pragma acc loop
for (k = 1 ; k <= n ; k++ ){
eigvec[i*n+j] = mat_temp[i*n+j] ;
}
}
}
}
void new_T_mat(int max_i, int max_j,int n,float *mat,float *T){
float tang, cose, sino;
int c,r;
tang = cal_tan(max_i,max_j,mat,n);
cose = cal_cos(tang);
sino = cal_sin(cose,tang);
for (r = 1; r <= n; r++){ //Generate identity matrix
for (c = 1; c <= n; c++)
T[r*n+c] = 0.0;
T[r*n+r] = 1.0;
}
//T Rotating matrix
T[max_i*n+max_i] = cose;
T[max_j*n+max_j] = cose;
T[max_i*n+max_j] = -sino; //Element to eliminate
T[max_j*n+max_i] = sino;
}
__global__ void kernel_mat_mult_inv(int *n,float *A, float *B,float *C){
float Cvalue = 0.0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int e;
//printf("n: %d\n", *n);
if(row >= (*n) || col >= (*n))
return;
for (e = 1; e <= (*n); ++e)
Cvalue += (A[e * (*n) + col]) * (B[e * (*n) + col]);
C[row * (*n) + col] = Cvalue;
}
__global__ void kernel_mat_mult(int *n,float *A, float *B,float *C){
float Cvalue = 0.0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int e;
//printf("n: %d\n", *n);
if(row >= (*n) || col >= (*n))
return;
for (e = 1; e <= (*n); e++)
Cvalue += (A[row * (*n) + e]) * (B[e * (*n) + col]);
C[row * (*n) + col] = Cvalue;
}
void jacobiMultip (float *mat, int n, int ndm, float *eigvec, float eigval[], int *nrot){
/*****************************+*******************************/
//On input
//a: Contains the matrix to be diagonalized.
//n: Order of matrix a.
//ndim: Dimension of a.
//eigvec: eigenvectors to be computed v
//On output
//eigval: Contains the eigenvalues in ascending order d
//u: Contains the corresponding eigenvectors.
//nrot: Number of Jacobi rotations.
/*****************************+*******************************/
int i,j;
int *piv_elem; //Keeps the coordinates of an element (i,j)
bool min = false;
float EPS = .0000001;
float *T;
float *mat_temp;
int *d_n;
float *d_mat;
float *d_T;
float *d_mat_temp;
size_t size = (n+1) * (n+1) * sizeof(float);
hipError_t err;
dim3 dimGrid( 512 ); // 512 x 1 x 1
dim3 dimBlock( 1024, 1024 ); // 1024 x 1024 x 1 (note: 1024*1024 threads per block exceeds the usual 1024-thread limit; the kernel launches below are commented out)
hipMalloc(&d_n,sizeof (int));
hipMalloc(&d_mat, size);
hipMalloc(&d_T, size);
hipMalloc(&d_mat_temp, size);
hipMemcpy(d_n, &n, sizeof (int), hipMemcpyHostToDevice);
hipMemcpy(d_mat_temp, &mat_temp, size, hipMemcpyHostToDevice);
//mat_temp = (float *) vector(1,n*n);
//T=(float *) vector(1,n*n);
piv_elem=(int *) malloc (2 * sizeof (int));
for (i = 1; i <= n; i++){ //Initializing Identity matrix
for (j = 1; j <= n; j++)
eigvec[i*n+j] = 0.0;
eigvec[i*n+i] = 1.0;
}
//CPU
for (*nrot = 0; min == false ; ++*nrot){
max_elem(piv_elem,n,mat); //Search for the largest off-diagonal element in the upper triangle
if(fabs(mat[piv_elem[0]*n+piv_elem[1]]) < EPS || *nrot >= 100 ) //stop when no significant off-diagonal element remains or the iteration limit is reached
min=true;
else{
new_T_mat(piv_elem[0],piv_elem[1],n,mat,T); //Calculate T matrix
//mult_eigenvec(n,T,eigvec,mat_temp); //Compute eigenvec
mat_mult_inv(n,T,mat,mat_temp);
mat_mult2(n,mat_temp,T,mat);
// hipMemcpy(d_mat, &mat, size, hipMemcpyHostToDevice);
// hipMemcpy(d_T, &T, size, hipMemcpyHostToDevice);
// kernel_mat_mult_inv<<<dimGrid, dimBlock>>>(d_n,d_T,d_mat,d_mat_temp);
// kernel_mat_mult<<<dimGrid, dimBlock>>>(d_n,d_mat_temp,d_T,d_mat);
// hipMemcpy(&mat, d_mat, size, hipMemcpyDeviceToHost);
//err=hipMemcpy(&mat, d_mat, size, hipMemcpyDeviceToHost);
//printf("Copy MAT off of device: %s\n",hipGetErrorString(err));
printf("\nRotacin: %d\n",*nrot );
for (i = 1; i <= n; ++i){
printf("\n");
for (j = 1; j <= n; ++j)
printf("[%f]",mat[i*n+j]);
}
}
for (i = 1; i <= n; ++i)
eigval[i]=mat[i*n+i];
}
hipFree(d_mat);
hipFree(d_T);
hipFree(d_mat_temp);
//free(mat_temp);
//*nrot = nrota;
//printf("rooooooot %d\n", *nrot);
// free_vector(mat_temp,1,n*n);
// free_vector(T,1,n);
// free_vector(piv_elem,1,1);
}
int main(int argc, char **argv)
{
int NP;
char *nombreArchivo=argv[1];
double S,E;
int i, j, nrot=0;
FILE *archivo;
float *c;
if (fopen(nombreArchivo, "r") == NULL){
printf("File not found\n");
return 1;
}else{
archivo = fopen(nombreArchivo, "r");
fscanf(archivo, "%d", &NP);
c =(float *)matrix(1,NP-1,1,NP-1);
for (i = 0; i < NP; i++){
for (j = 0; j < NP; j++){
fscanf(archivo, "%f", &c[i*NP+j]);
}
}
fclose(archivo);
}
float *d, *v, *e;
d=(float *)vector(1,NP);
v=(float *)matrix(1,NP,1,NP);
e=(float *)convert_matrix(c,1,NP,1,NP);
for (i = 1; i <= NP; ++i){
printf("\n");
for (j = 1; j <= NP; ++j)
printf("[%f]",e[i*NP+j]);
}
printf("\n****** Finding Eigenvectors ******\n");
//jacobi(e,NP,d,v,&nrot);
get_walltime(&S);
jacobiMultip(e,NP,NP,v,d,&nrot);
get_walltime(&E);
for (i = 1; i <= NP; ++i){
printf("\n");
for (j = 1; j <= NP; ++j)
printf("[%f]",v[i*NP+j]);
}
printf("\nd\n");
for (i = 1; i <= NP; ++i)
{
printf("[%f]",d[i]);
}
printf("\n****** Eigenvalues & Eigenvectors ******\n");
for (i=1;i<=NP; i++) {
printf("eigenvalue %3d, = %12.6f\n",i,d[i]);
printf("eigenvector:\n");
for (j=1;j<=NP; j++) {
printf("%12.6f",v[i*NP+j]);
if ((j % 5) == 0) printf("\n");
}
printf("\n");
}
printf("Rotations: %d\n",nrot );
printf("Total time:%f sec\n", E-S);
//free_vector(d,1,NP);
//free_vector(v,1,NP*NP);
//free_vector(e,1,NP*NP);
// free(d);
// free(v);
// free(e);
return 0;
}
| 1d9b7fcc6cbaa1d63489b94c088a9f85b969c2b0.cu | /* Driver for routine EIGSRT */
#include <stdio.h>
//#include "nr.h"
//#include "nrutil.h"
#include <sys/time.h>
#include <stdlib.h>
//#include"jacobiMultipCUDA.h"
//#define NP 128
#include <stdbool.h>
#define NR_END 1
#define FREE_ARG char*
void get_walltime_(double* wcTime) {
struct timeval tp;
gettimeofday(&tp, NULL);
*wcTime = (double)(tp.tv_sec + tp.tv_usec/1000000.0);
}
void get_walltime(double* wcTime) {
get_walltime_(wcTime);
}
void nrerror(char error_text[])
/* Numerical Recipes standard error handler */
{
fprintf(stderr,"Numerical Recipes run-time error...\n");
fprintf(stderr,"%s\n",error_text);
fprintf(stderr,"...now exiting to system...\n");
exit(1);
}
float *vector(long nl, long nh)
/* allocate a float vector with subscript range v[nl..nh] */
{
float *v;
v=(float *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(float)));
if (!v) nrerror("allocation failure in vector()");
return v-nl+NR_END;
}
float *matrix(long nrl, long nrh, long ncl, long nch)
/* allocate a float matrix with subscript range m[nrl..nrh][ncl..nch] */
{
long N,
nrow=nrh-nrl+1,
ncol=nch-ncl+1;
N = (nrow+1)*(ncol+1);
float *m;
m=(float *) malloc((size_t)(N*sizeof(float)));
if (!m) nrerror("allocation failure in matrix()");
return m;
}
float* convert_matrix( float *a, long nrl, long nrh, long ncl, long nch)
{
/* copy the flat (row-major, 0-based) array a into a newly allocated flat array m addressed with 1-based indices m[i*ncol+j], i=1..nrow, j=1..ncol */
long i,j,k,N,
nrow=nrh-nrl+1, ncol=nch-ncl+1;
N = (nrow+1)*(ncol+1);
float *m;
m=(float *) malloc((size_t)(N*sizeof(float)));
if (!m) nrerror("allocation failure in convert_matrix()");
for (i = 1,k=0; i <= nrow; ++i)
for (j = 1; j <= ncol; ++j,k++)
m[i*(ncol)+j]=a[k];
// for (i = 1; i <= nrow; ++i){
// printf("\n");
// for (j = 1; j <= ncol; ++j)
// printf("[%f]",m[i*ncol+j]);
// }
return m;
}
void free_vector(float *v,long nl,long nh){
/* free a float vector allocated with vector() */
free((FREE_ARG) (v+nl-NR_END));
}
//void jacobiMultip (float *mat, int n, int ndm, float *eigvec, float eigval[], int *nrot);
void max_elem(int *piv_elem,int n,float *mat){
int r,c;
int max_i = 1; //first coordinate i
int max_j = 2; //first coordinate j
//#pragma acc loop
for (r = 1; r <= n-1; r++)
for (c = r+1; c <= n; c++)
if(fabs(mat[r*n+c]) > fabs(mat[max_i*n+max_j])){ //if a larger off-diagonal element exists
max_i = r; //record the new coordinates
max_j = c;
}
piv_elem[0] = max_i; //store the new coordinates
piv_elem[1] = max_j;
}
float cal_tan(int max_i,int max_j,float *mat, int n){
float num;
float den;
float a1;
float a2;
float a3;
num = 2 * (mat[max_i*n+max_j]);
if(mat[max_i*n+max_i] < mat[max_j*n+max_j]) //the numerator takes the sign of (a_ii - a_jj)
num = -num;
a1 = mat[max_i*n+max_i] - mat[max_j*n+max_j];
a2 = a1*a1;
a3 = 4 * mat[max_i*n+max_j]*mat[max_i*n+max_j];
den = a2 + a3;
den = sqrt(den);
den = abs(a1) + den;
return num/den;
}
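// The value returned above is a common form of the Jacobi rotation tangent
// for pivot (i,j): tan(phi) = s * 2*a_ij / ( |a_ii - a_jj| + sqrt( (a_ii - a_jj)^2 + 4*a_ij^2 ) ),
// with s flipping the sign when a_ii < a_jj so the smaller rotation angle is used.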
float cal_cos(float tang){ //cos = 1/√(1+tan^2)
float cose;
cose = 1 + (tang * tang);
cose = sqrt(cose);
cose = 1 / cose;
return cose;
}
float cal_sin(float cose, float tang){ //sin = cos*tan
float sino;
sino = cose*tang;
return sino;
}
void mat_mult(int n,float *mat, float *T,float *mat_temp){
int i,j,k;
//#pragma acc loop
for (i = 1 ; i <= n ; i++ ){ //Premultiplication
for (j = 1 ; j <= n ; j++ ){
mat_temp[i*n+j] = 0;
//#pragma acc loop
for (k = 1 ; k <= n ; k++ ){
mat_temp[i*n+j] += T[k*n+i] * mat[k*n+j];
}
}
}
//#pragma acc loop
for (i = 1 ; i <= n ; i++ ){ //Postmultiplication
for (j = 1 ; j <= n ; j++ ){
mat[i*n+j] = 0;
//#pragma acc loop
for (k = 1 ; k <= n ; k++ ){
mat[i*n+j] += mat_temp[i*n+k] * T[k*n+j];
}
}
}
}
void mat_mult2(int n,float *A, float *B,float *C){
int i,j,k;
for (i = 1 ; i <= n ; i++ ){
for (j = 1 ; j <= n ; j++ ){
C[i*n+j] = 0.0;
//#pragma acc loop
for (k = 1 ; k <= n ; k++ ){
C[i*n+j] += A[i*n+k] * B[k*n+j];
}
}
}
}
void mat_mult_inv(int n,float *A, float *B,float *C){
int i,j,k;
for (i = 1 ; i <= n ; i++ ){
for (j = 1 ; j <= n ; j++ ){
C[i*n+j] = 0.0;
//#pragma acc loop
for (k = 1 ; k <= n ; k++ ){
C[i*n+j] += A[k*n+i] * B[k*n+j];
}
}
}
}
void mult_eigenvec(int n,float *T,float *eigvec,float *mat_temp){
int i,j,k;
//#pragma acc loop
for (i = 1 ; i <= n ; i++ ){ //eigenvec = eigenvec * T
for (j = 1 ; j <= n ; j++ ){
mat_temp[i*n+j] = 0;
//#pragma acc loop
for (k = 1 ; k <= n ; k++ ){
mat_temp[i*n+j] += eigvec[k*n+i] * T[k*n+j];
}
}
}
//#pragma acc loop
for (i = 1 ; i <= n ; i++ ){
for (j = 1 ; j <= n ; j++ ){
//#pragma acc loop
for (k = 1 ; k <= n ; k++ ){
eigvec[i*n+j] = mat_temp[i*n+j] ;
}
}
}
}
void new_T_mat(int max_i, int max_j,int n,float *mat,float *T){
float tang, cose, sino;
int c,r;
tang = cal_tan(max_i,max_j,mat,n);
cose = cal_cos(tang);
sino = cal_sin(cose,tang);
for (r = 1; r <= n; r++){ //Generate identity matrix
for (c = 1; c <= n; c++)
T[r*n+c] = 0.0;
T[r*n+r] = 1.0;
}
//T Rotating matrix
T[max_i*n+max_i] = cose;
T[max_j*n+max_j] = cose;
T[max_i*n+max_j] = -sino; //Element to eliminate
T[max_j*n+max_i] = sino;
}
__global__ void kernel_mat_mult_inv(int *n,float *A, float *B,float *C){
float Cvalue = 0.0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int e;
//printf("n: %d\n", *n);
if(row >= (*n) || col >= (*n))
return;
for (e = 1; e <= (*n); ++e)
Cvalue += (A[e * (*n) + col]) * (B[e * (*n) + col]);
C[row * (*n) + col] = Cvalue;
}
__global__ void kernel_mat_mult(int *n,float *A, float *B,float *C){
float Cvalue = 0.0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int e;
//printf("n: %d\n", *n);
if(row >= (*n) || col >= (*n))
return;
for (e = 1; e <= (*n); e++)
Cvalue += (A[row * (*n) + e]) * (B[e * (*n) + col]);
C[row * (*n) + col] = Cvalue;
}
void jacobiMultip (float *mat, int n, int ndm, float *eigvec, float eigval[], int *nrot){
/*****************************+*******************************/
//On input
//a: Contains the matrix to be diagonalized.
//n: Order of matrix a.
//ndim: Dimension of a.
//eigvec: eigenvectors to be computed v
//On output
//eigval: Contains the eigenvalues in ascending order d
//u: Contains the corresponding eigenvectors.
//nrot: Number of Jacobi rotations.
/*****************************+*******************************/
int i,j;
int *piv_elem; //Keeps the coordinates of an element (i,j)
bool min = false;
float EPS = .0000001;
float *T;
float *mat_temp;
int *d_n;
float *d_mat;
float *d_T;
float *d_mat_temp;
size_t size = (n+1) * (n+1) * sizeof(float);
cudaError_t err;
dim3 dimGrid( 512 ); // 512 x 1 x 1
dim3 dimBlock( 1024, 1024 ); // 1024 x 1024 x 1 (note: 1024*1024 threads per block exceeds the usual 1024-thread limit; the kernel launches below are commented out)
cudaMalloc(&d_n,sizeof (int));
cudaMalloc(&d_mat, size);
cudaMalloc(&d_T, size);
cudaMalloc(&d_mat_temp, size);
cudaMemcpy(d_n, &n, sizeof (int), cudaMemcpyHostToDevice);
cudaMemcpy(d_mat_temp, &mat_temp, size, cudaMemcpyHostToDevice);
//mat_temp = (float *) vector(1,n*n);
//T=(float *) vector(1,n*n);
piv_elem=(int *) malloc (2 * sizeof (int));
for (i = 1; i <= n; i++){ //Initializing Identity matrix
for (j = 1; j <= n; j++)
eigvec[i*n+j] = 0.0;
eigvec[i*n+i] = 1.0;
}
//CPU
for (*nrot = 0; min == false ; ++*nrot){
max_elem(piv_elem,n,mat); //Search for the largest off-diagonal element in the upper triangle
if(fabs(mat[piv_elem[0]*n+piv_elem[1]]) < EPS || *nrot >= 100 ) //stop when no significant off-diagonal element remains or the iteration limit is reached
min=true;
else{
new_T_mat(piv_elem[0],piv_elem[1],n,mat,T); //Calculate T matrix
//mult_eigenvec(n,T,eigvec,mat_temp); //Compute eigenvec
mat_mult_inv(n,T,mat,mat_temp);
mat_mult2(n,mat_temp,T,mat);
// cudaMemcpy(d_mat, &mat, size, cudaMemcpyHostToDevice);
// cudaMemcpy(d_T, &T, size, cudaMemcpyHostToDevice);
// kernel_mat_mult_inv<<<dimGrid, dimBlock>>>(d_n,d_T,d_mat,d_mat_temp);
// kernel_mat_mult<<<dimGrid, dimBlock>>>(d_n,d_mat_temp,d_T,d_mat);
// cudaMemcpy(&mat, d_mat, size, cudaMemcpyDeviceToHost);
//err=cudaMemcpy(&mat, d_mat, size, cudaMemcpyDeviceToHost);
//printf("Copy MAT off of device: %s\n",cudaGetErrorString(err));
printf("\nRotación: %d\n",*nrot );
for (i = 1; i <= n; ++i){
printf("\n");
for (j = 1; j <= n; ++j)
printf("[%f]",mat[i*n+j]);
}
}
for (i = 1; i <= n; ++i)
eigval[i]=mat[i*n+i];
}
cudaFree(d_mat);
cudaFree(d_T);
cudaFree(d_mat_temp);
//free(mat_temp);
//*nrot = nrota;
//printf("rooooooot %d\n", *nrot);
// free_vector(mat_temp,1,n*n);
// free_vector(T,1,n);
// free_vector(piv_elem,1,1);
}
int main(int argc, char **argv)
{
int NP;
char *nombreArchivo=argv[1];
double S,E;
int i, j, nrot=0;
FILE *archivo;
float *c;
if (fopen(nombreArchivo, "r") == NULL){
printf("File not found\n");
return 1;
}else{
archivo = fopen(nombreArchivo, "r");
fscanf(archivo, "%d", &NP);
c =(float *)matrix(1,NP-1,1,NP-1);
for (i = 0; i < NP; i++){
for (j = 0; j < NP; j++){
fscanf(archivo, "%f", &c[i*NP+j]);
}
}
fclose(archivo);
}
float *d, *v, *e;
d=(float *)vector(1,NP);
v=(float *)matrix(1,NP,1,NP);
e=(float *)convert_matrix(c,1,NP,1,NP);
for (i = 1; i <= NP; ++i){
printf("\n");
for (j = 1; j <= NP; ++j)
printf("[%f]",e[i*NP+j]);
}
printf("\n****** Finding Eigenvectors ******\n");
//jacobi(e,NP,d,v,&nrot);
get_walltime(&S);
jacobiMultip(e,NP,NP,v,d,&nrot);
get_walltime(&E);
for (i = 1; i <= NP; ++i){
printf("\n");
for (j = 1; j <= NP; ++j)
printf("[%f]",v[i*NP+j]);
}
printf("\nd\n");
for (i = 1; i <= NP; ++i)
{
printf("[%f]",d[i]);
}
printf("\n****** Eigenvalues & Eigenvectors ******\n");
for (i=1;i<=NP; i++) {
printf("eigenvalue %3d, = %12.6f\n",i,d[i]);
printf("eigenvector:\n");
for (j=1;j<=NP; j++) {
printf("%12.6f",v[i*NP+j]);
if ((j % 5) == 0) printf("\n");
}
printf("\n");
}
printf("Rotations: %d\n",nrot );
printf("Total time:%f sec\n", E-S);
//free_vector(d,1,NP);
//free_vector(v,1,NP*NP);
//free_vector(e,1,NP*NP);
// free(d);
// free(v);
// free(e);
return 0;
}
|
a003a4217f4fa51782836dca315e1a410d6a2e6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <stdlib.h>
//#include <common\book.h>
#define DIM 512
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"\nGPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace std;
#define min(a,b) (((a)<(b))?(a):(b))
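// The kernel below is a per-block argmin reduction: each block copies a
// DIM-wide chunk of `index` into shared memory together with the local
// positions, then repeatedly halves the active range, keeping the smaller
// value and its position from each pair, until thread 0 holds the block
// minimum; only that winning local position is written to
// min_holder[blockIdx.x].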
__global__ void kernel(float *index,int *min_holder)
{
__shared__ float tmp[DIM];
int idx = threadIdx.x+blockIdx.x*blockDim.x;
int local_index = threadIdx.x;
int row_idx = blockIdx.x;
__shared__ int min_index[DIM];
int size = DIM/2;
tmp[local_index] = index[idx];
min_index[local_index] = local_index;
__syncthreads();
while(size)
{
if(local_index<size)
{
if(tmp[local_index+size]<tmp[local_index])
{
tmp[local_index]= tmp[local_index+size];
min_index[local_index] = min_index[local_index+size];
}
}
size/=2;
__syncthreads();
}
if(local_index==0)
{
min_holder[row_idx] = min_index[0];
}
}
int main()
{
char file_name[255];// = "in.txt";
ofstream fout("out.txt");
cout<<"Please enter the file path to the distance matrix: ";
cin.getline(file_name,255);
std::vector<char> buffer(64 * 1024 * 1024);
fstream fin;
fin.rdbuf()->pubsetbuf(&buffer[0],buffer.size());
fin.open(file_name);
//hipDeviceProp_t deviceProp;
//hipGetDeviceProperties(&deviceProp, 0);
//cout<<deviceProp.name<<" has compute capability "<<deviceProp.major<<","<< deviceProp.minor<<endl;
int size = INT_MIN;
int r=0,c=0;
fin>>size;
int pitch=ceil((double)size/DIM);
float *indexs=new float[size*size];
int *min_holder = new int[size*pitch];
float *indexes_d;
int *min_holder_d;
hipMalloc(&indexes_d,size*size*sizeof(float));
hipMalloc(&min_holder_d,(size*pitch)*sizeof(int));
bool *mark = new bool[size+1];
for(int i=0; i<2000; i++)
{
indexs[i]=INT_MAX;
}
for(int i=0; i<size+1; i++)
mark[i]=true;
r=c=0;
char tmp[255];
cout<<"Reading input file";
fin>>tmp;
//cout<<tmp;
while(1)
{
/*fin>>r>>c;
r--;
c--;*/
fin>>indexs[r*size+c];
c++; //:D
//cout<<".";
if(c==size)
{
mark[r]=false;
r++;
c=0;
//cout<<endl;
if(r<size)
{
fin>>tmp;
}
else
break;
}
}
cout<<" ..."<<endl;
//cout<<size<<endl;
//size--;
int index=0;
int handler=size;
float min;
float time;
float time_total=0;
cout<<"Working ";
dim3 blocks(size*pitch);
dim3 threads(512);
while(handler)
{
cout<<".";
min= INT_MAX;
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//GPU code
hipMemcpy(indexes_d,indexs,size*size*sizeof(float),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel), dim3(blocks),dim3(threads), 0, 0, indexes_d,min_holder_d);
gpuErrchk(hipMemcpy(min_holder,min_holder_d,(size*pitch)*sizeof(int),hipMemcpyDeviceToHost));// end of GPU code
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time,start,stop);
time_total+=time;
if(time==0)
{
cout<<"\nSomething went wrong on GPU."<<endl;
exit(0);
}
//cout<<"Time this round: "<<time<<endl;
//for(int i=0; i<size*size ; i ++ )
//cout<<i<<": "<<indexs[i]<<" ";
//cout<<endl;
//getwchar();
bool flag=false;
int trow=-1;
int row=0;
int col=0;
for(int k=0; k<size*pitch; k++)
{
if((k%(pitch))==0)
trow++;
int i = trow*size + min_holder[k];
if(indexs[i]<min)
{
min=indexs[i];
col = pitch*DIM+min_holder[k];
row = trow;
flag=true;
}
}
//cout<<min<<endl;
if(flag)
{
//cout<<row+1<<endl;
fout<<row+1<<endl;
//cout<<col+1<<endl;
fout<<col+1<<endl;
}
//merging two rows and columns
for(int i=0; i<size; i++)
{
indexs[col*size+i]= indexs[row*size+i]=(indexs[row*size+i]+indexs[col*size+i])/2;
indexs[i*size+row]= indexs[i*size+col]=(indexs[i*size+row]+indexs[i*size+col])/2;
indexs[i*size+i]=INT_MAX;
}
indexs[row*size+col] = indexs[col*size+row] = INT_MAX;
handler--;
}
cout<<"\nTime: "<<time_total<<"ms"<<endl;
cout<<"Press Enter to exit.";
getchar();
return 0;
}
| a003a4217f4fa51782836dca315e1a410d6a2e6b.cu | #include <iostream>
#include <fstream>
#include <vector>
#include <stdlib.h>
//#include <common\book.h>
#define DIM 512
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"\nGPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace std;
#define min(a,b) (((a)<(b))?(a):(b))
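// The kernel below reduces each DIM-wide chunk of `index` to the position of
// its smallest value: values and local positions are staged in shared memory,
// the active range is halved each step keeping the smaller of each pair, and
// thread 0 writes the winning position to min_holder[blockIdx.x].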
__global__ void kernel(float *index,int *min_holder)
{
__shared__ float tmp[DIM];
int idx = threadIdx.x+blockIdx.x*blockDim.x;
int local_index = threadIdx.x;
int row_idx = blockIdx.x;
__shared__ int min_index[DIM];
int size = DIM/2;
tmp[local_index] = index[idx];
min_index[local_index] = local_index;
__syncthreads();
while(size)
{
if(local_index<size)
{
if(tmp[local_index+size]<tmp[local_index])
{
tmp[local_index]= tmp[local_index+size];
min_index[local_index] = min_index[local_index+size];
}
}
size/=2;
__syncthreads();
}
if(local_index==0)
{
min_holder[row_idx] = min_index[0];
}
}
int main()
{
char file_name[255];// = "in.txt";
ofstream fout("out.txt");
cout<<"Please enter the file path to the distance matrix: ";
cin.getline(file_name,255);
std::vector<char> buffer(64 * 1024 * 1024);
fstream fin;
fin.rdbuf()->pubsetbuf(&buffer[0],buffer.size());
fin.open(file_name);
//cudaDeviceProp deviceProp;
//cudaGetDeviceProperties(&deviceProp, 0);
//cout<<deviceProp.name<<" has compute capability "<<deviceProp.major<<","<< deviceProp.minor<<endl;
int size = INT_MIN;
int r=0,c=0;
fin>>size;
int pitch=ceil((double)size/DIM);
float *indexs=new float[size*size];
int *min_holder = new int[size*pitch];
float *indexes_d;
int *min_holder_d;
cudaMalloc(&indexes_d,size*size*sizeof(float));
cudaMalloc(&min_holder_d,(size*pitch)*sizeof(int));
bool *mark = new bool[size+1];
for(int i=0; i<2000; i++)
{
indexs[i]=INT_MAX;
}
for(int i=0; i<size+1; i++)
mark[i]=true;
r=c=0;
char tmp[255];
cout<<"Reading input file";
fin>>tmp;
//cout<<tmp;
while(1)
{
/*fin>>r>>c;
r--;
c--;*/
fin>>indexs[r*size+c];
c++; //:D
//cout<<".";
if(c==size)
{
mark[r]=false;
r++;
c=0;
//cout<<endl;
if(r<size)
{
fin>>tmp;
}
else
break;
}
}
cout<<" ..."<<endl;
//cout<<size<<endl;
//size--;
int index=0;
int handler=size;
float min;
float time;
float time_total=0;
cout<<"Working ";
dim3 blocks(size*pitch);
dim3 threads(512);
while(handler)
{
cout<<".";
min= INT_MAX;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//GPU code
cudaMemcpy(indexes_d,indexs,size*size*sizeof(float),cudaMemcpyHostToDevice);
kernel<<<blocks,threads>>>(indexes_d,min_holder_d);
gpuErrchk(cudaMemcpy(min_holder,min_holder_d,(size*pitch)*sizeof(int),cudaMemcpyDeviceToHost));// end of GPU code
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time,start,stop);
time_total+=time;
if(time==0)
{
cout<<"\nSomething went wrong on GPU."<<endl;
exit(0);
}
//cout<<"Time this round: "<<time<<endl;
//for(int i=0; i<size*size ; i ++ )
//cout<<i<<": "<<indexs[i]<<" ";
//cout<<endl;
//getwchar();
bool flag=false;
int trow=-1;
int row=0;
int col=0;
for(int k=0; k<size*pitch; k++)
{
if((k%(pitch))==0)
trow++;
int i = trow*size + min_holder[k];
if(indexs[i]<min)
{
min=indexs[i];
col = pitch*DIM+min_holder[k];
row = trow;
flag=true;
}
}
//cout<<min<<endl;
if(flag)
{
//cout<<row+1<<endl;
fout<<row+1<<endl;
//cout<<col+1<<endl;
fout<<col+1<<endl;
}
//merging two rows and columns
for(int i=0; i<size; i++)
{
indexs[col*size+i]= indexs[row*size+i]=(indexs[row*size+i]+indexs[col*size+i])/2;
indexs[i*size+row]= indexs[i*size+col]=(indexs[i*size+row]+indexs[i*size+col])/2;
indexs[i*size+i]=INT_MAX;
}
indexs[row*size+col] = indexs[col*size+row] = INT_MAX;
handler--;
}
cout<<"\nTime: "<<time_total<<"ms"<<endl;
cout<<"Press Enter to exit.";
getchar();
return 0;
}
|
90750cda802b8cb4840f0f0e900e97783526efce.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <cstdio>
#include <thrust/sort.h>
#include <helper_cuda.h>
#include <sys/time.h>
#include <omp.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include "mpi.h"
#include "tbb/parallel_sort.h"
#include "printResult.h"
#include "kernel_functions.h"
#include "helper_functions.h"
#include "values.h"
#include "worker.h"
#include <fstream>
using namespace std;
int main(int argc, char* argv[])
{
struct timeval start,end;
//MPI Communication Initialization
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Get_processor_name(processor_name,&namelen);
//Register new data type
MPI_Datatype old_types[3];
MPI_Aint indices[3];
int blocklens[3];
blocklens[0] = 1;
blocklens[1] = 1;
blocklens[2] = 1;
old_types[0] = MPI_DOUBLE;
old_types[1] = MPI_DOUBLE;
old_types[2] = MPI_INT;
indices[0] = 0;
indices[1] = sizeof(double);
indices[2] = 2 * sizeof(double);
MPI_Type_struct(3,blocklens,indices,old_types,&mpi_node);
MPI_Type_commit(&mpi_node);
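// The registration above describes the layout used for exchanging catalog
// entries: two doubles followed by one int at byte offsets 0, sizeof(double)
// and 2*sizeof(double), which matches the ra/dec/pix fields printed from
// h_sam_node later in main. MPI_Type_struct is the legacy MPI-1 name for this
// call; MPI_Type_create_struct takes the same arguments on newer MPI versions.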
// freopen(processor_name,"w",stdout);
char *referenceTable = argv[1];
int ref_file_num = atoi(argv[2]);
int ref_file_size = atoi(argv[3]);
int ref_file_ignore = atoi(argv[4]);
char *sampleTable = argv[5];
int sam_file_num = atoi(argv[6]);
int sam_file_size = atoi(argv[7]);
int sam_file_ignore = atoi(argv[8]);
ref_N = ref_file_num * ref_file_size / numprocs;
sam_N = sam_file_num * sam_file_size / numprocs;
cout << "ref_N " << ref_N << endl;
cout << "sam_N " << sam_N << endl;
time_t rawtime;
time(&rawtime);
printf("--------------\nRank %d Processor_name %s\n------------------\n",rank,processor_name);
printf("%s starts at %s\n",processor_name,ctime(&rawtime));
mem_allo(ref_file_num * ref_file_size / numprocs, sam_file_num * sam_file_size / numprocs);
load_file_list(referenceTable,ref_file_num,sampleTable,sam_file_num);
gettimeofday(&start,NULL);
load_ref_file(rank,ref_file_num,ref_file_size,ref_file_ignore);
gettimeofday(&end,NULL);
printf("rank-%d load_ref_file %.3f s\n",rank,diffTime(start,end) * 0.001);
gettimeofday(&start,NULL);
load_sam_file(rank,sam_file_num,sam_file_size,sam_file_ignore);
gettimeofday(&end,NULL);
printf("rank-%d load_sam_file %.3f s\n",rank,diffTime(start,end) * 0.001);
time(&rawtime);
printf("rank-%d finishing loading file %s\n",rank,ctime(&rawtime));
omp_set_nested(1);
#pragma omp parallel num_threads(2)
{
struct timeval start,end;
if(omp_get_thread_num() % 2 == 0)
{
gettimeofday(&start,NULL);
computeSI(search_radius);
gettimeofday(&end,NULL);
printf("rank-%d thread-%d computeSI %.3f s\n",rank,omp_get_thread_num(),diffTime(start,end) * 0.001);
gettimeofday(&start,NULL);
count_ref(rank);
gettimeofday(&end,NULL);
printf("rank-%d thread-%d count_ref %.3f s\n",rank,omp_get_thread_num(),diffTime(start,end) * 0.001);
}
else
{
gettimeofday(&start,NULL);
indexSample();
gettimeofday(&end,NULL);
printf("rank-%d thread-%d indexSample %.3f s\n",rank,omp_get_thread_num(),diffTime(start,end) * 0.001);
gettimeofday(&start,NULL);
tbb::parallel_sort(sam_node_buffer,sam_node_buffer + sam_N,cmp);
gettimeofday(&end,NULL);
printf("rank-%d thread-%d sort sample %.3f s\n",rank,omp_get_thread_num(),diffTime(start,end) * 0.001);
}
}
time(&rawtime);
printf("rank-%d after computeSI/count_ref and indexSample/sortSample %s\n",rank,ctime(&rawtime));
gettimeofday(&start,NULL);
worker_gather(rank);
gettimeofday(&end,NULL);
printf("rank-%d worker_gather %.3f s\n",rank,diffTime(start,end) * 0.001);
gettimeofday(&start,NULL);
cal_samChunk(rank);
gettimeofday(&end,NULL);
printf("rank-%d cal_samChunk %.3f s\n",rank,diffTime(start,end) * 0.001);
gettimeofday(&start,NULL);
redistribute_S(rank);
gettimeofday(&end,NULL);
printf("rank-%d redistribute_S %.3f s\n",rank,diffTime(start,end) * 0.001);
gettimeofday(&start,NULL);
tbb::parallel_sort(h_sam_node,h_sam_node + sam_CM_N,cmp);
for(int i = 0; i < 100; ++i)
printf("%.6lf %.6lf %d\n",h_sam_node[i].ra,h_sam_node[i].dec,h_sam_node[i].pix);
gettimeofday(&end,NULL);
printf("rank-%d sort sample %.3f s\n",rank,diffTime(start,end) * 0.001);
time(&rawtime);
printf("rank-%d after worker_gather / cal_samChunk / redistribute_S / sortSample %s\n",rank,ctime(&rawtime));
// omp_set_nested(1);
gettimeofday(&start,NULL);
exchange_and_crossmatch(rank);
gettimeofday(&end,NULL);
printf("rank-%d redistribute_R %.3f s\n",rank,diffTime(start,end) * 0.001);
time(&rawtime);
printf("%s ends at %s\n",processor_name,ctime(&rawtime));
}
| 90750cda802b8cb4840f0f0e900e97783526efce.cu | #include <iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cstdio>
#include <thrust/sort.h>
#include <helper_cuda.h>
#include <sys/time.h>
#include <omp.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include "mpi.h"
#include "tbb/parallel_sort.h"
#include "printResult.h"
#include "kernel_functions.h"
#include "helper_functions.h"
#include "values.h"
#include "worker.h"
#include <fstream>
using namespace std;
int main(int argc, char* argv[])
{
struct timeval start,end;
//MPI Communication Initialization
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Get_processor_name(processor_name,&namelen);
//Register new data type
MPI_Datatype old_types[3];
MPI_Aint indices[3];
int blocklens[3];
blocklens[0] = 1;
blocklens[1] = 1;
blocklens[2] = 1;
old_types[0] = MPI_DOUBLE;
old_types[1] = MPI_DOUBLE;
old_types[2] = MPI_INT;
indices[0] = 0;
indices[1] = sizeof(double);
indices[2] = 2 * sizeof(double);
MPI_Type_struct(3,blocklens,indices,old_types,&mpi_node);
MPI_Type_commit(&mpi_node);
// freopen(processor_name,"w",stdout);
char *referenceTable = argv[1];
int ref_file_num = atoi(argv[2]);
int ref_file_size = atoi(argv[3]);
int ref_file_ignore = atoi(argv[4]);
char *sampleTable = argv[5];
int sam_file_num = atoi(argv[6]);
int sam_file_size = atoi(argv[7]);
int sam_file_ignore = atoi(argv[8]);
ref_N = ref_file_num * ref_file_size / numprocs;
sam_N = sam_file_num * sam_file_size / numprocs;
cout << "ref_N " << ref_N << endl;
cout << "sam_N " << sam_N << endl;
time_t rawtime;
time(&rawtime);
printf("--------------\nRank %d Processor_name %s\n------------------\n",rank,processor_name);
printf("%s starts at %s\n",processor_name,ctime(&rawtime));
mem_allo(ref_file_num * ref_file_size / numprocs, sam_file_num * sam_file_size / numprocs);
load_file_list(referenceTable,ref_file_num,sampleTable,sam_file_num);
gettimeofday(&start,NULL);
load_ref_file(rank,ref_file_num,ref_file_size,ref_file_ignore);
gettimeofday(&end,NULL);
printf("rank-%d load_ref_file %.3f s\n",rank,diffTime(start,end) * 0.001);
gettimeofday(&start,NULL);
load_sam_file(rank,sam_file_num,sam_file_size,sam_file_ignore);
gettimeofday(&end,NULL);
printf("rank-%d load_sam_file %.3f s\n",rank,diffTime(start,end) * 0.001);
time(&rawtime);
printf("rank-%d finishing loading file %s\n",rank,ctime(&rawtime));
omp_set_nested(1);
#pragma omp parallel num_threads(2)
{
struct timeval start,end;
if(omp_get_thread_num() % 2 == 0)
{
gettimeofday(&start,NULL);
computeSI(search_radius);
gettimeofday(&end,NULL);
printf("rank-%d thread-%d computeSI %.3f s\n",rank,omp_get_thread_num(),diffTime(start,end) * 0.001);
gettimeofday(&start,NULL);
count_ref(rank);
gettimeofday(&end,NULL);
printf("rank-%d thread-%d count_ref %.3f s\n",rank,omp_get_thread_num(),diffTime(start,end) * 0.001);
}
else
{
gettimeofday(&start,NULL);
indexSample();
gettimeofday(&end,NULL);
printf("rank-%d thread-%d indexSample %.3f s\n",rank,omp_get_thread_num(),diffTime(start,end) * 0.001);
gettimeofday(&start,NULL);
tbb::parallel_sort(sam_node_buffer,sam_node_buffer + sam_N,cmp);
gettimeofday(&end,NULL);
printf("rank-%d thread-%d sort sample %.3f s\n",rank,omp_get_thread_num(),diffTime(start,end) * 0.001);
}
}
time(&rawtime);
printf("rank-%d after computeSI/count_ref and indexSample/sortSample %s\n",rank,ctime(&rawtime));
gettimeofday(&start,NULL);
worker_gather(rank);
gettimeofday(&end,NULL);
printf("rank-%d worker_gather %.3f s\n",rank,diffTime(start,end) * 0.001);
gettimeofday(&start,NULL);
cal_samChunk(rank);
gettimeofday(&end,NULL);
printf("rank-%d cal_samChunk %.3f s\n",rank,diffTime(start,end) * 0.001);
gettimeofday(&start,NULL);
redistribute_S(rank);
gettimeofday(&end,NULL);
printf("rank-%d redistribute_S %.3f s\n",rank,diffTime(start,end) * 0.001);
gettimeofday(&start,NULL);
tbb::parallel_sort(h_sam_node,h_sam_node + sam_CM_N,cmp);
for(int i = 0; i < 100; ++i)
printf("%.6lf %.6lf %d\n",h_sam_node[i].ra,h_sam_node[i].dec,h_sam_node[i].pix);
gettimeofday(&end,NULL);
printf("rank-%d sort sample %.3f s\n",rank,diffTime(start,end) * 0.001);
time(&rawtime);
printf("rank-%d after worker_gather / cal_samChunk / redistribute_S / sortSample %s\n",rank,ctime(&rawtime));
// omp_set_nested(1);
gettimeofday(&start,NULL);
exchange_and_crossmatch(rank);
gettimeofday(&end,NULL);
printf("rank-%d redistribute_R %.3f s\n",rank,diffTime(start,end) * 0.001);
time(&rawtime);
printf("%s ends at %s\n",processor_name,ctime(&rawtime));
}
|
23816bf4b82a0f52b1c5240e1b0d865cf166b593.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef CUDA_VISION_RL_KERNELS_H
#define CUDA_VISION_RL_KERNELS_H
// includes, system
#include <algorithm>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <iostream>
#include <time.h>
#include <boost/math/common_factor.hpp>
#include <hip/hip_runtime.h>
// includes, project
#include <hip/hip_runtime.h>
#include <cutil_inline.h>
#include <cutil_math.h>
#include "utilsShared.h"
#include "./kernels/stereo.h"
#endif
| 23816bf4b82a0f52b1c5240e1b0d865cf166b593.cu | #ifndef CUDA_VISION_RL_KERNELS_H
#define CUDA_VISION_RL_KERNELS_H
// includes, system
#include <algorithm>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <iostream>
#include <time.h>
#include <boost/math/common_factor.hpp>
#include <cuda.h>
// includes, project
#include <cuda_runtime.h>
#include <cutil_inline.h>
#include <cutil_math.h>
#include "utilsShared.h"
#include "./kernels/stereo.h"
#endif
|
e6c456a1776a4ac6173a90bd8f55420eeed289ac.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by Jacob Austin on 5/17/18.
//
#define GLM_FORCE_PURE
#include "mass.h"
Mass::Mass() {
m = 1.0;
dt = 0.0001;
damping = 1.0;
T = 0;
valid = true;
arrayptr = nullptr;
ref_count = 0;
#ifdef GRAPHICS
color = Vec(1.0, 0.2, 0.2);
#endif
} // constructor TODO fix timing
void Mass::operator=(CUDA_MASS & mass) {
m = mass.m;
dt = mass.dt;
T = mass.T;
damping = mass.damping;
pos = mass.pos;
vel = mass.vel;
acc = mass.acc;
force = mass.force;
valid = mass.valid;
ref_count = this -> ref_count;
arrayptr = this -> arrayptr;
#ifdef CONSTRAINTS
constraints = this -> constraints;
#endif
#ifdef GRAPHICS
color = mass.color;
#endif
}
Mass::Mass(const Vec & position, double mass, bool fixed, double dt) {
m = mass;
pos = position;
this -> dt = dt;
T = 0;
damping = 1.0;
valid = true;
arrayptr = nullptr;
ref_count = 0;
#ifdef GRAPHICS
color = Vec(1.0, 0.2, 0.2);
#endif
}
CUDA_MASS::CUDA_MASS(Mass &mass) {
m = mass.m;
dt = mass.dt;
T = mass.T;
damping = mass.damping;
pos = mass.pos;
vel = mass.vel;
acc = mass.acc;
force = mass.force;
valid = true;
#ifdef CONSTRAINTS
constraints = CUDA_LOCAL_CONSTRAINTS(mass.constraints);
#endif
#ifdef GRAPHICS
color = mass.color;
#endif
}
#ifdef CONSTRAINTS
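// The numeric type codes tested in addConstraint and clearConstraints below
// appear to mirror the CONSTRAINT_TYPE enumerators used at the end of this
// file (0 = CONSTRAINT_PLANE, 1 = CONTACT_PLANE, 2 = BALL, 3 = DIRECTION);
// that mapping is inferred from the calls in clearConstraints(), the enum
// itself being declared in the included headers.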
void Mass::addConstraint(CONSTRAINT_TYPE type, const Vec & vec, double num) { // TODO make this more efficient
if (type == 0) {
this -> constraints.constraint_plane.push_back(CudaConstraintPlane(vec, num));
this -> constraints.num_constraint_planes++;
this -> constraints.constraint_plane_ptr = thrust::raw_pointer_cast(constraints.constraint_plane.data());
} else if (type == 1) {
this -> constraints.contact_plane.push_back(CudaContactPlane(vec, num));
this -> constraints.num_contact_planes++;
this -> constraints.contact_plane_ptr = thrust::raw_pointer_cast(constraints.contact_plane.data());
} else if (type == 2) {
this -> constraints.ball.push_back(CudaBall(vec, num));
this -> constraints.num_balls++;
this -> constraints.ball_ptr = thrust::raw_pointer_cast(constraints.ball.data());
} else if (type == 3) {
this -> constraints.direction.push_back(CudaDirection(vec, num));
this -> constraints.num_directions++;
this -> constraints.direction_ptr = thrust::raw_pointer_cast(constraints.direction.data());
}
}
void Mass::clearConstraints(CONSTRAINT_TYPE type) {
if (type == 0) {
this -> constraints.constraint_plane.clear();
this -> constraints.constraint_plane.shrink_to_fit();
this -> constraints.num_constraint_planes = 0;
} else if (type == 1) {
this -> constraints.contact_plane.clear();
this -> constraints.contact_plane.shrink_to_fit();
this -> constraints.num_contact_planes = 0;
} else if (type == 2) {
this -> constraints.ball.clear();
this -> constraints.ball.shrink_to_fit();
this -> constraints.num_balls = 0;
} else if (type == 3) {
this -> constraints.direction.clear();
this -> constraints.direction.shrink_to_fit();
this -> constraints.num_directions = 0;
}
}
void Mass::clearConstraints() {
clearConstraints(CONSTRAINT_PLANE);
clearConstraints(CONTACT_PLANE);
clearConstraints(DIRECTION);
clearConstraints(BALL);
}
void Mass::fix() {
this -> constraints.fixed = true;
}
void Mass::unfix() {
this -> constraints.fixed = false;
}
void Mass::setDrag(double C) {
this -> constraints.drag_coefficient = C;
}
#endif
void Mass::decrementRefCount() {
if (--ref_count == 0) {
if (arrayptr) {
hipFree(arrayptr);
}
delete this;
}
}
| e6c456a1776a4ac6173a90bd8f55420eeed289ac.cu | //
// Created by Jacob Austin on 5/17/18.
//
#define GLM_FORCE_PURE
#include "mass.h"
Mass::Mass() {
m = 1.0;
dt = 0.0001;
damping = 1.0;
T = 0;
valid = true;
arrayptr = nullptr;
ref_count = 0;
#ifdef GRAPHICS
color = Vec(1.0, 0.2, 0.2);
#endif
} // constructor TODO fix timing
void Mass::operator=(CUDA_MASS & mass) {
m = mass.m;
dt = mass.dt;
T = mass.T;
damping = mass.damping;
pos = mass.pos;
vel = mass.vel;
acc = mass.acc;
force = mass.force;
valid = mass.valid;
ref_count = this -> ref_count;
arrayptr = this -> arrayptr;
#ifdef CONSTRAINTS
constraints = this -> constraints;
#endif
#ifdef GRAPHICS
color = mass.color;
#endif
}
Mass::Mass(const Vec & position, double mass, bool fixed, double dt) {
m = mass;
pos = position;
this -> dt = dt;
T = 0;
damping = 1.0;
valid = true;
arrayptr = nullptr;
ref_count = 0;
#ifdef GRAPHICS
color = Vec(1.0, 0.2, 0.2);
#endif
}
CUDA_MASS::CUDA_MASS(Mass &mass) {
m = mass.m;
dt = mass.dt;
T = mass.T;
damping = mass.damping;
pos = mass.pos;
vel = mass.vel;
acc = mass.acc;
force = mass.force;
valid = true;
#ifdef CONSTRAINTS
constraints = CUDA_LOCAL_CONSTRAINTS(mass.constraints);
#endif
#ifdef GRAPHICS
color = mass.color;
#endif
}
#ifdef CONSTRAINTS
void Mass::addConstraint(CONSTRAINT_TYPE type, const Vec & vec, double num) { // TODO make this more efficient
if (type == 0) {
this -> constraints.constraint_plane.push_back(CudaConstraintPlane(vec, num));
this -> constraints.num_constraint_planes++;
this -> constraints.constraint_plane_ptr = thrust::raw_pointer_cast(constraints.constraint_plane.data());
} else if (type == 1) {
this -> constraints.contact_plane.push_back(CudaContactPlane(vec, num));
this -> constraints.num_contact_planes++;
this -> constraints.contact_plane_ptr = thrust::raw_pointer_cast(constraints.contact_plane.data());
} else if (type == 2) {
this -> constraints.ball.push_back(CudaBall(vec, num));
this -> constraints.num_balls++;
this -> constraints.ball_ptr = thrust::raw_pointer_cast(constraints.ball.data());
} else if (type == 3) {
this -> constraints.direction.push_back(CudaDirection(vec, num));
this -> constraints.num_directions++;
this -> constraints.direction_ptr = thrust::raw_pointer_cast(constraints.direction.data());
}
}
void Mass::clearConstraints(CONSTRAINT_TYPE type) {
if (type == 0) {
this -> constraints.constraint_plane.clear();
this -> constraints.constraint_plane.shrink_to_fit();
this -> constraints.num_constraint_planes = 0;
} else if (type == 1) {
this -> constraints.contact_plane.clear();
this -> constraints.contact_plane.shrink_to_fit();
this -> constraints.num_contact_planes = 0;
} else if (type == 2) {
this -> constraints.ball.clear();
this -> constraints.ball.shrink_to_fit();
this -> constraints.num_balls = 0;
} else if (type == 3) {
this -> constraints.direction.clear();
this -> constraints.direction.shrink_to_fit();
this -> constraints.num_directions = 0;
}
}
void Mass::clearConstraints() {
clearConstraints(CONSTRAINT_PLANE);
clearConstraints(CONTACT_PLANE);
clearConstraints(DIRECTION);
clearConstraints(BALL);
}
void Mass::fix() {
this -> constraints.fixed = true;
}
void Mass::unfix() {
this -> constraints.fixed = false;
}
void Mass::setDrag(double C) {
this -> constraints.drag_coefficient = C;
}
#endif
void Mass::decrementRefCount() {
if (--ref_count == 0) {
if (arrayptr) {
cudaFree(arrayptr);
}
delete this;
}
}
|
9cf1233c476333c15de1b1b8cfdd6ef09891ad9d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <iostream>
//#define n 256000
using namespace std;
int CPU_results(int *C, int *B,int *A,int N)
{
for(int i=0;i<N;i++)
C[B[i]]=A[i];
return 0;
}
int check_results(int *C, int *B, int *A, int N)
{
for(int i=0; i<N; i++)
{ if(C[B[i]]!=A[i])
{
cout<<i<<endl;
cout<<A[i]<<" "<<C[B[i]]<<endl;
printf("Sorry! Checking Failed!\n");
return 0;
}
}
printf("Good! Checking Passed!\n");
return 1;
}
__global__ void kernel(int *d_C,int *d_B, int *d_A, int N)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid >= N) return;
int x = d_A[d_B[tid]];
}
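// Note on the kernel above: the gathered value x is never stored back
// (d_C is left untouched), so an optimizing compiler may drop the load
// altogether and the timing below can under-report the gather cost; this is
// presumably also why check_results further down is commented out. Storing x
// into d_C[tid] would make the measurement reflect the indirect reads.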
int main(int argc, char *argv[])
{
int N=atoi(argv[1]);
int *A, *B, *C, *d_A, *d_B, *d_C;
A=(int *)malloc(N*sizeof(int));
B=(int *)malloc(N*sizeof(int));
C=(int *)malloc(N*sizeof(int));
hipMalloc((void **)&d_A, N*sizeof(int));
hipMalloc((void **)&d_B, N*sizeof(int));
hipMalloc((void **)&d_C, N*sizeof(int));
srand(2013);
vector<int> BV(N);
for(int i=0; i<N; i++)
{
A[i]=rand()%N;
//cout<<"A["<<i<<"]="<<A[i]<<endl;
BV[i]=i;//rand()%N;
}
random_shuffle(BV.begin(),BV.end());
for(int i=0;i<N;i++)
B[i]=BV[i];
hipMemcpy(d_A,A,N*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_B,B,N*sizeof(int),hipMemcpyHostToDevice);
int blocks= 256;
struct timespec time_start, time_end;
clock_gettime(CLOCK_MONOTONIC,&time_start);
hipLaunchKernelGGL(( kernel), dim3((N+255)/256),dim3(blocks), 0, 0, d_C,d_B,d_A,N);
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC,&time_end);
double kernel_time=(time_end.tv_sec-time_start.tv_sec)*1.e9+time_end.tv_nsec-time_start.tv_nsec;
cout<<"GPU kernel time= "<<kernel_time*1.e-9<<endl;
//for(int i=0;i<N;i++)
//cout<<"C "<<i<<"="<<C[i]<<endl;
clock_gettime(CLOCK_MONOTONIC,&time_start);
// CPU_results(C,B,A,N);
clock_gettime(CLOCK_MONOTONIC,&time_end);
kernel_time=(time_end.tv_sec-time_start.tv_sec)*1.e9+time_end.tv_nsec-time_start.tv_nsec;
cout<<"CPU time= "<<kernel_time*1.e-9<<endl;
hipMemcpy(C,d_C,N*sizeof(int),hipMemcpyDeviceToHost);
// check_results(C,B,A,N);
return 0;
}
| 9cf1233c476333c15de1b1b8cfdd6ef09891ad9d.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <iostream>
//#define n 256000
using namespace std;
int CPU_results(int *C, int *B,int *A,int N)
{
for(int i=0;i<N;i++)
C[B[i]]=A[i];
return 0;
}
int check_results(int *C, int *B, int *A, int N)
{
for(int i=0; i<N; i++)
{ if(C[B[i]]!=A[i])
{
cout<<i<<endl;
cout<<A[i]<<" "<<C[B[i]]<<endl;
printf("Sorry! Checking Failed!\n");
return 0;
}
}
printf("Good! Checking Passed!\n");
return 1;
}
__global__ void kernel(int *d_C,int *d_B, int *d_A, int N)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid >= N) return;
int x = d_A[d_B[tid]];
}
int main(int argc, char *argv[])
{
int N=atoi(argv[1]);
int *A, *B, *C, *d_A, *d_B, *d_C;
A=(int *)malloc(N*sizeof(int));
B=(int *)malloc(N*sizeof(int));
C=(int *)malloc(N*sizeof(int));
cudaMalloc((void **)&d_A, N*sizeof(int));
cudaMalloc((void **)&d_B, N*sizeof(int));
cudaMalloc((void **)&d_C, N*sizeof(int));
srand(2013);
vector<int> BV(N);
for(int i=0; i<N; i++)
{
A[i]=rand()%N;
//cout<<"A["<<i<<"]="<<A[i]<<endl;
BV[i]=i;//rand()%N;
}
random_shuffle(BV.begin(),BV.end());
for(int i=0;i<N;i++)
B[i]=BV[i];
cudaMemcpy(d_A,A,N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_B,B,N*sizeof(int),cudaMemcpyHostToDevice);
int blocks= 256;
struct timespec time_start, time_end;
clock_gettime(CLOCK_MONOTONIC,&time_start);
kernel<<<(N+255)/256,blocks>>>(d_C,d_B,d_A,N);
cudaThreadSynchronize();
clock_gettime(CLOCK_MONOTONIC,&time_end);
double kernel_time=(time_end.tv_sec-time_start.tv_sec)*1.e9+time_end.tv_nsec-time_start.tv_nsec;
cout<<"GPU kernel time= "<<kernel_time*1.e-9<<endl;
//for(int i=0;i<N;i++)
//cout<<"C "<<i<<"="<<C[i]<<endl;
clock_gettime(CLOCK_MONOTONIC,&time_start);
// CPU_results(C,B,A,N);
clock_gettime(CLOCK_MONOTONIC,&time_end);
kernel_time=(time_end.tv_sec-time_start.tv_sec)*1.e9+time_end.tv_nsec-time_start.tv_nsec;
cout<<"CPU time= "<<kernel_time*1.e-9<<endl;
cudaMemcpy(C,d_C,N*sizeof(int),cudaMemcpyDeviceToHost);
// check_results(C,B,A,N);
return 0;
}
|
50a3917303fc6dfd269f42051056f6e5a05b025d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/scatter.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/partitioning.hpp>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <cub/block/block_scan.cuh>
#include <cub/device/device_histogram.cuh>
namespace cudf {
namespace {
// Launch configuration for optimized hash partition
constexpr size_type OPTIMIZED_BLOCK_SIZE = 512;
constexpr size_type OPTIMIZED_ROWS_PER_THREAD = 8;
constexpr size_type ELEMENTS_PER_THREAD = 2;
constexpr size_type THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL = 1024;
// Launch configuration for fallback hash partition
constexpr size_type FALLBACK_BLOCK_SIZE = 256;
constexpr size_type FALLBACK_ROWS_PER_THREAD = 1;
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses the modulo operation.
*/
template <typename hash_value_t>
class modulo_partitioner {
public:
modulo_partitioner(size_type num_partitions) : divisor{num_partitions} {}
__device__ size_type operator()(hash_value_t hash_value) const { return hash_value % divisor; }
private:
const size_type divisor;
};
template <typename T>
bool is_power_two(T number)
{
return (0 == (number & (number - 1)));
}
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses a bitwise mask. Only works when num_partitions is a power of 2.
*
* For n % d, if d is a power of two, then it can be computed more efficiently
* via a single bitwise AND as: n & (d - 1)
*/
template <typename hash_value_t>
class bitwise_partitioner {
public:
bitwise_partitioner(size_type num_partitions) : mask{(num_partitions - 1)}
{
assert(is_power_two(num_partitions));
}
__device__ size_type operator()(hash_value_t hash_value) const
{
return hash_value & mask; // hash_value & (num_partitions - 1)
}
private:
const size_type mask;
};
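// Illustrative check (not used by the partitioner itself) of the identity noted
// above: for a power-of-two divisor d, n % d == n & (d - 1). For example,
// 13 % 8 == 5 and 13 & 7 == 0b1101 & 0b0111 == 0b0101 == 5.
static_assert((13 % 8) == (13 & (8 - 1)), "n % d must equal n & (d - 1) for power-of-two d");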
/**
* @brief Computes which partition each row of a device_table will belong to
based on hashing each row, and applying a partition function to the hash value.
Records the size of each partition for each thread block as well as the
global size of each partition across all thread blocks.
*
 * @param[in] the_hasher The hasher used to compute the hash value of each row
* @param[in] num_rows The number of rows in the table
* @param[in] num_partitions The number of partitions to divide the rows into
 * @param[in] the_partitioner The functor that maps a row's hash value to a
partition number
* @param[out] row_partition_numbers Array that holds which partition each row
belongs to
* @param[out] row_partition_offset Array that holds the offset of each row in
its partition of
* the thread block
* @param[out] block_partition_sizes Array that holds the size of each partition
for each block,
* i.e., { {block0 partition0 size, block1 partition0 size, ...},
{block0 partition1 size, block1 partition1 size, ...},
...
{block0 partition(num_partitions-1) size, block1
partition(num_partitions -1) size, ...} }
* @param[out] global_partition_sizes The number of rows in each partition.
*/
template <class row_hasher_t, typename partitioner_type>
__global__ void compute_row_partition_numbers(row_hasher_t the_hasher,
const size_type num_rows,
const size_type num_partitions,
const partitioner_type the_partitioner,
size_type* __restrict__ row_partition_numbers,
size_type* __restrict__ row_partition_offset,
size_type* __restrict__ block_partition_sizes,
size_type* __restrict__ global_partition_sizes)
{
// Accumulate histogram of the size of each partition in shared memory
extern __shared__ size_type shared_partition_sizes[];
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize local histogram
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_sizes[partition_number] = 0;
partition_number += blockDim.x;
}
__syncthreads();
// Compute the hash value for each row, store it to the array of hash values
// and compute the partition to which the hash value belongs and increment
// the shared memory counter for that partition
while (row_number < num_rows) {
const hash_value_type row_hash_value = the_hasher(row_number);
const size_type partition_number = the_partitioner(row_hash_value);
row_partition_numbers[row_number] = partition_number;
row_partition_offset[row_number] =
atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
row_number += blockDim.x * gridDim.x;
}
__syncthreads();
// Flush shared memory histogram to global memory
partition_number = threadIdx.x;
while (partition_number < num_partitions) {
const size_type block_partition_size = shared_partition_sizes[partition_number];
// Update global size of each partition
atomicAdd(&global_partition_sizes[partition_number], block_partition_size);
// Record the size of this partition in this block
const size_type write_location = partition_number * gridDim.x + blockIdx.x;
block_partition_sizes[write_location] = block_partition_size;
partition_number += blockDim.x;
}
}
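// Layout note (illustrative): block_partition_sizes is partition-major. With
// gridDim.x = 2 and num_partitions = 3 it is ordered
//   { b0p0, b1p0, b0p1, b1p1, b0p2, b1p2 },
// so the exclusive scan over it (see hash_partition_table) yields, for each
// (partition, block) pair, the offset at which that block writes its share of
// that partition.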
/**
* @brief Given an array of partition numbers, computes the final output
location for each element in the output such that all rows with the same
partition are contiguous in memory.
*
* @param row_partition_numbers The array that records the partition number for
each row
* @param num_rows The number of rows
 * @param num_partitions The number of partitions
* @param[out] block_partition_offsets Array that holds the offset of each
partition for each thread block,
* i.e., { {block0 partition0 offset, block1 partition0 offset, ...},
{block0 partition1 offset, block1 partition1 offset, ...},
...
{block0 partition(num_partitions-1) offset, block1
partition(num_partitions -1) offset, ...} }
*/
__global__ void compute_row_output_locations(size_type* __restrict__ row_partition_numbers,
const size_type num_rows,
const size_type num_partitions,
size_type* __restrict__ block_partition_offsets)
{
  // Shared array that holds the offset of this block's partitions in
// global memory
extern __shared__ size_type shared_partition_offsets[];
  // Initialize array of this block's offsets from the global array
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_offsets[partition_number] =
block_partition_offsets[partition_number * gridDim.x + blockIdx.x];
partition_number += blockDim.x;
}
__syncthreads();
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
  // Get each row's partition number, and get its output location by
// incrementing block's offset counter for that partition number
// and store the row's output location in-place
while (row_number < num_rows) {
// Get partition number of this row
const size_type partition_number = row_partition_numbers[row_number];
// Get output location based on partition number by incrementing the
// corresponding partition offset for this block
const size_type row_output_location =
atomicAdd(&(shared_partition_offsets[partition_number]), size_type(1));
// Store the row's output location in-place
row_partition_numbers[row_number] = row_output_location;
row_number += blockDim.x * gridDim.x;
}
}
/**
* @brief Move one column from the input table to the hashed table.
*
* @param[in] input_buf Data buffer of the column in the input table
* @param[out] output_buf Preallocated data buffer of the column in the output
* table
* @param[in] num_rows The number of rows in each column
* @param[in] num_partitions The number of partitions to divide the rows into
* @param[in] row_partition_numbers Array that holds which partition each row
* belongs to
* @param[in] row_partition_offset Array that holds the offset of each row in
* its partition of the thread block.
* @param[in] block_partition_sizes Array that holds the size of each partition
* for each block
* @param[in] scanned_block_partition_sizes The scan of block_partition_sizes
*/
template <typename InputIter, typename DataType>
__global__ void copy_block_partitions(InputIter input_iter,
DataType* __restrict__ output_buf,
const size_type num_rows,
const size_type num_partitions,
size_type const* __restrict__ row_partition_numbers,
size_type const* __restrict__ row_partition_offset,
size_type const* __restrict__ block_partition_sizes,
size_type const* __restrict__ scanned_block_partition_sizes)
{
extern __shared__ char shared_memory[];
auto block_output = reinterpret_cast<DataType*>(shared_memory);
auto partition_offset_shared =
reinterpret_cast<size_type*>(block_output + OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD);
auto partition_offset_global = partition_offset_shared + num_partitions + 1;
using BlockScan = hipcub::BlockScan<size_type, OPTIMIZED_BLOCK_SIZE>;
__shared__ typename BlockScan::TempStorage temp_storage;
// use ELEMENTS_PER_THREAD=2 to support up to 1024 partitions
size_type temp_histo[ELEMENTS_PER_THREAD];
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
temp_histo[i] =
block_partition_sizes[blockIdx.x + (ELEMENTS_PER_THREAD * threadIdx.x + i) * gridDim.x];
} else {
temp_histo[i] = 0;
}
}
__syncthreads();
BlockScan(temp_storage).InclusiveSum(temp_histo, temp_histo);
__syncthreads();
if (threadIdx.x == 0) { partition_offset_shared[0] = 0; }
// Calculate the offset in shared memory of each partition in this thread
// block
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
partition_offset_shared[ELEMENTS_PER_THREAD * threadIdx.x + i + 1] = temp_histo[i];
}
}
// Fetch the offset in the output buffer of each partition in this thread
// block
for (size_type ipartition = threadIdx.x; ipartition < num_partitions; ipartition += blockDim.x) {
partition_offset_global[ipartition] =
scanned_block_partition_sizes[ipartition * gridDim.x + blockIdx.x];
}
__syncthreads();
// Fetch the input data to shared memory
for (size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; row_number < num_rows;
row_number += blockDim.x * gridDim.x) {
size_type const ipartition = row_partition_numbers[row_number];
block_output[partition_offset_shared[ipartition] + row_partition_offset[row_number]] =
input_iter[row_number];
}
__syncthreads();
// Copy data from shared memory to output using 32 threads for each partition
constexpr int nthreads_partition = 32;
static_assert(OPTIMIZED_BLOCK_SIZE % nthreads_partition == 0,
"BLOCK_SIZE must be divisible by number of threads");
for (size_type ipartition = threadIdx.x / nthreads_partition; ipartition < num_partitions;
ipartition += OPTIMIZED_BLOCK_SIZE / nthreads_partition) {
size_type const nelements_partition =
partition_offset_shared[ipartition + 1] - partition_offset_shared[ipartition];
for (size_type row_offset = threadIdx.x % nthreads_partition; row_offset < nelements_partition;
row_offset += nthreads_partition) {
output_buf[partition_offset_global[ipartition] + row_offset] =
block_output[partition_offset_shared[ipartition] + row_offset];
}
}
}
template <typename InputIter, typename OutputIter>
void copy_block_partitions_impl(InputIter const input,
OutputIter output,
size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream)
{
// We need 3 chunks of shared memory:
// 1. BLOCK_SIZE * ROWS_PER_THREAD elements of size_type for copying to output
// 2. num_partitions + 1 elements of size_type for per-block partition offsets
// 3. num_partitions + 1 elements of size_type for global partition offsets
int const smem = OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD * sizeof(*output) +
(num_partitions + 1) * sizeof(size_type) * 2;
hipLaunchKernelGGL(( copy_block_partitions), dim3(grid_size), dim3(OPTIMIZED_BLOCK_SIZE), smem, stream.value(),
input,
output,
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes);
}
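// Worked sizing example (illustrative, assuming a 4-byte size_type and the common
// 48 KiB default limit on dynamic shared memory per block): with
// OPTIMIZED_BLOCK_SIZE = 512, OPTIMIZED_ROWS_PER_THREAD = 8, a 4-byte output type
// and the maximum num_partitions = 1024,
//   smem = 512 * 8 * 4 + 1025 * 4 * 2 = 16384 + 8200 = 24584 bytes.
// An 8-byte type needs 40968 bytes, which still fits, while a 16-byte type would
// need 73736 bytes; this is why copy_block_partitions_dispatcher below only takes
// this path for types no wider than int64_t.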
rmm::device_uvector<size_type> compute_gather_map(size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream)
{
auto sequence = thrust::make_counting_iterator(0);
rmm::device_uvector<size_type> gather_map(num_rows, stream);
copy_block_partitions_impl(sequence,
gather_map.begin(),
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return gather_map;
}
struct copy_block_partitions_dispatcher {
template <typename DataType>
constexpr static bool is_copy_block_supported()
{
// The shared-memory used for fixed-width types in the copy_block_partitions_impl function
// will be too large for any DataType greater than int64_t.
return is_fixed_width<DataType>() && (sizeof(DataType) <= sizeof(int64_t));
}
template <typename DataType, CUDF_ENABLE_IF(is_copy_block_supported<DataType>())>
std::unique_ptr<column> operator()(column_view const& input,
const size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_buffer output(input.size() * sizeof(DataType), stream, mr);
copy_block_partitions_impl(input.data<DataType>(),
static_cast<DataType*>(output.data()),
input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return std::make_unique<column>(
input.type(), input.size(), std::move(output), rmm::device_buffer{}, 0);
}
template <typename DataType, CUDF_ENABLE_IF(not is_copy_block_supported<DataType>())>
std::unique_ptr<column> operator()(column_view const& input,
const size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
    // Use compute_gather_map to create an equivalent gather map
auto gather_map = compute_gather_map(input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
auto gather_table = cudf::detail::gather(cudf::table_view({input}),
gather_map,
out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
return std::move(gather_table->release().front());
}
};
// NOTE hash_has_nulls must be true if table_to_hash has nulls
template <template <typename> class hash_function, bool hash_has_nulls>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition_table(
table_view const& input,
table_view const& table_to_hash,
size_type num_partitions,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_rows = table_to_hash.num_rows();
bool const use_optimization{num_partitions <= THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL};
auto const block_size = use_optimization ? OPTIMIZED_BLOCK_SIZE : FALLBACK_BLOCK_SIZE;
auto const rows_per_thread =
use_optimization ? OPTIMIZED_ROWS_PER_THREAD : FALLBACK_ROWS_PER_THREAD;
auto const rows_per_block = block_size * rows_per_thread;
// NOTE grid_size is non-const to workaround lambda capture bug in gcc 5.4
auto grid_size = util::div_rounding_up_safe(num_rows, rows_per_block);
// Allocate array to hold which partition each row belongs to
auto row_partition_numbers = rmm::device_uvector<size_type>(num_rows, stream);
// Array to hold the size of each partition computed by each block
// i.e., { {block0 partition0 size, block1 partition0 size, ...},
// {block0 partition1 size, block1 partition1 size, ...},
// ...
// {block0 partition(num_partitions-1) size, block1
// partition(num_partitions -1) size, ...} }
auto block_partition_sizes = rmm::device_uvector<size_type>(grid_size * num_partitions, stream);
auto scanned_block_partition_sizes =
rmm::device_uvector<size_type>(grid_size * num_partitions, stream);
// Holds the total number of rows in each partition
auto global_partition_sizes = cudf::detail::make_zeroed_device_uvector_async<size_type>(
num_partitions, stream, rmm::mr::get_current_device_resource());
auto row_partition_offset = cudf::detail::make_zeroed_device_uvector_async<size_type>(
num_rows, stream, rmm::mr::get_current_device_resource());
auto const row_hasher = experimental::row::hash::row_hasher(table_to_hash, stream);
auto const hasher =
row_hasher.device_hasher<hash_function>(nullate::DYNAMIC{hash_has_nulls}, seed);
// If the number of partitions is a power of two, we can compute the partition
// number of each row more efficiently with bitwise operations
if (is_power_two(num_partitions)) {
// Determines how the mapping between hash value and partition number is
// computed
using partitioner_type = bitwise_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and
// performing a partitioning operator on the hash value. Also computes the
// number of rows in each partition both for each thread block as well as
// across all blocks
hipLaunchKernelGGL(( compute_row_partition_numbers), dim3(grid_size),
dim3(block_size),
num_partitions * sizeof(size_type),
stream.value(), hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
global_partition_sizes.data());
} else {
// Determines how the mapping between hash value and partition number is
// computed
using partitioner_type = modulo_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and
// performing a partitioning operator on the hash value. Also computes the
// number of rows in each partition both for each thread block as well as
// across all blocks
hipLaunchKernelGGL(( compute_row_partition_numbers), dim3(grid_size),
dim3(block_size),
num_partitions * sizeof(size_type),
stream.value(), hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
global_partition_sizes.data());
}
  // Compute exclusive scan of all blocks' partition sizes to determine
  // the starting point for each block's portion of each partition in the output
thrust::exclusive_scan(rmm::exec_policy(stream),
block_partition_sizes.begin(),
block_partition_sizes.end(),
scanned_block_partition_sizes.data());
// Compute exclusive scan of size of each partition to determine offset
// location of each partition in final output.
// TODO This can be done independently on a separate stream
thrust::exclusive_scan(rmm::exec_policy(stream),
global_partition_sizes.begin(),
global_partition_sizes.end(),
global_partition_sizes.begin());
// Copy the result of the exclusive scan to the output offsets array
// to indicate the starting point for each partition in the output
auto const partition_offsets =
cudf::detail::make_std_vector_async(global_partition_sizes, stream);
// When the number of partitions is less than a threshold, we can apply an
// optimization using shared memory to copy values to the output buffer.
// Otherwise, fallback to using scatter.
if (use_optimization) {
std::vector<std::unique_ptr<column>> output_cols(input.num_columns());
// Copy input to output by partition per column
std::transform(input.begin(), input.end(), output_cols.begin(), [&](auto const& col) {
return cudf::type_dispatcher<dispatch_storage_type>(col.type(),
copy_block_partitions_dispatcher{},
col,
num_partitions,
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
scanned_block_partition_sizes.data(),
grid_size,
stream,
mr);
});
if (has_nested_nulls(input)) {
// Use copy_block_partitions to compute a gather map
auto gather_map = compute_gather_map(num_rows,
num_partitions,
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
scanned_block_partition_sizes.data(),
grid_size,
stream);
// Handle bitmask using gather to take advantage of ballot_sync
detail::gather_bitmask(
input, gather_map.begin(), output_cols, detail::gather_bitmask_op::DONT_CHECK, stream, mr);
}
stream.synchronize(); // Async D2H copy must finish before returning host vec
return std::pair(std::make_unique<table>(std::move(output_cols)), std::move(partition_offsets));
} else {
// Compute a scatter map from input to output such that the output rows are
// sorted by partition number
auto row_output_locations{row_partition_numbers.data()};
auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data()};
hipLaunchKernelGGL(( compute_row_output_locations), dim3(grid_size),
dim3(block_size),
num_partitions * sizeof(size_type),
stream.value(),
row_output_locations, num_rows, num_partitions, scanned_block_partition_sizes_ptr);
// Use the resulting scatter map to materialize the output
auto output = detail::scatter(input, row_partition_numbers, input, stream, mr);
stream.synchronize(); // Async D2H copy must finish before returning host vec
return std::pair(std::move(output), std::move(partition_offsets));
}
}
struct dispatch_map_type {
/**
* @brief Partitions the table `t` according to the `partition_map`.
*
* Algorithm:
   * - Compute the histogram of the size of each partition
* - Compute the exclusive scan of the histogram to get the offset for each
* partition in the final partitioned output
* - Use a transform iterator to materialize the scatter map of the rows from
* `t` into the final output.
*
* @note JH: It would likely be more efficient to avoid the atomic increments
* in the transform iterator. It would probably be faster to compute a
* per-thread block histogram and compute an exclusive scan of all of the
* per-block histograms (like in hash partition). But I'm purposefully trying
* to reduce memory pressure by avoiding intermediate materializations. Plus,
* atomics resolve in L2 and should be pretty fast since all the offsets will
* fit in L2.
*
*/
template <typename MapType>
std::enable_if_t<is_index_type<MapType>(),
std::pair<std::unique_ptr<table>, std::vector<size_type>>>
operator()(table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
// Build a histogram of the number of rows in each partition
rmm::device_uvector<size_type> histogram(num_partitions + 1, stream);
std::size_t temp_storage_bytes{};
std::size_t const num_levels = num_partitions + 1;
size_type const lower_level = 0;
size_type const upper_level = num_partitions;
cub::DeviceHistogram::HistogramEven(nullptr,
temp_storage_bytes,
partition_map.begin<MapType>(),
histogram.data(),
num_levels,
lower_level,
upper_level,
partition_map.size(),
stream.value());
rmm::device_buffer temp_storage(temp_storage_bytes, stream);
cub::DeviceHistogram::HistogramEven(temp_storage.data(),
temp_storage_bytes,
partition_map.begin<MapType>(),
histogram.data(),
num_levels,
lower_level,
upper_level,
partition_map.size(),
stream.value());
// `histogram` was created with an extra entry at the end such that an
// exclusive scan will put the total number of rows at the end
thrust::exclusive_scan(
rmm::exec_policy(stream), histogram.begin(), histogram.end(), histogram.begin());
// Copy offsets to host before the transform below modifies the histogram
auto const partition_offsets = cudf::detail::make_std_vector_sync(histogram, stream);
// Unfortunately need to materialize the scatter map because
// `detail::scatter` requires multiple passes through the iterator
rmm::device_uvector<size_type> scatter_map(partition_map.size(), stream);
// For each `partition_map[i]`, atomically increment the corresponding
// partition offset to determine `i`s location in the output
thrust::transform(rmm::exec_policy(stream),
partition_map.begin<MapType>(),
partition_map.end<MapType>(),
scatter_map.begin(),
[offsets = histogram.data()] __device__(auto partition_number) {
return atomicAdd(&offsets[partition_number], 1);
});
// Scatter the rows into their partitions
auto scattered = detail::scatter(t, scatter_map, t, stream, mr);
return std::pair(std::move(scattered), std::move(partition_offsets));
}
template <typename MapType, typename... Args>
std::enable_if_t<not is_index_type<MapType>(),
std::pair<std::unique_ptr<table>, std::vector<size_type>>>
operator()(Args&&...) const
{
CUDF_FAIL("Unexpected, non-integral partition map.");
}
};
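// Worked example (illustrative) of the algorithm documented above, assuming the
// atomics happen to resolve in row order:
//   partition_map = {1, 0, 1, 2, 0}, num_partitions = 3
//   per-partition counts = {2, 2, 1} (the extra trailing histogram entry only
//   exists so the exclusive scan ends with the row total)
//   exclusive scan -> {0, 2, 4, 5}, copied to host as partition_offsets
//   atomic transform -> scatter_map = {2, 0, 3, 4, 1}, so rows 1 and 4 land in
//   output rows 0-1 (partition 0), rows 0 and 2 in rows 2-3 (partition 1), and
//   row 3 in row 4 (partition 2).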
} // namespace
namespace detail {
namespace {
template <template <typename> class hash_function>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
table_view const& input,
std::vector<size_type> const& columns_to_hash,
int num_partitions,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto table_to_hash = input.select(columns_to_hash);
// Return empty result if there are no partitions or nothing to hash
if (num_partitions <= 0 || input.num_rows() == 0 || table_to_hash.num_columns() == 0) {
return std::pair(empty_like(input), std::vector<size_type>(num_partitions, 0));
}
if (has_nested_nulls(table_to_hash)) {
return hash_partition_table<hash_function, true>(
input, table_to_hash, num_partitions, seed, stream, mr);
} else {
return hash_partition_table<hash_function, false>(
input, table_to_hash, num_partitions, seed, stream, mr);
}
}
} // namespace
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(t.num_rows() == partition_map.size(),
"Size mismatch between table and partition map.");
CUDF_EXPECTS(not partition_map.has_nulls(), "Unexpected null values in partition_map.");
if (num_partitions == 0 or t.num_rows() == 0) {
// The output offsets vector must have size `num_partitions + 1` as per documentation.
return std::pair(empty_like(t), std::vector<size_type>(num_partitions + 1, 0));
}
return cudf::type_dispatcher(
partition_map.type(), dispatch_map_type{}, t, partition_map, num_partitions, stream, mr);
}
} // namespace detail
// Partition based on hash values
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
table_view const& input,
std::vector<size_type> const& columns_to_hash,
int num_partitions,
hash_id hash_function,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
switch (hash_function) {
case (hash_id::HASH_IDENTITY):
for (const size_type& column_id : columns_to_hash) {
if (!is_numeric(input.column(column_id).type()))
CUDF_FAIL("IdentityHash does not support this data type");
}
return detail::hash_partition<detail::IdentityHash>(
input, columns_to_hash, num_partitions, seed, stream, mr);
case (hash_id::HASH_MURMUR3):
return detail::hash_partition<detail::MurmurHash3_32>(
input, columns_to_hash, num_partitions, seed, stream, mr);
default: CUDF_FAIL("Unsupported hash function in hash_partition");
}
}
// Partition based on an explicit partition map
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::partition(t, partition_map, num_partitions, cudf::get_default_stream(), mr);
}
} // namespace cudf
| 50a3917303fc6dfd269f42051056f6e5a05b025d.cu | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/scatter.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/partitioning.hpp>
#include <cudf/table/experimental/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <cub/block/block_scan.cuh>
#include <cub/device/device_histogram.cuh>
namespace cudf {
namespace {
// Launch configuration for optimized hash partition
constexpr size_type OPTIMIZED_BLOCK_SIZE = 512;
constexpr size_type OPTIMIZED_ROWS_PER_THREAD = 8;
constexpr size_type ELEMENTS_PER_THREAD = 2;
constexpr size_type THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL = 1024;
// Launch configuration for fallback hash partition
constexpr size_type FALLBACK_BLOCK_SIZE = 256;
constexpr size_type FALLBACK_ROWS_PER_THREAD = 1;
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses the modulo operation.
*/
template <typename hash_value_t>
class modulo_partitioner {
public:
modulo_partitioner(size_type num_partitions) : divisor{num_partitions} {}
__device__ size_type operator()(hash_value_t hash_value) const { return hash_value % divisor; }
private:
const size_type divisor;
};
template <typename T>
bool is_power_two(T number)
{
return (0 == (number & (number - 1)));
}
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses a bitwise mask. Only works when num_partitions is a power of 2.
*
* For n % d, if d is a power of two, then it can be computed more efficiently
* via a single bitwise AND as: n & (d - 1)
*/
template <typename hash_value_t>
class bitwise_partitioner {
public:
bitwise_partitioner(size_type num_partitions) : mask{(num_partitions - 1)}
{
assert(is_power_two(num_partitions));
}
__device__ size_type operator()(hash_value_t hash_value) const
{
return hash_value & mask; // hash_value & (num_partitions - 1)
}
private:
const size_type mask;
};
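// Illustrative check (not used by the partitioner itself) of the identity noted
// above: for a power-of-two divisor d, n % d == n & (d - 1). For example,
// 13 % 8 == 5 and 13 & 7 == 0b1101 & 0b0111 == 0b0101 == 5.
static_assert((13 % 8) == (13 & (8 - 1)), "n % d must equal n & (d - 1) for power-of-two d");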
/**
* @brief Computes which partition each row of a device_table will belong to
based on hashing each row, and applying a partition function to the hash value.
Records the size of each partition for each thread block as well as the
global size of each partition across all thread blocks.
*
 * @param[in] the_hasher The hasher used to compute the hash value of each row
* @param[in] num_rows The number of rows in the table
* @param[in] num_partitions The number of partitions to divide the rows into
 * @param[in] the_partitioner The functor that maps a row's hash value to a
partition number
* @param[out] row_partition_numbers Array that holds which partition each row
belongs to
* @param[out] row_partition_offset Array that holds the offset of each row in
its partition of
* the thread block
* @param[out] block_partition_sizes Array that holds the size of each partition
for each block,
* i.e., { {block0 partition0 size, block1 partition0 size, ...},
{block0 partition1 size, block1 partition1 size, ...},
...
{block0 partition(num_partitions-1) size, block1
partition(num_partitions -1) size, ...} }
* @param[out] global_partition_sizes The number of rows in each partition.
*/
template <class row_hasher_t, typename partitioner_type>
__global__ void compute_row_partition_numbers(row_hasher_t the_hasher,
const size_type num_rows,
const size_type num_partitions,
const partitioner_type the_partitioner,
size_type* __restrict__ row_partition_numbers,
size_type* __restrict__ row_partition_offset,
size_type* __restrict__ block_partition_sizes,
size_type* __restrict__ global_partition_sizes)
{
// Accumulate histogram of the size of each partition in shared memory
extern __shared__ size_type shared_partition_sizes[];
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize local histogram
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_sizes[partition_number] = 0;
partition_number += blockDim.x;
}
__syncthreads();
// Compute the hash value for each row, store it to the array of hash values
// and compute the partition to which the hash value belongs and increment
// the shared memory counter for that partition
while (row_number < num_rows) {
const hash_value_type row_hash_value = the_hasher(row_number);
const size_type partition_number = the_partitioner(row_hash_value);
row_partition_numbers[row_number] = partition_number;
row_partition_offset[row_number] =
atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
row_number += blockDim.x * gridDim.x;
}
__syncthreads();
// Flush shared memory histogram to global memory
partition_number = threadIdx.x;
while (partition_number < num_partitions) {
const size_type block_partition_size = shared_partition_sizes[partition_number];
// Update global size of each partition
atomicAdd(&global_partition_sizes[partition_number], block_partition_size);
// Record the size of this partition in this block
const size_type write_location = partition_number * gridDim.x + blockIdx.x;
block_partition_sizes[write_location] = block_partition_size;
partition_number += blockDim.x;
}
}
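// Layout note (illustrative): block_partition_sizes is partition-major. With
// gridDim.x = 2 and num_partitions = 3 it is ordered
//   { b0p0, b1p0, b0p1, b1p1, b0p2, b1p2 },
// so the exclusive scan over it (see hash_partition_table) yields, for each
// (partition, block) pair, the offset at which that block writes its share of
// that partition.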
/**
* @brief Given an array of partition numbers, computes the final output
location for each element in the output such that all rows with the same
partition are contiguous in memory.
*
* @param row_partition_numbers The array that records the partition number for
each row
* @param num_rows The number of rows
 * @param num_partitions The number of partitions
* @param[out] block_partition_offsets Array that holds the offset of each
partition for each thread block,
* i.e., { {block0 partition0 offset, block1 partition0 offset, ...},
{block0 partition1 offset, block1 partition1 offset, ...},
...
{block0 partition(num_partitions-1) offset, block1
partition(num_partitions -1) offset, ...} }
*/
__global__ void compute_row_output_locations(size_type* __restrict__ row_partition_numbers,
const size_type num_rows,
const size_type num_partitions,
size_type* __restrict__ block_partition_offsets)
{
  // Shared array that holds the offset of this block's partitions in
// global memory
extern __shared__ size_type shared_partition_offsets[];
  // Initialize array of this block's offsets from the global array
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_offsets[partition_number] =
block_partition_offsets[partition_number * gridDim.x + blockIdx.x];
partition_number += blockDim.x;
}
__syncthreads();
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
  // Get each row's partition number, and get its output location by
// incrementing block's offset counter for that partition number
// and store the row's output location in-place
while (row_number < num_rows) {
// Get partition number of this row
const size_type partition_number = row_partition_numbers[row_number];
// Get output location based on partition number by incrementing the
// corresponding partition offset for this block
const size_type row_output_location =
atomicAdd(&(shared_partition_offsets[partition_number]), size_type(1));
// Store the row's output location in-place
row_partition_numbers[row_number] = row_output_location;
row_number += blockDim.x * gridDim.x;
}
}
/**
* @brief Move one column from the input table to the hashed table.
*
* @param[in] input_buf Data buffer of the column in the input table
* @param[out] output_buf Preallocated data buffer of the column in the output
* table
* @param[in] num_rows The number of rows in each column
* @param[in] num_partitions The number of partitions to divide the rows into
* @param[in] row_partition_numbers Array that holds which partition each row
* belongs to
* @param[in] row_partition_offset Array that holds the offset of each row in
* its partition of the thread block.
* @param[in] block_partition_sizes Array that holds the size of each partition
* for each block
* @param[in] scanned_block_partition_sizes The scan of block_partition_sizes
*/
template <typename InputIter, typename DataType>
__global__ void copy_block_partitions(InputIter input_iter,
DataType* __restrict__ output_buf,
const size_type num_rows,
const size_type num_partitions,
size_type const* __restrict__ row_partition_numbers,
size_type const* __restrict__ row_partition_offset,
size_type const* __restrict__ block_partition_sizes,
size_type const* __restrict__ scanned_block_partition_sizes)
{
extern __shared__ char shared_memory[];
auto block_output = reinterpret_cast<DataType*>(shared_memory);
auto partition_offset_shared =
reinterpret_cast<size_type*>(block_output + OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD);
auto partition_offset_global = partition_offset_shared + num_partitions + 1;
using BlockScan = cub::BlockScan<size_type, OPTIMIZED_BLOCK_SIZE>;
__shared__ typename BlockScan::TempStorage temp_storage;
// use ELEMENTS_PER_THREAD=2 to support up to 1024 partitions
size_type temp_histo[ELEMENTS_PER_THREAD];
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
temp_histo[i] =
block_partition_sizes[blockIdx.x + (ELEMENTS_PER_THREAD * threadIdx.x + i) * gridDim.x];
} else {
temp_histo[i] = 0;
}
}
__syncthreads();
BlockScan(temp_storage).InclusiveSum(temp_histo, temp_histo);
__syncthreads();
if (threadIdx.x == 0) { partition_offset_shared[0] = 0; }
// Calculate the offset in shared memory of each partition in this thread
// block
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
partition_offset_shared[ELEMENTS_PER_THREAD * threadIdx.x + i + 1] = temp_histo[i];
}
}
// Fetch the offset in the output buffer of each partition in this thread
// block
for (size_type ipartition = threadIdx.x; ipartition < num_partitions; ipartition += blockDim.x) {
partition_offset_global[ipartition] =
scanned_block_partition_sizes[ipartition * gridDim.x + blockIdx.x];
}
__syncthreads();
// Fetch the input data to shared memory
for (size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; row_number < num_rows;
row_number += blockDim.x * gridDim.x) {
size_type const ipartition = row_partition_numbers[row_number];
block_output[partition_offset_shared[ipartition] + row_partition_offset[row_number]] =
input_iter[row_number];
}
__syncthreads();
// Copy data from shared memory to output using 32 threads for each partition
constexpr int nthreads_partition = 32;
static_assert(OPTIMIZED_BLOCK_SIZE % nthreads_partition == 0,
"BLOCK_SIZE must be divisible by number of threads");
for (size_type ipartition = threadIdx.x / nthreads_partition; ipartition < num_partitions;
ipartition += OPTIMIZED_BLOCK_SIZE / nthreads_partition) {
size_type const nelements_partition =
partition_offset_shared[ipartition + 1] - partition_offset_shared[ipartition];
for (size_type row_offset = threadIdx.x % nthreads_partition; row_offset < nelements_partition;
row_offset += nthreads_partition) {
output_buf[partition_offset_global[ipartition] + row_offset] =
block_output[partition_offset_shared[ipartition] + row_offset];
}
}
}
template <typename InputIter, typename OutputIter>
void copy_block_partitions_impl(InputIter const input,
OutputIter output,
size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream)
{
// We need 3 chunks of shared memory:
// 1. BLOCK_SIZE * ROWS_PER_THREAD elements of size_type for copying to output
// 2. num_partitions + 1 elements of size_type for per-block partition offsets
// 3. num_partitions + 1 elements of size_type for global partition offsets
int const smem = OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD * sizeof(*output) +
(num_partitions + 1) * sizeof(size_type) * 2;
copy_block_partitions<<<grid_size, OPTIMIZED_BLOCK_SIZE, smem, stream.value()>>>(
input,
output,
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes);
}
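// Worked sizing example (illustrative, assuming a 4-byte size_type and the common
// 48 KiB default limit on dynamic shared memory per block): with
// OPTIMIZED_BLOCK_SIZE = 512, OPTIMIZED_ROWS_PER_THREAD = 8, a 4-byte output type
// and the maximum num_partitions = 1024,
//   smem = 512 * 8 * 4 + 1025 * 4 * 2 = 16384 + 8200 = 24584 bytes.
// An 8-byte type needs 40968 bytes, which still fits, while a 16-byte type would
// need 73736 bytes; this is why copy_block_partitions_dispatcher below only takes
// this path for types no wider than int64_t.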
rmm::device_uvector<size_type> compute_gather_map(size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream)
{
auto sequence = thrust::make_counting_iterator(0);
rmm::device_uvector<size_type> gather_map(num_rows, stream);
copy_block_partitions_impl(sequence,
gather_map.begin(),
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return gather_map;
}
struct copy_block_partitions_dispatcher {
template <typename DataType>
constexpr static bool is_copy_block_supported()
{
// The shared-memory used for fixed-width types in the copy_block_partitions_impl function
// will be too large for any DataType greater than int64_t.
return is_fixed_width<DataType>() && (sizeof(DataType) <= sizeof(int64_t));
}
template <typename DataType, CUDF_ENABLE_IF(is_copy_block_supported<DataType>())>
std::unique_ptr<column> operator()(column_view const& input,
const size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_buffer output(input.size() * sizeof(DataType), stream, mr);
copy_block_partitions_impl(input.data<DataType>(),
static_cast<DataType*>(output.data()),
input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return std::make_unique<column>(
input.type(), input.size(), std::move(output), rmm::device_buffer{}, 0);
}
template <typename DataType, CUDF_ENABLE_IF(not is_copy_block_supported<DataType>())>
std::unique_ptr<column> operator()(column_view const& input,
const size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
    // Use compute_gather_map to create an equivalent gather map
auto gather_map = compute_gather_map(input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
auto gather_table = cudf::detail::gather(cudf::table_view({input}),
gather_map,
out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
return std::move(gather_table->release().front());
}
};
// NOTE hash_has_nulls must be true if table_to_hash has nulls
template <template <typename> class hash_function, bool hash_has_nulls>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition_table(
table_view const& input,
table_view const& table_to_hash,
size_type num_partitions,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const num_rows = table_to_hash.num_rows();
bool const use_optimization{num_partitions <= THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL};
auto const block_size = use_optimization ? OPTIMIZED_BLOCK_SIZE : FALLBACK_BLOCK_SIZE;
auto const rows_per_thread =
use_optimization ? OPTIMIZED_ROWS_PER_THREAD : FALLBACK_ROWS_PER_THREAD;
auto const rows_per_block = block_size * rows_per_thread;
// NOTE grid_size is non-const to workaround lambda capture bug in gcc 5.4
auto grid_size = util::div_rounding_up_safe(num_rows, rows_per_block);
// Allocate array to hold which partition each row belongs to
auto row_partition_numbers = rmm::device_uvector<size_type>(num_rows, stream);
// Array to hold the size of each partition computed by each block
// i.e., { {block0 partition0 size, block1 partition0 size, ...},
// {block0 partition1 size, block1 partition1 size, ...},
// ...
// {block0 partition(num_partitions-1) size, block1
// partition(num_partitions -1) size, ...} }
auto block_partition_sizes = rmm::device_uvector<size_type>(grid_size * num_partitions, stream);
auto scanned_block_partition_sizes =
rmm::device_uvector<size_type>(grid_size * num_partitions, stream);
// Holds the total number of rows in each partition
auto global_partition_sizes = cudf::detail::make_zeroed_device_uvector_async<size_type>(
num_partitions, stream, rmm::mr::get_current_device_resource());
auto row_partition_offset = cudf::detail::make_zeroed_device_uvector_async<size_type>(
num_rows, stream, rmm::mr::get_current_device_resource());
auto const row_hasher = experimental::row::hash::row_hasher(table_to_hash, stream);
auto const hasher =
row_hasher.device_hasher<hash_function>(nullate::DYNAMIC{hash_has_nulls}, seed);
// If the number of partitions is a power of two, we can compute the partition
// number of each row more efficiently with bitwise operations
if (is_power_two(num_partitions)) {
// Determines how the mapping between hash value and partition number is
// computed
using partitioner_type = bitwise_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and
// performing a partitioning operator on the hash value. Also computes the
// number of rows in each partition both for each thread block as well as
// across all blocks
compute_row_partition_numbers<<<grid_size,
block_size,
num_partitions * sizeof(size_type),
stream.value()>>>(hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
global_partition_sizes.data());
} else {
// Determines how the mapping between hash value and partition number is
// computed
using partitioner_type = modulo_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and
// performing a partitioning operator on the hash value. Also computes the
// number of rows in each partition both for each thread block as well as
// across all blocks
compute_row_partition_numbers<<<grid_size,
block_size,
num_partitions * sizeof(size_type),
stream.value()>>>(hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
global_partition_sizes.data());
}
  // Compute exclusive scan of all blocks' partition sizes to determine
  // the starting point for each block's portion of each partition in the output
thrust::exclusive_scan(rmm::exec_policy(stream),
block_partition_sizes.begin(),
block_partition_sizes.end(),
scanned_block_partition_sizes.data());
// Compute exclusive scan of size of each partition to determine offset
// location of each partition in final output.
// TODO This can be done independently on a separate stream
thrust::exclusive_scan(rmm::exec_policy(stream),
global_partition_sizes.begin(),
global_partition_sizes.end(),
global_partition_sizes.begin());
// Copy the result of the exclusive scan to the output offsets array
// to indicate the starting point for each partition in the output
auto const partition_offsets =
cudf::detail::make_std_vector_async(global_partition_sizes, stream);
// When the number of partitions is less than a threshold, we can apply an
// optimization using shared memory to copy values to the output buffer.
// Otherwise, fallback to using scatter.
if (use_optimization) {
std::vector<std::unique_ptr<column>> output_cols(input.num_columns());
// Copy input to output by partition per column
std::transform(input.begin(), input.end(), output_cols.begin(), [&](auto const& col) {
return cudf::type_dispatcher<dispatch_storage_type>(col.type(),
copy_block_partitions_dispatcher{},
col,
num_partitions,
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
scanned_block_partition_sizes.data(),
grid_size,
stream,
mr);
});
if (has_nested_nulls(input)) {
// Use copy_block_partitions to compute a gather map
auto gather_map = compute_gather_map(num_rows,
num_partitions,
row_partition_numbers.data(),
row_partition_offset.data(),
block_partition_sizes.data(),
scanned_block_partition_sizes.data(),
grid_size,
stream);
// Handle bitmask using gather to take advantage of ballot_sync
detail::gather_bitmask(
input, gather_map.begin(), output_cols, detail::gather_bitmask_op::DONT_CHECK, stream, mr);
}
stream.synchronize(); // Async D2H copy must finish before returning host vec
return std::pair(std::make_unique<table>(std::move(output_cols)), std::move(partition_offsets));
} else {
// Compute a scatter map from input to output such that the output rows are
// sorted by partition number
auto row_output_locations{row_partition_numbers.data()};
auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data()};
compute_row_output_locations<<<grid_size,
block_size,
num_partitions * sizeof(size_type),
stream.value()>>>(
row_output_locations, num_rows, num_partitions, scanned_block_partition_sizes_ptr);
// Use the resulting scatter map to materialize the output
auto output = detail::scatter(input, row_partition_numbers, input, stream, mr);
stream.synchronize(); // Async D2H copy must finish before returning host vec
return std::pair(std::move(output), std::move(partition_offsets));
}
}
struct dispatch_map_type {
/**
* @brief Partitions the table `t` according to the `partition_map`.
*
* Algorithm:
   * - Compute the histogram of the size of each partition
* - Compute the exclusive scan of the histogram to get the offset for each
* partition in the final partitioned output
* - Use a transform iterator to materialize the scatter map of the rows from
* `t` into the final output.
*
* @note JH: It would likely be more efficient to avoid the atomic increments
* in the transform iterator. It would probably be faster to compute a
* per-thread block histogram and compute an exclusive scan of all of the
* per-block histograms (like in hash partition). But I'm purposefully trying
* to reduce memory pressure by avoiding intermediate materializations. Plus,
* atomics resolve in L2 and should be pretty fast since all the offsets will
* fit in L2.
*
*/
template <typename MapType>
std::enable_if_t<is_index_type<MapType>(),
std::pair<std::unique_ptr<table>, std::vector<size_type>>>
operator()(table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr) const
{
// Build a histogram of the number of rows in each partition
rmm::device_uvector<size_type> histogram(num_partitions + 1, stream);
std::size_t temp_storage_bytes{};
std::size_t const num_levels = num_partitions + 1;
size_type const lower_level = 0;
size_type const upper_level = num_partitions;
cub::DeviceHistogram::HistogramEven(nullptr,
temp_storage_bytes,
partition_map.begin<MapType>(),
histogram.data(),
num_levels,
lower_level,
upper_level,
partition_map.size(),
stream.value());
rmm::device_buffer temp_storage(temp_storage_bytes, stream);
cub::DeviceHistogram::HistogramEven(temp_storage.data(),
temp_storage_bytes,
partition_map.begin<MapType>(),
histogram.data(),
num_levels,
lower_level,
upper_level,
partition_map.size(),
stream.value());
// `histogram` was created with an extra entry at the end such that an
// exclusive scan will put the total number of rows at the end
thrust::exclusive_scan(
rmm::exec_policy(stream), histogram.begin(), histogram.end(), histogram.begin());
// Copy offsets to host before the transform below modifies the histogram
auto const partition_offsets = cudf::detail::make_std_vector_sync(histogram, stream);
// Unfortunately need to materialize the scatter map because
// `detail::scatter` requires multiple passes through the iterator
rmm::device_uvector<size_type> scatter_map(partition_map.size(), stream);
// For each `partition_map[i]`, atomically increment the corresponding
// partition offset to determine `i`s location in the output
thrust::transform(rmm::exec_policy(stream),
partition_map.begin<MapType>(),
partition_map.end<MapType>(),
scatter_map.begin(),
[offsets = histogram.data()] __device__(auto partition_number) {
return atomicAdd(&offsets[partition_number], 1);
});
// Scatter the rows into their partitions
auto scattered = detail::scatter(t, scatter_map, t, stream, mr);
return std::pair(std::move(scattered), std::move(partition_offsets));
}
template <typename MapType, typename... Args>
std::enable_if_t<not is_index_type<MapType>(),
std::pair<std::unique_ptr<table>, std::vector<size_type>>>
operator()(Args&&...) const
{
CUDF_FAIL("Unexpected, non-integral partition map.");
}
};
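// Worked example (illustrative) of the algorithm documented above, assuming the
// atomics happen to resolve in row order:
//   partition_map = {1, 0, 1, 2, 0}, num_partitions = 3
//   per-partition counts = {2, 2, 1} (the extra trailing histogram entry only
//   exists so the exclusive scan ends with the row total)
//   exclusive scan -> {0, 2, 4, 5}, copied to host as partition_offsets
//   atomic transform -> scatter_map = {2, 0, 3, 4, 1}, so rows 1 and 4 land in
//   output rows 0-1 (partition 0), rows 0 and 2 in rows 2-3 (partition 1), and
//   row 3 in row 4 (partition 2).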
} // namespace
namespace detail {
namespace {
template <template <typename> class hash_function>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
table_view const& input,
std::vector<size_type> const& columns_to_hash,
int num_partitions,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto table_to_hash = input.select(columns_to_hash);
// Return empty result if there are no partitions or nothing to hash
if (num_partitions <= 0 || input.num_rows() == 0 || table_to_hash.num_columns() == 0) {
return std::pair(empty_like(input), std::vector<size_type>(num_partitions, 0));
}
if (has_nested_nulls(table_to_hash)) {
return hash_partition_table<hash_function, true>(
input, table_to_hash, num_partitions, seed, stream, mr);
} else {
return hash_partition_table<hash_function, false>(
input, table_to_hash, num_partitions, seed, stream, mr);
}
}
} // namespace
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(t.num_rows() == partition_map.size(),
"Size mismatch between table and partition map.");
CUDF_EXPECTS(not partition_map.has_nulls(), "Unexpected null values in partition_map.");
if (num_partitions == 0 or t.num_rows() == 0) {
// The output offsets vector must have size `num_partitions + 1` as per documentation.
return std::pair(empty_like(t), std::vector<size_type>(num_partitions + 1, 0));
}
return cudf::type_dispatcher(
partition_map.type(), dispatch_map_type{}, t, partition_map, num_partitions, stream, mr);
}
} // namespace detail
// Partition based on hash values
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
table_view const& input,
std::vector<size_type> const& columns_to_hash,
int num_partitions,
hash_id hash_function,
uint32_t seed,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
switch (hash_function) {
case (hash_id::HASH_IDENTITY):
for (const size_type& column_id : columns_to_hash) {
if (!is_numeric(input.column(column_id).type()))
CUDF_FAIL("IdentityHash does not support this data type");
}
return detail::hash_partition<detail::IdentityHash>(
input, columns_to_hash, num_partitions, seed, stream, mr);
case (hash_id::HASH_MURMUR3):
return detail::hash_partition<detail::MurmurHash3_32>(
input, columns_to_hash, num_partitions, seed, stream, mr);
default: CUDF_FAIL("Unsupported hash function in hash_partition");
}
}
// Partition based on an explicit partition map
std::pair<std::unique_ptr<table>, std::vector<size_type>> partition(
table_view const& t,
column_view const& partition_map,
size_type num_partitions,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::partition(t, partition_map, num_partitions, cudf::get_default_stream(), mr);
}
} // namespace cudf
|
8a4c2b44313224a5ec1f30bd8040710d998e2d44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <c10/macros/Macros.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/hip/block_reduce.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/CUDAFunctions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/zeros_like.h>
#include <ATen/ops/sum_cuda_dispatch.h>
#include <ATen/ops/multilabel_margin_loss.h>
#endif
namespace at {
namespace native {
namespace {
const int MULTILABELMARGIN_THREADS = 128;
void check_shape(const Tensor& input, const Tensor& target) {
int64_t ndims = input.dim();
bool valid_inputs = (ndims == 2 && input.size(1) != 0) ||
(ndims == 1 && input.size(0) != 0) || (ndims == 0);
TORCH_CHECK(
valid_inputs,
"Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
input.sizes());
if (ndims <= 1) {
int dim = input.dim() == 0 ? 1 : input.size(0);
TORCH_CHECK(
valid_inputs && target.dim() <= 1 && target.numel() == dim,
"inconsistent target size: ",
target.sizes(),
" for input of size: ",
input.sizes());
} else if (ndims == 2) {
int nframe = input.size(0);
int dim = input.size(1);
TORCH_CHECK(
valid_inputs && target.dim() == 2 && target.size(0) == nframe &&
target.size(1) == dim,
"inconsistent target size: ",
target.sizes(),
" for input of size: ",
input.sizes());
} else {
TORCH_CHECK(false, "Expected input of ndims <= 2, but got ndims: ", ndims);
}
}
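// Summary of the forward kernel below (derived from its code): for each sample k,
// with targets y_0..y_{m-1} terminated by a negative index,
//   loss_k = (1 / dim) * sum_t sum_{d not a target} max(0, 1 - x_k[y_t] + x_k[d]),
// and with size_average the result is additionally divided by nframe.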
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(MULTILABELMARGIN_THREADS)
__global__ void multilabel_margin_loss_forward_kernel(
scalar_t* output,
scalar_t* input,
int64_t* target,
scalar_t* is_target,
int nframe,
int dim,
bool size_average) {
// vectors:
int k = blockIdx.x;
scalar_t* input_k = input + k * dim;
int64_t* target_k = target + k * dim;
scalar_t* output_k = output + k;
scalar_t* is_target_k = is_target + k * dim;
// zero is_target
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
is_target_k[d] = static_cast<scalar_t>(0);
}
__syncthreads();
// mark targets in is_target
if (threadIdx.x == 0) {
for (int dt = 0; dt < dim; dt++) {
int target_idx = target_k[dt];
if (target_idx < 0) {
break;
}
is_target_k[target_idx] = static_cast<scalar_t>(1);
}
}
__syncthreads();
// iterate over targets
accscalar_t sum = 0;
for (int dt = 0; dt < dim; dt++) {
// next target:
int target_idx = target_k[dt];
if (target_idx < 0) {
break;
}
// current value for target
scalar_t input_target_k = input_k[target_idx];
// compare to all inputs (multithreaded):
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
// contribute to loss only if not a target
if (!static_cast<int>(is_target_k[d])) {
scalar_t z = 1 - input_target_k + input_k[d];
if (z > 0) {
sum += z;
}
}
}
}
// Temporary sums (for mapreduce)
__shared__ accscalar_t smem[MULTILABELMARGIN_THREADS];
accscalar_t total_sum = cuda_utils::BlockReduceSum(sum, smem);
if (threadIdx.x == 0) {
if (size_average) {
*output_k = static_cast<scalar_t>((total_sum / dim) / nframe);
} else {
*output_k = static_cast<scalar_t>(total_sum / dim);
}
}
}
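// Summary of the backward kernel below (derived from its code): with gain
// g = 1/(nframe*dim) for a mean-reduced batch and g = 1/dim otherwise, every
// margin-violating (target, non-target) pair adds +g to grad_input at the
// non-target index and -g at the target index; the result is then scaled by
// grad_output.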
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(MULTILABELMARGIN_THREADS)
__global__ void multilabel_margin_loss_backward_kernel(
scalar_t* grad_input,
scalar_t* grad_output,
scalar_t* input,
int64_t* target,
scalar_t* is_target,
int nframe,
int dim,
bool size_average,
bool reduce) {
int k = blockIdx.x;
scalar_t* input_k = input + k * dim;
scalar_t* grad_input_k = grad_input + k * dim;
int64_t* target_k = target + k * dim;
scalar_t* is_target_k = is_target + k * dim;
scalar_t* grad_output_k = grad_output;
if (!reduce) {
grad_output_k += k;
}
// gain:
scalar_t g = static_cast<scalar_t>(
size_average && reduce ? 1. / static_cast<accscalar_t>(nframe * dim)
: 1. / static_cast<accscalar_t>(dim));
// zero gradients:
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
grad_input_k[d] = static_cast<scalar_t>(0);
}
__syncthreads();
// iterate over targets
for (int dt = 0; dt < dim; dt++) {
// next target:
int target_idx = static_cast<int>(target_k[dt]);
if (target_idx < 0) {
break;
}
// current value for target
scalar_t input_target_k = input_k[target_idx];
// compare to all inputs (multithreaded):
accscalar_t sum = 0;
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
// contribute to loss only if not a target
if (!static_cast<int>(is_target_k[d])) {
scalar_t z = 1 - input_target_k + input_k[d];
if (z > 0) {
sum -= g;
grad_input_k[d] += g;
}
}
}
__syncthreads();
// Temporary sums (for mapreduce)
__shared__ accscalar_t smem[MULTILABELMARGIN_THREADS];
accscalar_t total_sum = cuda_utils::BlockReduceSum(sum, smem);
if (threadIdx.x == 0) {
grad_input_k[target_idx] += static_cast<scalar_t>(total_sum);
}
}
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
grad_input_k[d] *= *grad_output_k;
}
}
void multilabel_margin_loss_forward_out_cuda_template(
const Tensor& input,
const Tensor& target,
int64_t reduction,
Tensor& output,
Tensor& is_target) {
check_shape(input, target);
if (input.numel() == 0) {
return;
}
auto input_ = input.contiguous();
auto target_ = target.contiguous();
auto is_target_ = is_target.contiguous();
is_target_.resize_as_(target);
if (input.dim() <= 1) {
int dim = input.dim() == 0 ? 1 : input.size(0);
output.resize_({});
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"multilabel_margin_loss_forward_kernel",
[&] {
using accscalar_t = at::acc_type<scalar_t, true>;
hipLaunchKernelGGL(( multilabel_margin_loss_forward_kernel<scalar_t, accscalar_t>)
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
is_target_.data_ptr<scalar_t>(),
1,
dim,
reduction == at::Reduction::Mean);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else if (input.dim() == 2) {
int nframe = input.size(0);
int dim = input.size(1);
dim3 blocks(input.size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
if (reduction != at::Reduction::None) {
auto output_tmp = at::empty({input_.size(0)}, input_.options());
output.resize_({});
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"multilabel_margin_loss_forward_kernel",
[&] {
using accscalar_t = at::acc_type<scalar_t, true>;
hipLaunchKernelGGL(( multilabel_margin_loss_forward_kernel<scalar_t, accscalar_t>)
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output_tmp.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
is_target_.data_ptr<scalar_t>(),
nframe,
dim,
reduction == at::Reduction::Mean);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
at::cuda::sum_out(
output,
output_tmp,
at::IntArrayRef(std::vector<int64_t>{}),
false,
output.scalar_type());
} else {
output.resize_({input.size(0)});
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"multilabel_margin_loss_forward_kernel",
[&] {
using accscalar_t = at::acc_type<scalar_t, true>;
hipLaunchKernelGGL(( multilabel_margin_loss_forward_kernel<scalar_t, accscalar_t>)
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
is_target_.data_ptr<scalar_t>(),
nframe,
dim,
false);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
} else {
TORCH_CHECK(
false,
"Expected 2D input with optional zero batch dim, or 1D input with non-zero dims, but got sizes: ",
input.sizes());
}
}
void multilabel_margin_loss_backward_cuda_out_template(
const Tensor& grad_output,
const Tensor& input,
const Tensor& target,
int64_t reduction,
const Tensor& is_target,
Tensor& grad_input) {
check_shape(input, target);
auto input_ = input.contiguous();
if (input_.numel() == 0) {
return;
}
grad_input.resize_as_(input_);
auto target_ = target.contiguous();
auto is_target_ = is_target.contiguous();
auto grad_output_ = grad_output.contiguous();
if (grad_input.dim() <= 1) {
int dim = grad_input.dim() == 0 ? 1 : grad_input.size(0);
int target_size = target_.dim() == 0 ? 1 : target_.size(0);
TORCH_CHECK(
(target_.numel() != 0) && (target_.dim() <= 1) && (target_size == dim),
"inconsistent target size");
TORCH_CHECK(
target_.sizes() == is_target_.sizes(), "inconsistent is_target size");
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"multilabel_margin_loss_backward_kernel",
[&] {
using accscalar_t = at::acc_type<scalar_t, true>;
hipLaunchKernelGGL(( multilabel_margin_loss_backward_kernel<scalar_t, accscalar_t>)
, dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(),
grad_output_.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
is_target_.data_ptr<scalar_t>(),
1,
dim,
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else if (grad_input.dim() == 2) {
int nframe = grad_input.size(0);
int dim = grad_input.size(1);
TORCH_CHECK(
(input_.size(1) != 0) && (target_.dim() == 2) &&
(target_.size(0) == nframe) && (target_.size(1) == dim),
"inconsistent target size");
TORCH_CHECK(target_.sizes() == is_target_.sizes(), "inconsistent is_target size");
dim3 blocks(grad_input.size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"multilabel_margin_loss_backward_kernel",
[&] {
using accscalar_t = at::acc_type<scalar_t, true>;
hipLaunchKernelGGL(( multilabel_margin_loss_backward_kernel<scalar_t, accscalar_t>)
, dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(),
grad_output_.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
is_target_.data_ptr<scalar_t>(),
grad_input.size(0),
grad_input.size(1),
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else {
TORCH_CHECK(
false,
"Expected 2D input with optional zero batch dim, or 1D input with non-zero dims, but got sizes: ",
grad_input.sizes());
}
}
} // namespace
std::tuple<Tensor&, Tensor&> multilabel_margin_loss_forward_out_cuda(
const Tensor& self,
const Tensor& target,
int64_t reduction,
Tensor& output,
Tensor& is_target) {
multilabel_margin_loss_forward_out_cuda_template(
self, target, reduction, output, is_target);
return std::tuple<Tensor&, Tensor&>(output, is_target);
}
std::tuple<Tensor, Tensor> multilabel_margin_loss_forward_cuda(
const Tensor& self,
const Tensor& target,
int64_t reduction) {
auto output = at::empty({0}, self.options());
auto is_target = at::empty({0}, self.options());
multilabel_margin_loss_forward_out_cuda_template(
self, target, reduction, output, is_target);
return std::make_tuple(output, is_target);
}
Tensor& multilabel_margin_loss_backward_cuda_out(
const Tensor& grad_output,
const Tensor& self,
const Tensor& target,
int64_t reduction,
const Tensor& is_target,
Tensor& grad_input) {
multilabel_margin_loss_backward_cuda_out_template(
grad_output, self, target, reduction, is_target, grad_input);
return grad_input;
}
Tensor multilabel_margin_loss_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& target,
int64_t reduction,
const Tensor& is_target) {
auto grad_input = at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
multilabel_margin_loss_backward_cuda_out_template(
grad_output, self, target, reduction, is_target, grad_input);
return grad_input;
}
} // namespace native
} // namespace at
| 8a4c2b44313224a5ec1f30bd8040710d998e2d44.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <c10/macros/Macros.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/cuda/block_reduce.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/CUDAFunctions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/zeros_like.h>
#include <ATen/ops/sum_cuda_dispatch.h>
#include <ATen/ops/multilabel_margin_loss.h>
#endif
namespace at {
namespace native {
namespace {
const int MULTILABELMARGIN_THREADS = 128;
void check_shape(const Tensor& input, const Tensor& target) {
int64_t ndims = input.dim();
bool valid_inputs = (ndims == 2 && input.size(1) != 0) ||
(ndims == 1 && input.size(0) != 0) || (ndims == 0);
TORCH_CHECK(
valid_inputs,
"Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
input.sizes());
if (ndims <= 1) {
int dim = input.dim() == 0 ? 1 : input.size(0);
TORCH_CHECK(
valid_inputs && target.dim() <= 1 && target.numel() == dim,
"inconsistent target size: ",
target.sizes(),
" for input of size: ",
input.sizes());
} else if (ndims == 2) {
int nframe = input.size(0);
int dim = input.size(1);
TORCH_CHECK(
valid_inputs && target.dim() == 2 && target.size(0) == nframe &&
target.size(1) == dim,
"inconsistent target size: ",
target.sizes(),
" for input of size: ",
input.sizes());
} else {
TORCH_CHECK(false, "Expected input of ndims <= 2, but got ndims: ", ndims);
}
}
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(MULTILABELMARGIN_THREADS)
__global__ void multilabel_margin_loss_forward_kernel(
scalar_t* output,
scalar_t* input,
int64_t* target,
scalar_t* is_target,
int nframe,
int dim,
bool size_average) {
// vectors:
int k = blockIdx.x;
scalar_t* input_k = input + k * dim;
int64_t* target_k = target + k * dim;
scalar_t* output_k = output + k;
scalar_t* is_target_k = is_target + k * dim;
// zero is_target
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
is_target_k[d] = static_cast<scalar_t>(0);
}
__syncthreads();
// mark targets in is_target
if (threadIdx.x == 0) {
for (int dt = 0; dt < dim; dt++) {
int target_idx = target_k[dt];
if (target_idx < 0) {
break;
}
is_target_k[target_idx] = static_cast<scalar_t>(1);
}
}
__syncthreads();
// iterate over targets
accscalar_t sum = 0;
for (int dt = 0; dt < dim; dt++) {
// next target:
int target_idx = target_k[dt];
if (target_idx < 0) {
break;
}
// current value for target
scalar_t input_target_k = input_k[target_idx];
// compare to all inputs (multithreaded):
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
// contribute to loss only if not a target
if (!static_cast<int>(is_target_k[d])) {
scalar_t z = 1 - input_target_k + input_k[d];
if (z > 0) {
sum += z;
}
}
}
}
// Temporary sums (for mapreduce)
__shared__ accscalar_t smem[MULTILABELMARGIN_THREADS];
accscalar_t total_sum = cuda_utils::BlockReduceSum(sum, smem);
if (threadIdx.x == 0) {
if (size_average) {
*output_k = static_cast<scalar_t>((total_sum / dim) / nframe);
} else {
*output_k = static_cast<scalar_t>(total_sum / dim);
}
}
}
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(MULTILABELMARGIN_THREADS)
__global__ void multilabel_margin_loss_backward_kernel(
scalar_t* grad_input,
scalar_t* grad_output,
scalar_t* input,
int64_t* target,
scalar_t* is_target,
int nframe,
int dim,
bool size_average,
bool reduce) {
int k = blockIdx.x;
scalar_t* input_k = input + k * dim;
scalar_t* grad_input_k = grad_input + k * dim;
int64_t* target_k = target + k * dim;
scalar_t* is_target_k = is_target + k * dim;
scalar_t* grad_output_k = grad_output;
if (!reduce) {
grad_output_k += k;
}
// gain:
scalar_t g = static_cast<scalar_t>(
size_average && reduce ? 1. / static_cast<accscalar_t>(nframe * dim)
: 1. / static_cast<accscalar_t>(dim));
// zero gradients:
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
grad_input_k[d] = static_cast<scalar_t>(0);
}
__syncthreads();
// iterate over targets
for (int dt = 0; dt < dim; dt++) {
// next target:
int target_idx = static_cast<int>(target_k[dt]);
if (target_idx < 0) {
break;
}
// current value for target
scalar_t input_target_k = input_k[target_idx];
// compare to all inputs (multithreaded):
accscalar_t sum = 0;
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
// contribute to loss only if not a target
if (!static_cast<int>(is_target_k[d])) {
scalar_t z = 1 - input_target_k + input_k[d];
if (z > 0) {
sum -= g;
grad_input_k[d] += g;
}
}
}
__syncthreads();
// Temporary sums (for mapreduce)
__shared__ accscalar_t smem[MULTILABELMARGIN_THREADS];
accscalar_t total_sum = cuda_utils::BlockReduceSum(sum, smem);
if (threadIdx.x == 0) {
grad_input_k[target_idx] += static_cast<scalar_t>(total_sum);
}
}
for (int d = threadIdx.x; d < dim; d += blockDim.x) {
grad_input_k[d] *= *grad_output_k;
}
}
void multilabel_margin_loss_forward_out_cuda_template(
const Tensor& input,
const Tensor& target,
int64_t reduction,
Tensor& output,
Tensor& is_target) {
check_shape(input, target);
if (input.numel() == 0) {
return;
}
auto input_ = input.contiguous();
auto target_ = target.contiguous();
auto is_target_ = is_target.contiguous();
is_target_.resize_as_(target);
if (input.dim() <= 1) {
int dim = input.dim() == 0 ? 1 : input.size(0);
output.resize_({});
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"multilabel_margin_loss_forward_kernel",
[&] {
using accscalar_t = at::acc_type<scalar_t, true>;
multilabel_margin_loss_forward_kernel<scalar_t, accscalar_t>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
output.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
is_target_.data_ptr<scalar_t>(),
1,
dim,
reduction == at::Reduction::Mean);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
} else if (input.dim() == 2) {
int nframe = input.size(0);
int dim = input.size(1);
dim3 blocks(input.size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
if (reduction != at::Reduction::None) {
auto output_tmp = at::empty({input_.size(0)}, input_.options());
output.resize_({});
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"multilabel_margin_loss_forward_kernel",
[&] {
using accscalar_t = at::acc_type<scalar_t, true>;
multilabel_margin_loss_forward_kernel<scalar_t, accscalar_t>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
output_tmp.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
is_target_.data_ptr<scalar_t>(),
nframe,
dim,
reduction == at::Reduction::Mean);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
at::cuda::sum_out(
output,
output_tmp,
at::IntArrayRef(std::vector<int64_t>{}),
false,
output.scalar_type());
} else {
output.resize_({input.size(0)});
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"multilabel_margin_loss_forward_kernel",
[&] {
using accscalar_t = at::acc_type<scalar_t, true>;
multilabel_margin_loss_forward_kernel<scalar_t, accscalar_t>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
output.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
is_target_.data_ptr<scalar_t>(),
nframe,
dim,
false);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
} else {
TORCH_CHECK(
false,
"Expected 2D input with optional zero batch dim, or 1D input with non-zero dims, but got sizes: ",
input.sizes());
}
}
void multilabel_margin_loss_backward_cuda_out_template(
const Tensor& grad_output,
const Tensor& input,
const Tensor& target,
int64_t reduction,
const Tensor& is_target,
Tensor& grad_input) {
check_shape(input, target);
auto input_ = input.contiguous();
if (input_.numel() == 0) {
return;
}
grad_input.resize_as_(input_);
auto target_ = target.contiguous();
auto is_target_ = is_target.contiguous();
auto grad_output_ = grad_output.contiguous();
if (grad_input.dim() <= 1) {
int dim = grad_input.dim() == 0 ? 1 : grad_input.size(0);
int target_size = target_.dim() == 0 ? 1 : target_.size(0);
TORCH_CHECK(
(target_.numel() != 0) && (target_.dim() <= 1) && (target_size == dim),
"inconsistent target size");
TORCH_CHECK(
target_.sizes() == is_target_.sizes(), "inconsistent is_target size");
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"multilabel_margin_loss_backward_kernel",
[&] {
using accscalar_t = at::acc_type<scalar_t, true>;
multilabel_margin_loss_backward_kernel<scalar_t, accscalar_t>
<<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(),
grad_output_.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
is_target_.data_ptr<scalar_t>(),
1,
dim,
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
} else if (grad_input.dim() == 2) {
int nframe = grad_input.size(0);
int dim = grad_input.size(1);
TORCH_CHECK(
(input_.size(1) != 0) && (target_.dim() == 2) &&
(target_.size(0) == nframe) && (target_.size(1) == dim),
"inconsistent target size");
TORCH_CHECK(target_.sizes() == is_target_.sizes(), "inconsistent is_target size");
dim3 blocks(grad_input.size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"multilabel_margin_loss_backward_kernel",
[&] {
using accscalar_t = at::acc_type<scalar_t, true>;
multilabel_margin_loss_backward_kernel<scalar_t, accscalar_t>
<<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(),
grad_output_.data_ptr<scalar_t>(),
input_.data_ptr<scalar_t>(),
target_.data_ptr<int64_t>(),
is_target_.data_ptr<scalar_t>(),
grad_input.size(0),
grad_input.size(1),
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
} else {
TORCH_CHECK(
false,
"Expected 2D input with optional zero batch dim, or 1D input with non-zero dims, but got sizes: ",
grad_input.sizes());
}
}
} // namespace
std::tuple<Tensor&, Tensor&> multilabel_margin_loss_forward_out_cuda(
const Tensor& self,
const Tensor& target,
int64_t reduction,
Tensor& output,
Tensor& is_target) {
multilabel_margin_loss_forward_out_cuda_template(
self, target, reduction, output, is_target);
return std::tuple<Tensor&, Tensor&>(output, is_target);
}
std::tuple<Tensor, Tensor> multilabel_margin_loss_forward_cuda(
const Tensor& self,
const Tensor& target,
int64_t reduction) {
auto output = at::empty({0}, self.options());
auto is_target = at::empty({0}, self.options());
multilabel_margin_loss_forward_out_cuda_template(
self, target, reduction, output, is_target);
return std::make_tuple(output, is_target);
}
Tensor& multilabel_margin_loss_backward_cuda_out(
const Tensor& grad_output,
const Tensor& self,
const Tensor& target,
int64_t reduction,
const Tensor& is_target,
Tensor& grad_input) {
multilabel_margin_loss_backward_cuda_out_template(
grad_output, self, target, reduction, is_target, grad_input);
return grad_input;
}
Tensor multilabel_margin_loss_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& target,
int64_t reduction,
const Tensor& is_target) {
auto grad_input = at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
multilabel_margin_loss_backward_cuda_out_template(
grad_output, self, target, reduction, is_target, grad_input);
return grad_input;
}
} // namespace native
} // namespace at
|
b688735b3a60c9291adeb2c44ce24d687a33c547.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <tune_quda.h>
#include <gauge_field.h>
#include <jitify_helper.cuh>
#include <kernels/field_strength_tensor.cuh>
#include <instantiate.h>
namespace quda
{
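  // Computes the field strength tensor F_{mu,nu} from the gauge field u. The Y/Z
  // tuning dimensions (2, 6) appear to run over the two parities and the six
  // (mu,nu) plane combinations (an interpretation based on the flops/bytes
  // accounting below, not stated explicitly in the original source).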
template <typename Float, int nColor, QudaReconstructType recon> class Fmunu : TunableVectorYZ
{
FmunuArg<Float, nColor, recon> arg;
const GaugeField &meta;
unsigned int minThreads() const { return arg.threads; }
bool tuneGridDim() const { return false; }
public:
Fmunu(const GaugeField &u, GaugeField &f) :
TunableVectorYZ(2, 6),
arg(f, u),
meta(f)
{
strcpy(aux, meta.AuxString());
strcat(aux, comm_dim_partitioned_string());
if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) {
#ifdef JITIFY
create_jitify_program("kernels/field_strength_tensor.cuh");
#endif
}
apply(0);
qudaDeviceSynchronize();
checkCudaError();
}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef JITIFY
using namespace jitify::reflection;
jitify_error = program->kernel("quda::computeFmunuKernel").instantiate(Type<Arg>())
.configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg);
#else
hipLaunchKernelGGL(( computeFmunuKernel), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, 0, arg);
#endif
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
long long flops() const { return (2430 + 36) * 6 * 2 * (long long)arg.threads; }
long long bytes() const
{
return ((16 * arg.u.Bytes() + arg.f.Bytes()) * 6 * 2 * arg.threads);
} // Ignores link reconstruction
}; // Fmunu
void computeFmunu(GaugeField &f, const GaugeField &u)
{
#ifdef GPU_GAUGE_TOOLS
checkPrecision(f, u);
instantiate<Fmunu,ReconstructWilson>(u, f); // u must be first here for correct template instantiation
#else
errorQuda("Gauge tools are not built");
#endif // GPU_GAUGE_TOOLS
}
} // namespace quda
| b688735b3a60c9291adeb2c44ce24d687a33c547.cu | #include <tune_quda.h>
#include <gauge_field.h>
#include <jitify_helper.cuh>
#include <kernels/field_strength_tensor.cuh>
#include <instantiate.h>
namespace quda
{
template <typename Float, int nColor, QudaReconstructType recon> class Fmunu : TunableVectorYZ
{
FmunuArg<Float, nColor, recon> arg;
const GaugeField &meta;
unsigned int minThreads() const { return arg.threads; }
bool tuneGridDim() const { return false; }
public:
Fmunu(const GaugeField &u, GaugeField &f) :
TunableVectorYZ(2, 6),
arg(f, u),
meta(f)
{
strcpy(aux, meta.AuxString());
strcat(aux, comm_dim_partitioned_string());
if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) {
#ifdef JITIFY
create_jitify_program("kernels/field_strength_tensor.cuh");
#endif
}
apply(0);
qudaDeviceSynchronize();
checkCudaError();
}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
#ifdef JITIFY
using namespace jitify::reflection;
jitify_error = program->kernel("quda::computeFmunuKernel").instantiate(Type<Arg>())
.configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg);
#else
computeFmunuKernel<<<tp.grid, tp.block, tp.shared_bytes>>>(arg);
#endif
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
long long flops() const { return (2430 + 36) * 6 * 2 * (long long)arg.threads; }
long long bytes() const
{
return ((16 * arg.u.Bytes() + arg.f.Bytes()) * 6 * 2 * arg.threads);
} // Ignores link reconstruction
}; // Fmunu
void computeFmunu(GaugeField &f, const GaugeField &u)
{
#ifdef GPU_GAUGE_TOOLS
checkPrecision(f, u);
instantiate<Fmunu,ReconstructWilson>(u, f); // u must be first here for correct template instantiation
#else
errorQuda("Gauge tools are not built");
#endif // GPU_GAUGE_TOOLS
}
} // namespace quda
|
27853cda90d971948a1c5d30cf692bcf3ce45314.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 16
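// mandelKernel: per-pixel Mandelbrot escape-time iteration z <- z^2 + c, writing the
// iteration count (capped at maxIterations) for pixel (thisX, thisY) into d_img.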
__global__ void mandelKernel(int* d_img, float lowerX, float lowerY, float stepX, float stepY, int width, int height, int maxIterations)
{
    // To avoid floating-point accumulation error, compute each pixel's coordinates
    // directly from its index:
    //   float x = lowerX + thisX * stepX;
    //   float y = lowerY + thisY * stepY;
unsigned int thisX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int thisY = blockIdx.y * blockDim.y + threadIdx.y;
if (thisX < width && thisY < height) {
int idx = thisY * width + thisX;
float c_re = lowerX + thisX * stepX;
float c_im = lowerY + thisY * stepY;
float z_re = c_re, z_im = c_im;
int i = 0;
for (i = 0; i < maxIterations; ++i)
{
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = c_re + new_re;
z_im = c_im + new_im;
}
d_img[idx] = i;
}
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
int* d_img, *host_img;
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
// hipMalloc((void **)&d_img, resX * resY * sizeof(int)); // kernel1
// host_img = (int *) malloc(resX * resY * sizeof(int)); // kernel1
size_t pitch; // kernel2
hipMallocPitch((void **)&d_img, &pitch, sizeof(float)*resX, resY); // kernel2
hipHostMalloc((void **)&host_img, resX * resY * sizeof(int),hipHostMallocDefault); // kernel2
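    // NOTE (added): the kernel and the hipMemcpy below use a row stride of resX
    // elements, so this path implicitly assumes the returned pitch equals
    // resX * sizeof(int) (the allocation also requests sizeof(float) per element,
    // which only works because sizeof(float) == sizeof(int) here); hipMemcpy2D
    // with the returned pitch would be the robust alternative.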
dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
    // Round up so the grid covers resX x resY even when they are not multiples of
    // BLOCK_SIZE; the kernel's bounds check discards the extra threads.
    dim3 numBlock((resX + BLOCK_SIZE - 1) / BLOCK_SIZE, (resY + BLOCK_SIZE - 1) / BLOCK_SIZE);
hipLaunchKernelGGL(( mandelKernel), dim3(numBlock), dim3(blockSize), 0, 0, d_img, lowerX, lowerY, stepX, stepY, resX, resY, maxIterations);
hipDeviceSynchronize();
hipMemcpy(host_img, d_img, resX * resY * sizeof(int), hipMemcpyDeviceToHost);
memcpy(img,host_img,resX * resY * sizeof(int));
hipFree(d_img);
}
| 27853cda90d971948a1c5d30cf692bcf3ce45314.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 16
__global__ void mandelKernel(int* d_img, float lowerX, float lowerY, float stepX, float stepY, int width, int height, int maxIterations)
{
    // To avoid floating-point accumulation error, compute each pixel's coordinates
    // directly from its index:
    //   float x = lowerX + thisX * stepX;
    //   float y = lowerY + thisY * stepY;
unsigned int thisX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int thisY = blockIdx.y * blockDim.y + threadIdx.y;
if (thisX < width && thisY < height) {
int idx = thisY * width + thisX;
float c_re = lowerX + thisX * stepX;
float c_im = lowerY + thisY * stepY;
float z_re = c_re, z_im = c_im;
int i = 0;
for (i = 0; i < maxIterations; ++i)
{
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = c_re + new_re;
z_im = c_im + new_im;
}
d_img[idx] = i;
}
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
int* d_img, *host_img;
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
// cudaMalloc((void **)&d_img, resX * resY * sizeof(int)); // kernel1
// host_img = (int *) malloc(resX * resY * sizeof(int)); // kernel1
size_t pitch; // kernel2
cudaMallocPitch((void **)&d_img, &pitch, sizeof(float)*resX, resY); // kernel2
cudaHostAlloc((void **)&host_img, resX * resY * sizeof(int),cudaHostAllocDefault); // kernel2
dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
    // Round up so the grid covers resX x resY even when they are not multiples of
    // BLOCK_SIZE; the kernel's bounds check discards the extra threads.
    dim3 numBlock((resX + BLOCK_SIZE - 1) / BLOCK_SIZE, (resY + BLOCK_SIZE - 1) / BLOCK_SIZE);
mandelKernel<<<numBlock, blockSize>>>(d_img, lowerX, lowerY, stepX, stepY, resX, resY, maxIterations);
cudaDeviceSynchronize();
cudaMemcpy(host_img, d_img, resX * resY * sizeof(int), cudaMemcpyDeviceToHost);
memcpy(img,host_img,resX * resY * sizeof(int));
cudaFree(d_img);
}
|
b32cb76780c323c82cf3bc6a3ad629d73757f3c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
csymv.cu is nearly identical to chemv.cu, just change names and drop MAGMA_C_CONJ.
csymv_kernel_U (upper) in csymv_upper.cu is very similar to
csymv_kernel_L (lower) in csymv.cu; diff the two files to compare.
Note: [ds] precisions generated from chemv.cu
@generated from magmablas/zsymv.cu, normal z -> c, Sun Nov 20 20:20:31 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ A11*x1 A12*x2 A13*x3 ] [ A11 A12 A13 ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) A23*x3 ] = [ A21 A22 A23 ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ A11*x1 --- ]
work = [ A12*x2 (A21*x1 + A22*x2) --- ]
[ A13*x3 A23*x3 (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
csymv_kernel_L(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_C_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = sA32(tx2, j);
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = sA32(tx2, j);
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(ty2*4 + j, tx2) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = rA[j] * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end csymv_kernel_L
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
              [ (A31*x1 + A32*x2 + A33*x3)      ]
*******************************************************************************/
__global__ void
csymv_kernel_L_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
magmaFloatComplex Ax = MAGMA_C_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/***************************************************************************//**
Purpose
-------
magmablas_csymv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n complex symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
            It is recommended that ldda be a multiple of 16. Otherwise
            performance would deteriorate, as the memory accesses
            would not be fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements csymv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
    magmablas_csymv_work requires users to provide a workspace, while
    magmablas_csymv is a wrapper routine that allocates the workspace inside the
    routine and provides the same interface as cuBLAS.
    If users need to call csymv frequently, we suggest using
    magmablas_csymv_work instead of magmablas_csymv, as the overhead of
    allocating and freeing device memory in magmablas_csymv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_symv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_csymv_work(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magmaFloatComplex_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
//magma_csymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
//return MAGMA_SUCCESS;
fprintf(stderr, "%s: %s\n", __func__, "not supported on CUDA ARCH 1.x");
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
hipLaunchKernelGGL(( csymv_kernel_U), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( csymv_kernel_U_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
n, alpha, ldda, beta, dy, incy, dwork);
}
else {
hipLaunchKernelGGL(( csymv_kernel_L), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( csymv_kernel_L_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_csymv_work
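/*
    Illustrative usage sketch (not part of the original source; n, alpha, dA, ldda,
    dx, beta, dy, and queue are assumed to be in scope): calling csymv_work with a
    caller-provided workspace, sized as documented above (lwork >= ldda * ceil(n / NB_X)).
        magma_int_t blocks = magma_ceildiv( n, 64 );   // NB_X = 64
        magma_int_t lwork  = ldda * blocks;
        magmaFloatComplex_ptr dwork;
        magma_cmalloc( &dwork, lwork );
        magmablas_csymv_work( MagmaLower, n, alpha, dA, ldda, dx, 1,
                              beta, dy, 1, dwork, lwork, queue );
        magma_free( dwork );
*/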
/***************************************************************************//**
Purpose
-------
magmablas_csymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n complex symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
            It is recommended that ldda be a multiple of 16. Otherwise
            performance would deteriorate, as the memory accesses
            would not be fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_symv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_csymv(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; no CUBLAS version of csymv.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
//magma_csymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
//return MAGMA_SUCCESS;
fprintf(stderr, "%s: %s\n", __func__, "not supported on CUDA ARCH 1.x");
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
magmaFloatComplex_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_cmalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_csymv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, queue );
magma_free( dwork );
return info;
}
// end magmablas_csymv
| b32cb76780c323c82cf3bc6a3ad629d73757f3c8.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
csymv.cu is nearly identical to chemv.cu, just change names and drop MAGMA_C_CONJ.
csymv_kernel_U (upper) in csymv_upper.cu is very similar to
csymv_kernel_L (lower) in csymv.cu; diff the two files to compare.
Note: [ds] precisions generated from chemv.cu
@generated from magmablas/zsymv.cu, normal z -> c, Sun Nov 20 20:20:31 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ A11*x1 A12*x2 A13*x3 ] [ A11 A12 A13 ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) A23*x3 ] = [ A21 A22 A23 ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ A11*x1 --- ]
work = [ A12*x2 (A21*x1 + A22*x2) --- ]
[ A13*x3 A23*x3 (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
csymv_kernel_L(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_C_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = sA32(tx2, j);
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = sA32(tx2, j);
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(ty2*4 + j, tx2) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = rA[j] * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end csymv_kernel_L
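// Worked example of the blocking above (illustrative numbers only): take
// n = 100 with NB_X = 64. Then gridDim.x = ceil(100/64) = 2; block 0 covers
// rows 0..63 and block 1 covers rows 64..99, so the last block gets
// partial = 100 % 64 = 36. Threads with tx >= 36 in block 1 are pointed at
// the last valid row of A (so their loads stay in bounds) and their results
// are simply never written to work, as described in the kernel comment.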
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
              [ (A31*x1 + A32*x2 + A33*x3) ]
*******************************************************************************/
__global__ void
csymv_kernel_L_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
magmaFloatComplex Ax = MAGMA_C_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
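// Concretely, with 3 blocks the loop above accumulates, for a row ind that
// lives in block row 0,
//     Ax = work(ind, 0) + work(ind, 1) + work(ind, 2)
//        = (A11*x1)(ind) + (A21^H*x2)(ind) + (A31^H*x3)(ind),
// i.e. one full row of A*x, and then y(ind) = beta*y(ind) + alpha*Ax,
// matching the "On input" / "On output" pictures above.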
/***************************************************************************//**
Purpose
-------
magmablas_csymv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n complex symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
            It is recommended that ldda be a multiple of 16; otherwise
            performance will degrade because the memory accesses
            will not be fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements csymv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
    magmablas_csymv_work requires users to provide a workspace, while
magmablas_csymv is a wrapper routine allocating the workspace inside the
routine and provides the same interface as cublas.
    If users need to call csymv frequently, we suggest using
    magmablas_csymv_work instead of magmablas_csymv, as the overhead of
    allocating and freeing device memory in magmablas_csymv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_symv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_csymv_work(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magmaFloatComplex_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
//magma_csymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
//return MAGMA_SUCCESS;
fprintf(stderr, "%s: %s\n", __func__, "not supported on CUDA ARCH 1.x");
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
csymv_kernel_U<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
csymv_kernel_U_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
else {
csymv_kernel_L<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
csymv_kernel_L_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_csymv_work
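/*
    Usage sketch (illustrative; dA, dx, dy, alpha, beta, n, ldda and queue
    stand for caller-owned objects). The caller provides the workspace, sized
    as documented above: lwork >= ldda * ceil( n / NB_X ) with NB_X = 64.
        magma_int_t blocks = magma_ceildiv( n, NB_X );
        magma_int_t lwork  = ldda*blocks;
        magmaFloatComplex_ptr dwork;
        magma_cmalloc( &dwork, lwork );
        magmablas_csymv_work( MagmaLower, n, alpha, dA, ldda, dx, 1,
                              beta, dy, 1, dwork, lwork, queue );
        magma_free( dwork );
    This is the same sequence the magmablas_csymv wrapper below performs
    internally, minus the error handling.
*/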
/***************************************************************************//**
Purpose
-------
magmablas_csymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n complex symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
            It is recommended that ldda be a multiple of 16; otherwise
            performance will degrade because the memory accesses
            will not be fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_symv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_csymv(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; no CUBLAS version of csymv.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
//magma_csymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
//return MAGMA_SUCCESS;
fprintf(stderr, "%s: %s\n", __func__, "not supported on CUDA ARCH 1.x");
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
magmaFloatComplex_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_cmalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_csymv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, queue );
magma_free( dwork );
return info;
}
// end magmablas_csymv
|
ca73f6ca8da1b5b395ba184dad692ef9f7d8eb1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// these are just for timing measurments
#include <time.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const int DSIZE = 4096;
const int block_size = 16; // CUDA maximum is 1024 *total* threads in block
const float A_val = 1.0f;
const float B_val = 2.0f;
// matrix multiply (naive) kernel: C = A * B
__global__ void mmul(const float *A, const float *B, float *C, int ds) {
int idx = threadIdx.x+blockDim.x*blockIdx.x; // create thread x index
int idy = threadIdx.y+blockDim.y*blockIdx.y; // create thread y index
if ((idx < ds) && (idy < ds)){
float temp = 0;
for (int i = 0; i < ds; i++)
temp += A[idy*ds+i] * B[i*ds+idx]; // dot product of row and column
C[idy*ds+idx] = temp;
}
}
int main(){
float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
// these are just for timing
clock_t t0, t1, t2;
double t1sum=0.0;
double t2sum=0.0;
// start timing
t0 = clock();
h_A = new float[DSIZE*DSIZE];
h_B = new float[DSIZE*DSIZE];
h_C = new float[DSIZE*DSIZE];
for (int i = 0; i < DSIZE*DSIZE; i++){
h_A[i] = A_val;
h_B[i] = B_val;
h_C[i] = 0;}
// Initialization timing
t1 = clock();
t1sum = ((double)(t1-t0))/CLOCKS_PER_SEC;
printf("Init took %f seconds. Begin compute\n", t1sum);
// Allocate device memory and copy input data over to GPU
hipMalloc(&d_A, DSIZE*DSIZE*sizeof(float));
hipMalloc(&d_B, DSIZE*DSIZE*sizeof(float));
hipMalloc(&d_C, DSIZE*DSIZE*sizeof(float));
cudaCheckErrors("hipMalloc failure");
hipMemcpy(d_A, h_A, DSIZE*DSIZE*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, DSIZE*DSIZE*sizeof(float), hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy H2D failure");
// Cuda processing sequence step 1 is complete
// Launch kernel
dim3 block(block_size, block_size); // dim3 variable holds 3 dimensions
dim3 grid((DSIZE+block.x-1)/block.x, (DSIZE+block.y-1)/block.y);
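  // The (DSIZE+block.x-1)/block.x form is integer ceiling division, so the
  // grid covers all DSIZE rows/columns even when DSIZE is not a multiple of
  // block_size; the (idx < ds) && (idy < ds) check in the kernel discards the
  // surplus threads of the edge blocks. Here 4096/16 = 256 exactly, giving a
  // 256x256 grid of 16x16 blocks, i.e. one thread per element of C (about
  // 16.8M threads).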
hipLaunchKernelGGL(( mmul), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, DSIZE);
cudaCheckErrors("kernel launch failure");
// Cuda processing sequence step 2 is complete
// Copy results back to host
hipMemcpy(h_C, d_C, DSIZE*DSIZE*sizeof(float), hipMemcpyDeviceToHost);
// GPU timing
t2 = clock();
t2sum = ((double)(t2-t1))/CLOCKS_PER_SEC;
printf ("Done. Compute took %f seconds\n", t2sum);
// Cuda processing sequence step 3 is complete
// Verify results
cudaCheckErrors("kernel execution failure or hipMemcpy H2D failure");
for (int i = 0; i < DSIZE*DSIZE; i++) if (h_C[i] != A_val*B_val*DSIZE) {printf("mismatch at index %d, was: %f, should be: %f\n", i, h_C[i], A_val*B_val*DSIZE); return -1;}
printf("Success!\n");
return 0;
}
| ca73f6ca8da1b5b395ba184dad692ef9f7d8eb1c.cu | #include <stdio.h>
// these are just for timing measurments
#include <time.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const int DSIZE = 4096;
const int block_size = 16; // CUDA maximum is 1024 *total* threads in block
const float A_val = 1.0f;
const float B_val = 2.0f;
// matrix multiply (naive) kernel: C = A * B
__global__ void mmul(const float *A, const float *B, float *C, int ds) {
int idx = threadIdx.x+blockDim.x*blockIdx.x; // create thread x index
int idy = threadIdx.y+blockDim.y*blockIdx.y; // create thread y index
if ((idx < ds) && (idy < ds)){
float temp = 0;
for (int i = 0; i < ds; i++)
temp += A[idy*ds+i] * B[i*ds+idx]; // dot product of row and column
C[idy*ds+idx] = temp;
}
}
int main(){
float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
// these are just for timing
clock_t t0, t1, t2;
double t1sum=0.0;
double t2sum=0.0;
// start timing
t0 = clock();
h_A = new float[DSIZE*DSIZE];
h_B = new float[DSIZE*DSIZE];
h_C = new float[DSIZE*DSIZE];
for (int i = 0; i < DSIZE*DSIZE; i++){
h_A[i] = A_val;
h_B[i] = B_val;
h_C[i] = 0;}
// Initialization timing
t1 = clock();
t1sum = ((double)(t1-t0))/CLOCKS_PER_SEC;
printf("Init took %f seconds. Begin compute\n", t1sum);
// Allocate device memory and copy input data over to GPU
cudaMalloc(&d_A, DSIZE*DSIZE*sizeof(float));
cudaMalloc(&d_B, DSIZE*DSIZE*sizeof(float));
cudaMalloc(&d_C, DSIZE*DSIZE*sizeof(float));
cudaCheckErrors("cudaMalloc failure");
cudaMemcpy(d_A, h_A, DSIZE*DSIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, DSIZE*DSIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy H2D failure");
// Cuda processing sequence step 1 is complete
// Launch kernel
dim3 block(block_size, block_size); // dim3 variable holds 3 dimensions
dim3 grid((DSIZE+block.x-1)/block.x, (DSIZE+block.y-1)/block.y);
mmul<<<grid, block>>>(d_A, d_B, d_C, DSIZE);
cudaCheckErrors("kernel launch failure");
// Cuda processing sequence step 2 is complete
// Copy results back to host
cudaMemcpy(h_C, d_C, DSIZE*DSIZE*sizeof(float), cudaMemcpyDeviceToHost);
// GPU timing
t2 = clock();
t2sum = ((double)(t2-t1))/CLOCKS_PER_SEC;
printf ("Done. Compute took %f seconds\n", t2sum);
// Cuda processing sequence step 3 is complete
// Verify results
cudaCheckErrors("kernel execution failure or cudaMemcpy H2D failure");
for (int i = 0; i < DSIZE*DSIZE; i++) if (h_C[i] != A_val*B_val*DSIZE) {printf("mismatch at index %d, was: %f, should be: %f\n", i, h_C[i], A_val*B_val*DSIZE); return -1;}
printf("Success!\n");
return 0;
}
|
d286a8ff990063894dd22daf05694def9f9688e9.hip | // !!! This is a file automatically generated by hipify!!!
/*****************************************************************************
hotspot.cu
(c) 2014 - Nikhil R Podduturi, J. Seth Strattan
J. Michael Cherry Lab, Department of Genetics, Stanford University School of Medicine
Licensed under the GNU General Public License 2.0 license.
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include <ostream>
#include <fstream>
#include <nvbio/basic/console.h>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/shared_pointer.h>
#include <hip/hip_runtime_api.h>
#include <thrust/partition.h>
#include "hotspot_kernal.h"
void crcInit();
using namespace nvbio;
using namespace hotspot;
struct comp_chr
{
int chr;
comp_chr(int _chr) {chr=_chr;}
__host__ __device__
bool operator()(const Alignment x)
{
return x.ref_id != chr;
}
};
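// comp_chr is the predicate handed to thrust::stable_partition below: it
// returns true for alignments that are NOT on chromosome `chr`, so after the
// partition those tags come first and the tags belonging to `chr` are grouped
// at the tail, i.e. in [returned iterator, end()), which is the slice copied
// into d_chr on each iteration.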
int main(int argc, char* argv[])
{
Timer timer;
timer.start();
hipSetDeviceFlags( hipDeviceMapHost | hipDeviceLmemResizeToMax );
crcInit();
int cuda_device = -1;
const char* library_file = NULL;
const char* density_file = NULL;
const char* output_file = NULL;
// hotspot defaults for now
int low_int = 250;
int high_int = 1000;
int int_increment = 100;
int fuzzy_seed = 1;
double genome_size = 2.55E9;
double min_SD = 3.0;
bool use_fuzzy = false;
int arg = 1;
while (arg < argc)
{
if (strcmp( argv[arg], "-device" ) == 0)
{
cuda_device = atoi(argv[++arg]);
++arg;
}
else if (strcmp( argv[arg], "-i" ) == 0)
{
library_file = argv[++arg];
++arg;
}
else if (strcmp( argv[arg], "-k" ) == 0)
{
density_file = argv[++arg];
++arg;
}
else if (strcmp( argv[arg], "-o" ) == 0)
{
output_file = argv[++arg];
++arg;
}
else if (strcmp( argv[arg], "-range") == 0)
{
low_int = atoi(argv[arg + 1]);
high_int = atoi(argv[arg + 2]);
int_increment = atoi(argv[arg + 3]);
arg = arg + 3;
}
else if (strcmp( argv[arg], "-bckgnmsize") == 0)
{
genome_size = atof(argv[++arg]);
++arg;
}
else if (strcmp( argv[arg], "-minsd") == 0)
{
min_SD = atof(argv[++arg]);
++arg;
}
else if (strcmp( argv[arg], "-fuzzy") == 0)
{
use_fuzzy = true;
++arg;
}
else if (strcmp( argv[arg], "-fuzzy-seed") == 0)
{
fuzzy_seed = atoi(argv[++arg]);
++arg;
}
else
break;
}
// inspect and select cuda devices
int device_count;
hipGetDeviceCount(&device_count);
log_verbose(stderr, " cuda devices : %d\n", device_count);
if (device_count)
{
if (cuda_device == -1)
{
int best_device = 0;
hipDeviceProp_t best_device_prop;
hipGetDeviceProperties( &best_device_prop, best_device );
for (int device = 0; device < device_count; ++device)
{
hipDeviceProp_t device_prop;
hipGetDeviceProperties( &device_prop, device );
if (device_prop.major >= best_device_prop.major &&
device_prop.minor >= best_device_prop.minor)
{
best_device_prop = device_prop;
best_device = device;
}
}
cuda_device = best_device;
}
log_verbose(stderr, " chosen device %d\n", cuda_device);
{
hipDeviceProp_t device_prop;
hipGetDeviceProperties( &device_prop, cuda_device );
log_verbose(stderr, " device name : %s\n", device_prop.name);
log_verbose(stderr, " compute capability : %d.%d\n", device_prop.major, device_prop.minor);
}
hipSetDevice( cuda_device );
}
SharedPointer<AlignmentStream> library_stream = SharedPointer<AlignmentStream>( open_alignment_file( library_file ) );
if (library_stream == NULL || library_stream->is_ok() == false)
{
log_error(stderr, "failed opening \"%s\"\n", library_file);
exit(1);
}
thrust::host_vector<Alignment> h_alignments;
const uint32 total_tag_count = library_stream->read( &h_alignments );
log_info(stderr, "Total tags found = %d\n", total_tag_count);
log_info(stderr, "Identifying hotspots started\n");
thrust::device_vector<Alignment> d_alignments( h_alignments );
for(int i=0; i<24; ++i)
{
thrust::device_vector<Alignment>::iterator iter = thrust::stable_partition(
thrust::device,
d_alignments.begin(),
d_alignments.end(),
comp_chr(i)
);
thrust::device_vector<Alignment> d_chr(iter, d_alignments.end());
d_alignments.erase(iter, d_alignments.end());
log_info(stderr, "Calculating hotspots for chr%d\n", i+1);
if (d_chr.size() != 0)
{
log_info(stderr, " Total tags found - %lu\n", d_chr.size());
thrust::device_vector<Hotspot> hotspots(d_chr.size());
thrust::device_vector<Hotspot> filtered_hotspots(d_chr.size());
compute_hotspots(d_chr, hotspots, low_int, high_int, int_increment,
genome_size, total_tag_count, min_SD, use_fuzzy, fuzzy_seed);
filter_hotspots(hotspots, filtered_hotspots);
hotspots.clear();
hotspots.shrink_to_fit();
}
else
{
log_info(stderr, " No tags found in chr%d\n", i+1);
}
// Clear the memory
d_chr.clear();
d_chr.shrink_to_fit();
}
timer.stop();
log_info(stderr, "Time taken - %um:%us\n",
uint32(timer.seconds()/60),
uint32(timer.seconds())%60);
return 0;
}
| d286a8ff990063894dd22daf05694def9f9688e9.cu | /*****************************************************************************
hotspot.cu
(c) 2014 - Nikhil R Podduturi, J. Seth Strattan
J. Michael Cherry Lab, Department of Genetics, Stanford University School of Medicine
Licensed under the GNU General Public License 2.0 license.
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include <ostream>
#include <fstream>
#include <nvbio/basic/console.h>
#include <nvbio/basic/timer.h>
#include <nvbio/basic/shared_pointer.h>
#include <cuda_runtime_api.h>
#include <thrust/partition.h>
#include "hotspot_kernal.h"
void crcInit();
using namespace nvbio;
using namespace hotspot;
struct comp_chr
{
int chr;
comp_chr(int _chr) {chr=_chr;}
__host__ __device__
bool operator()(const Alignment x)
{
return x.ref_id != chr;
}
};
int main(int argc, char* argv[])
{
Timer timer;
timer.start();
cudaSetDeviceFlags( cudaDeviceMapHost | cudaDeviceLmemResizeToMax );
crcInit();
int cuda_device = -1;
const char* library_file = NULL;
const char* density_file = NULL;
const char* output_file = NULL;
// hotspot defaults for now
int low_int = 250;
int high_int = 1000;
int int_increment = 100;
int fuzzy_seed = 1;
double genome_size = 2.55E9;
double min_SD = 3.0;
bool use_fuzzy = false;
int arg = 1;
while (arg < argc)
{
if (strcmp( argv[arg], "-device" ) == 0)
{
cuda_device = atoi(argv[++arg]);
++arg;
}
else if (strcmp( argv[arg], "-i" ) == 0)
{
library_file = argv[++arg];
++arg;
}
else if (strcmp( argv[arg], "-k" ) == 0)
{
density_file = argv[++arg];
++arg;
}
else if (strcmp( argv[arg], "-o" ) == 0)
{
output_file = argv[++arg];
++arg;
}
else if (strcmp( argv[arg], "-range") == 0)
{
low_int = atoi(argv[arg + 1]);
high_int = atoi(argv[arg + 2]);
int_increment = atoi(argv[arg + 3]);
arg = arg + 3;
}
else if (strcmp( argv[arg], "-bckgnmsize") == 0)
{
genome_size = atof(argv[++arg]);
++arg;
}
else if (strcmp( argv[arg], "-minsd") == 0)
{
min_SD = atof(argv[++arg]);
++arg;
}
else if (strcmp( argv[arg], "-fuzzy") == 0)
{
use_fuzzy = true;
++arg;
}
else if (strcmp( argv[arg], "-fuzzy-seed") == 0)
{
fuzzy_seed = atoi(argv[++arg]);
++arg;
}
else
break;
}
// inspect and select cuda devices
int device_count;
cudaGetDeviceCount(&device_count);
log_verbose(stderr, " cuda devices : %d\n", device_count);
if (device_count)
{
if (cuda_device == -1)
{
int best_device = 0;
cudaDeviceProp best_device_prop;
cudaGetDeviceProperties( &best_device_prop, best_device );
for (int device = 0; device < device_count; ++device)
{
cudaDeviceProp device_prop;
cudaGetDeviceProperties( &device_prop, device );
if (device_prop.major >= best_device_prop.major &&
device_prop.minor >= best_device_prop.minor)
{
best_device_prop = device_prop;
best_device = device;
}
}
cuda_device = best_device;
}
log_verbose(stderr, " chosen device %d\n", cuda_device);
{
cudaDeviceProp device_prop;
cudaGetDeviceProperties( &device_prop, cuda_device );
log_verbose(stderr, " device name : %s\n", device_prop.name);
log_verbose(stderr, " compute capability : %d.%d\n", device_prop.major, device_prop.minor);
}
cudaSetDevice( cuda_device );
}
SharedPointer<AlignmentStream> library_stream = SharedPointer<AlignmentStream>( open_alignment_file( library_file ) );
if (library_stream == NULL || library_stream->is_ok() == false)
{
log_error(stderr, "failed opening \"%s\"\n", library_file);
exit(1);
}
thrust::host_vector<Alignment> h_alignments;
const uint32 total_tag_count = library_stream->read( &h_alignments );
log_info(stderr, "Total tags found = %d\n", total_tag_count);
log_info(stderr, "Identifying hotspots started\n");
thrust::device_vector<Alignment> d_alignments( h_alignments );
for(int i=0; i<24; ++i)
{
thrust::device_vector<Alignment>::iterator iter = thrust::stable_partition(
thrust::device,
d_alignments.begin(),
d_alignments.end(),
comp_chr(i)
);
thrust::device_vector<Alignment> d_chr(iter, d_alignments.end());
d_alignments.erase(iter, d_alignments.end());
log_info(stderr, "Calculating hotspots for chr%d\n", i+1);
if (d_chr.size() != 0)
{
log_info(stderr, " Total tags found - %lu\n", d_chr.size());
thrust::device_vector<Hotspot> hotspots(d_chr.size());
thrust::device_vector<Hotspot> filtered_hotspots(d_chr.size());
compute_hotspots(d_chr, hotspots, low_int, high_int, int_increment,
genome_size, total_tag_count, min_SD, use_fuzzy, fuzzy_seed);
filter_hotspots(hotspots, filtered_hotspots);
hotspots.clear();
hotspots.shrink_to_fit();
}
else
{
log_info(stderr, " No tags found in chr%d\n", i+1);
}
// Clear the memory
d_chr.clear();
d_chr.shrink_to_fit();
}
timer.stop();
log_info(stderr, "Time taken - %um:%us\n",
uint32(timer.seconds()/60),
uint32(timer.seconds())%60);
return 0;
}
|
1f0c4b03dbf6f5bc3fdfce1561bdc9a5bea1453b.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = 2;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
float *x, *y, *z;
int N=numElements;
hipMallocManaged(&x, N * sizeof(float));
hipMallocManaged(&y, N * sizeof(float));
hipMallocManaged(&z, N * sizeof(float));
// Verify that allocations succeeded
if (x == NULL || y == NULL || z == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
        x[i] = rand()/(float)RAND_MAX;
        printf("x[%d] %p\t", i, (void *)&x[i]);
        y[i] = rand()/(float)RAND_MAX;
        printf("y[%d] %p\n", i, (void *)&y[i]);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x, y, z, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Test PASSED\n");
hipStreamSynchronize(0);
// Free device global memory
err = hipFree(x);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(y);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(z);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
| 1f0c4b03dbf6f5bc3fdfce1561bdc9a5bea1453b.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <helper_cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = 2;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
float *x, *y, *z;
int N=numElements;
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
cudaMallocManaged(&z, N * sizeof(float));
// Verify that allocations succeeded
if (x == NULL || y == NULL || z == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
        x[i] = rand()/(float)RAND_MAX;
        printf("x[%d] %p\t", i, (void *)&x[i]);
        y[i] = rand()/(float)RAND_MAX;
        printf("y[%d] %p\n", i, (void *)&y[i]);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(x, y, z, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Test PASSED\n");
cudaStreamSynchronize(0);
// Free device global memory
err = cudaFree(x);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(y);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(z);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
0e37d6d2c26590beaf1e41879adcb4bdbcb1ea4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/padding/padding.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include <algorithm>
#include <cstring>
#include <iostream>
#include "megdnn/basic_types.h"
#include "padding.cuh"
#include "src/cuda/int_fastdiv.cuh"
#include "src/cuda/query_blocksize.cuh"
namespace megdnn {
namespace cuda {
namespace padding {
struct ShapeParams {
size_t src_shape[MEGDNN_MAX_NDIM];
size_t dst_shape[MEGDNN_MAX_NDIM];
Uint32Fastdiv src_stride[MEGDNN_MAX_NDIM];
Uint32Fastdiv dst_stride[MEGDNN_MAX_NDIM];
size_t offsets[MEGDNN_MAX_NDIM * 2];
};
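// ShapeParams flattens the source and destination layouts into fixed-size
// arrays so the whole descriptor can be passed to the kernels by value.
// Uint32Fastdiv carries a precomputed reciprocal ("magic number") so the
// per-element index/stride divisions inside the kernels avoid the slow
// hardware integer divide; divisor() returns the plain stride value when an
// ordinary multiply is needed. offsets[2*dim] is the front padding of
// dimension dim (offsets[2*dim+1], presumably the back padding, is not read
// by these kernels).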
template <typename T>
__global__ void paddingConst_kernel(
const size_t ndim, const size_t total_out_nr, const T* const src, T* const dst,
ShapeParams params, const float_t padding_val) {
KERN_FOR(out_index, total_out_nr) {
bool in_src_valid_area = true;
size_t in_index = 0;
size_t out_index_tmp = out_index;
for (size_t dim = 0; dim <= ndim - 1; ++dim) {
Uint32Fastdiv dst_stride = params.dst_stride[dim],
src_stride = params.src_stride[dim];
size_t src_shape = params.src_shape[dim];
size_t offset = params.offsets[dim * 2];
size_t dim_index = out_index_tmp / dst_stride;
in_src_valid_area &=
(dim_index >= offset && dim_index < offset + src_shape);
if (!in_src_valid_area)
break;
out_index_tmp -= dim_index * dst_stride.divisor();
in_index += (dim_index - offset) * src_stride.divisor();
/*
size_t dim_index = out_index_tmp / params.dst_stride[dim];
out_index_tmp -= dim_index * params.dst_stride[dim].divisor();
in_src_valid_area &= (dim_index >= params.offsets[dim * 2] &&
dim_index < params.offsets[dim * 2] +
params.src_shape[dim]);
in_index += (dim_index - params.offsets[dim * 2]) *
params.src_stride[dim].divisor();
*/
}
dst[out_index] = in_src_valid_area ? src[in_index] : padding_val;
}
}
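// Worked example (illustrative numbers only): for a contiguous 1-D tensor
// with src_shape = {4}, front padding offsets[0] = 2 and dst_shape = {8}, the
// kernel above produces
//     dst = [pad, pad, src[0], src[1], src[2], src[3], pad, pad]
// because only out_index 2..5 satisfy offset <= dim_index < offset + src_shape
// and copy from in_index = dim_index - offset; the rest receive padding_val.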
template <typename T>
__global__ void paddingReplicate_kernel(
const size_t ndim, const size_t total_out_nr, const T* const src, T* const dst,
ShapeParams params, const float_t) {
KERN_FOR(out_index, total_out_nr) {
size_t in_index = 0;
size_t out_index_tmp = out_index;
for (size_t dim = 0; dim <= ndim - 1; ++dim) {
size_t dim_index = out_index_tmp / params.dst_stride[dim];
out_index_tmp -= dim_index * params.dst_stride[dim].divisor();
dim_index = (size_t)llmin(
(long long)params.src_shape[dim] - 1,
llmax((long long)dim_index - (long long)params.offsets[dim * 2],
(long long)0));
in_index += dim_index * params.src_stride[dim].divisor();
}
dst[out_index] = src[in_index];
}
}
template <typename T>
__global__ void paddingReflect_kernel(
const size_t ndim, const size_t total_out_nr, const T* const src, T* const dst,
ShapeParams params, const float_t) {
KERN_FOR(out_index, total_out_nr) {
size_t in_index = 0;
size_t out_index_tmp = out_index;
for (size_t dim = 0; dim <= ndim - 1; ++dim) {
long long dim_index = out_index_tmp / params.dst_stride[dim];
out_index_tmp -= dim_index * params.dst_stride[dim].divisor();
dim_index -= (long long)params.offsets[dim * 2];
dim_index = llmax(dim_index, -dim_index);
dim_index = llmin(
dim_index, 2 * (long long)params.src_shape[dim] - dim_index - 2);
in_index += size_t(dim_index) * (size_t)params.src_stride[dim].divisor();
}
dst[out_index] = src[in_index];
}
}
template <typename T>
__global__ void paddingConstBackward_kernel(
const size_t ndim, const size_t total_in_nr, const T* const src, T* const dst,
ShapeParams params) {
KERN_FOR(in_index, total_in_nr) {
bool in_dst_valid_area = true;
size_t out_index = 0;
size_t in_index_tmp = in_index;
for (size_t dim = 0; dim <= ndim - 1; ++dim) {
size_t dim_index = in_index_tmp / params.src_stride[dim];
in_index_tmp -= dim_index * params.src_stride[dim].divisor();
in_dst_valid_area &=
(dim_index >= params.offsets[dim * 2] &&
dim_index < params.offsets[dim * 2] + params.dst_shape[dim]);
out_index += (dim_index - params.offsets[dim * 2]) *
params.dst_stride[dim].divisor();
}
if (in_dst_valid_area) {
dst[out_index] = src[in_index];
}
}
}
template <typename T>
__global__ void paddingReplicateBackward_kernel(
const size_t ndim, const size_t total_in_nr, const T* const src, T* const dst,
ShapeParams params) {
KERN_FOR(in_index, total_in_nr) {
size_t out_index = 0;
size_t in_index_tmp = in_index;
for (size_t dim = 0; dim <= ndim - 1; ++dim) {
size_t dim_index = in_index_tmp / params.src_stride[dim];
in_index_tmp -= dim_index * params.src_stride[dim].divisor();
dim_index = (size_t)llmin(
(long long)params.dst_shape[dim] - 1,
llmax((long long)dim_index - (long long)params.offsets[dim * 2],
(long long)0));
out_index += dim_index * params.dst_stride[dim].divisor();
}
atomic_add(&dst[out_index], src[in_index]);
}
}
template <typename T>
__global__ void paddingReflectBackward_kernel(
const size_t ndim, const size_t total_in_nr, const T* const src, T* const dst,
ShapeParams params) {
KERN_FOR(in_index, total_in_nr) {
size_t out_index = 0;
size_t in_index_tmp = in_index;
for (size_t dim = 0; dim <= ndim - 1; ++dim) {
long long dim_index = in_index_tmp / params.src_stride[dim];
in_index_tmp -= dim_index * params.src_stride[dim].divisor();
dim_index -= (long long)params.offsets[dim * 2];
dim_index = llmax(dim_index, -dim_index);
dim_index = llmin(
dim_index, 2 * (long long)params.dst_shape[dim] - dim_index - 2);
out_index += size_t(dim_index) * (size_t)params.dst_stride[dim].divisor();
}
atomic_add(&dst[out_index], src[in_index]);
}
}
template <typename T>
void padding_forward_proxy(
const TensorND& src, const TensorND& dst, size_t offsets[MEGDNN_MAX_NDIM * 2],
uint32_t mode, const float_t padding_val, hipStream_t stream) {
ShapeParams params;
for (size_t i = 0; i < src.layout.ndim; ++i) {
params.src_shape[i] = src.layout.shape[i];
params.dst_shape[i] = dst.layout.shape[i];
params.src_stride[i] = src.layout.stride[i];
params.dst_stride[i] = dst.layout.stride[i];
params.offsets[i * 2] = offsets[i * 2];
params.offsets[i * 2 + 1] = offsets[i * 2 + 1];
}
void (*fwd_kern)(
const size_t, const size_t, const T* const, T* const, ShapeParams,
const float_t);
switch (mode) {
case param_enumv::Padding::PaddingMode::CONSTANT:
fwd_kern = paddingConst_kernel<T>;
break;
case param_enumv::Padding::PaddingMode::REPLICATE:
fwd_kern = paddingReplicate_kernel<T>;
break;
case param_enumv::Padding::PaddingMode::REFLECT:
fwd_kern = paddingReflect_kernel<T>;
break;
default:
megdnn_assert(false, "invalid padding mode");
}
size_t total_nr = dst.layout.total_nr_elems();
uint32_t nr_threads = query_blocksize_for_kernel(fwd_kern);
dim3 threads(nr_threads);
dim3 blocks(DIVUP(total_nr, nr_threads));
hipLaunchKernelGGL(( fwd_kern), dim3(blocks), dim3(threads), 0, stream,
src.layout.ndim, total_nr, src.ptr<T>(), dst.ptr<T>(), params, padding_val);
after_kernel_launch();
}
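// The proxy above selects the kernel for the requested padding mode, asks
// query_blocksize_for_kernel for an occupancy-friendly block size, and
// launches DIVUP(total_nr, nr_threads) blocks so the grid covers every output
// element (KERN_FOR presumably iterates with a grid stride, so correctness
// does not hinge on the exact grid size). The backward proxy below follows
// the same pattern, iterating over the padded gradient src instead.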
template <typename T>
void padding_backward_proxy(
const TensorND& src, const TensorND& dst, size_t offsets[MEGDNN_MAX_NDIM * 2],
uint32_t mode, hipStream_t stream) {
ShapeParams params;
for (size_t i = 0; i < src.layout.ndim; ++i) {
params.src_shape[i] = src.layout.shape[i];
params.dst_shape[i] = dst.layout.shape[i];
params.src_stride[i] = src.layout.stride[i];
params.dst_stride[i] = dst.layout.stride[i];
params.offsets[i * 2] = offsets[i * 2];
params.offsets[i * 2 + 1] = offsets[i * 2 + 1];
}
hipMemset(dst.raw_ptr, 0, dst.layout.access_bytes());
void (*bwd_kern)(const size_t, const size_t, const T* const, T* const, ShapeParams);
switch (mode) {
case param_enumv::Padding::PaddingMode::CONSTANT:
bwd_kern = paddingConstBackward_kernel<T>;
break;
case param_enumv::Padding::PaddingMode::REPLICATE:
bwd_kern = paddingReplicateBackward_kernel<T>;
break;
case param_enumv::Padding::PaddingMode::REFLECT:
bwd_kern = paddingReflectBackward_kernel<T>;
break;
default:
megdnn_assert(false, "invalid padding mode");
}
size_t total_nr = src.layout.total_nr_elems();
uint32_t nr_threads = query_blocksize_for_kernel(bwd_kern);
dim3 threads(nr_threads);
dim3 blocks(DIVUP(total_nr, nr_threads));
hipLaunchKernelGGL(( bwd_kern), dim3(blocks), dim3(threads), 0, stream,
src.layout.ndim, total_nr, src.ptr<T>(), dst.ptr<T>(), params);
after_kernel_launch();
}
#define INST(T) \
template void padding_forward_proxy<T>( \
const TensorND& src, const TensorND& dst, \
size_t offsets[MEGDNN_MAX_NDIM * 2], uint32_t mode, \
const float_t padding_val, hipStream_t stream);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#undef INST
#define INST(T) \
template void padding_backward_proxy<T>( \
const TensorND& src, const TensorND& dst, \
size_t offsets[MEGDNN_MAX_NDIM * 2], uint32_t mode, hipStream_t stream);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE_FLOAT(cb)
#undef cb
#undef INST
} // namespace padding
} // namespace cuda
} // namespace megdnn | 0e37d6d2c26590beaf1e41879adcb4bdbcb1ea4a.cu | /**
* \file dnn/src/cuda/padding/padding.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include <algorithm>
#include <cstring>
#include <iostream>
#include "megdnn/basic_types.h"
#include "padding.cuh"
#include "src/cuda/int_fastdiv.cuh"
#include "src/cuda/query_blocksize.cuh"
namespace megdnn {
namespace cuda {
namespace padding {
struct ShapeParams {
size_t src_shape[MEGDNN_MAX_NDIM];
size_t dst_shape[MEGDNN_MAX_NDIM];
Uint32Fastdiv src_stride[MEGDNN_MAX_NDIM];
Uint32Fastdiv dst_stride[MEGDNN_MAX_NDIM];
size_t offsets[MEGDNN_MAX_NDIM * 2];
};
template <typename T>
__global__ void paddingConst_kernel(
const size_t ndim, const size_t total_out_nr, const T* const src, T* const dst,
ShapeParams params, const float_t padding_val) {
KERN_FOR(out_index, total_out_nr) {
bool in_src_valid_area = true;
size_t in_index = 0;
size_t out_index_tmp = out_index;
for (size_t dim = 0; dim <= ndim - 1; ++dim) {
Uint32Fastdiv dst_stride = params.dst_stride[dim],
src_stride = params.src_stride[dim];
size_t src_shape = params.src_shape[dim];
size_t offset = params.offsets[dim * 2];
size_t dim_index = out_index_tmp / dst_stride;
in_src_valid_area &=
(dim_index >= offset && dim_index < offset + src_shape);
if (!in_src_valid_area)
break;
out_index_tmp -= dim_index * dst_stride.divisor();
in_index += (dim_index - offset) * src_stride.divisor();
/*
size_t dim_index = out_index_tmp / params.dst_stride[dim];
out_index_tmp -= dim_index * params.dst_stride[dim].divisor();
in_src_valid_area &= (dim_index >= params.offsets[dim * 2] &&
dim_index < params.offsets[dim * 2] +
params.src_shape[dim]);
in_index += (dim_index - params.offsets[dim * 2]) *
params.src_stride[dim].divisor();
*/
}
dst[out_index] = in_src_valid_area ? src[in_index] : padding_val;
}
}
template <typename T>
__global__ void paddingReplicate_kernel(
const size_t ndim, const size_t total_out_nr, const T* const src, T* const dst,
ShapeParams params, const float_t) {
KERN_FOR(out_index, total_out_nr) {
size_t in_index = 0;
size_t out_index_tmp = out_index;
for (size_t dim = 0; dim <= ndim - 1; ++dim) {
size_t dim_index = out_index_tmp / params.dst_stride[dim];
out_index_tmp -= dim_index * params.dst_stride[dim].divisor();
dim_index = (size_t)llmin(
(long long)params.src_shape[dim] - 1,
llmax((long long)dim_index - (long long)params.offsets[dim * 2],
(long long)0));
in_index += dim_index * params.src_stride[dim].divisor();
}
dst[out_index] = src[in_index];
}
}
template <typename T>
__global__ void paddingReflect_kernel(
const size_t ndim, const size_t total_out_nr, const T* const src, T* const dst,
ShapeParams params, const float_t) {
KERN_FOR(out_index, total_out_nr) {
size_t in_index = 0;
size_t out_index_tmp = out_index;
for (size_t dim = 0; dim <= ndim - 1; ++dim) {
long long dim_index = out_index_tmp / params.dst_stride[dim];
out_index_tmp -= dim_index * params.dst_stride[dim].divisor();
dim_index -= (long long)params.offsets[dim * 2];
dim_index = llmax(dim_index, -dim_index);
dim_index = llmin(
dim_index, 2 * (long long)params.src_shape[dim] - dim_index - 2);
in_index += size_t(dim_index) * (size_t)params.src_stride[dim].divisor();
}
dst[out_index] = src[in_index];
}
}
template <typename T>
__global__ void paddingConstBackward_kernel(
const size_t ndim, const size_t total_in_nr, const T* const src, T* const dst,
ShapeParams params) {
KERN_FOR(in_index, total_in_nr) {
bool in_dst_valid_area = true;
size_t out_index = 0;
size_t in_index_tmp = in_index;
for (size_t dim = 0; dim <= ndim - 1; ++dim) {
size_t dim_index = in_index_tmp / params.src_stride[dim];
in_index_tmp -= dim_index * params.src_stride[dim].divisor();
in_dst_valid_area &=
(dim_index >= params.offsets[dim * 2] &&
dim_index < params.offsets[dim * 2] + params.dst_shape[dim]);
out_index += (dim_index - params.offsets[dim * 2]) *
params.dst_stride[dim].divisor();
}
if (in_dst_valid_area) {
dst[out_index] = src[in_index];
}
}
}
template <typename T>
__global__ void paddingReplicateBackward_kernel(
const size_t ndim, const size_t total_in_nr, const T* const src, T* const dst,
ShapeParams params) {
KERN_FOR(in_index, total_in_nr) {
size_t out_index = 0;
size_t in_index_tmp = in_index;
for (size_t dim = 0; dim <= ndim - 1; ++dim) {
size_t dim_index = in_index_tmp / params.src_stride[dim];
in_index_tmp -= dim_index * params.src_stride[dim].divisor();
dim_index = (size_t)llmin(
(long long)params.dst_shape[dim] - 1,
llmax((long long)dim_index - (long long)params.offsets[dim * 2],
(long long)0));
out_index += dim_index * params.dst_stride[dim].divisor();
}
atomic_add(&dst[out_index], src[in_index]);
}
}
template <typename T>
__global__ void paddingReflectBackward_kernel(
const size_t ndim, const size_t total_in_nr, const T* const src, T* const dst,
ShapeParams params) {
KERN_FOR(in_index, total_in_nr) {
size_t out_index = 0;
size_t in_index_tmp = in_index;
for (size_t dim = 0; dim <= ndim - 1; ++dim) {
long long dim_index = in_index_tmp / params.src_stride[dim];
in_index_tmp -= dim_index * params.src_stride[dim].divisor();
dim_index -= (long long)params.offsets[dim * 2];
dim_index = llmax(dim_index, -dim_index);
dim_index = llmin(
dim_index, 2 * (long long)params.dst_shape[dim] - dim_index - 2);
out_index += size_t(dim_index) * (size_t)params.dst_stride[dim].divisor();
}
atomic_add(&dst[out_index], src[in_index]);
}
}
template <typename T>
void padding_forward_proxy(
const TensorND& src, const TensorND& dst, size_t offsets[MEGDNN_MAX_NDIM * 2],
uint32_t mode, const float_t padding_val, cudaStream_t stream) {
ShapeParams params;
for (size_t i = 0; i < src.layout.ndim; ++i) {
params.src_shape[i] = src.layout.shape[i];
params.dst_shape[i] = dst.layout.shape[i];
params.src_stride[i] = src.layout.stride[i];
params.dst_stride[i] = dst.layout.stride[i];
params.offsets[i * 2] = offsets[i * 2];
params.offsets[i * 2 + 1] = offsets[i * 2 + 1];
}
void (*fwd_kern)(
const size_t, const size_t, const T* const, T* const, ShapeParams,
const float_t);
switch (mode) {
case param_enumv::Padding::PaddingMode::CONSTANT:
fwd_kern = paddingConst_kernel<T>;
break;
case param_enumv::Padding::PaddingMode::REPLICATE:
fwd_kern = paddingReplicate_kernel<T>;
break;
case param_enumv::Padding::PaddingMode::REFLECT:
fwd_kern = paddingReflect_kernel<T>;
break;
default:
megdnn_assert(false, "invalid padding mode");
}
size_t total_nr = dst.layout.total_nr_elems();
uint32_t nr_threads = query_blocksize_for_kernel(fwd_kern);
dim3 threads(nr_threads);
dim3 blocks(DIVUP(total_nr, nr_threads));
fwd_kern<<<blocks, threads, 0, stream>>>(
src.layout.ndim, total_nr, src.ptr<T>(), dst.ptr<T>(), params, padding_val);
after_kernel_launch();
}
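// Hypothetical call-site sketch (editorial addition, not part of the original
// megdnn source); assumes `src` and `dst` are pre-allocated float tensors and
// `offsets` holds the {pad_before, pad_after} pair for every dimension:
static void padding_forward_example(
        const TensorND& src, const TensorND& dst,
        size_t offsets[MEGDNN_MAX_NDIM * 2], cudaStream_t stream) {
    padding_forward_proxy<float>(
            src, dst, offsets, param_enumv::Padding::PaddingMode::CONSTANT,
            /*padding_val=*/0.f, stream);
}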
template <typename T>
void padding_backward_proxy(
const TensorND& src, const TensorND& dst, size_t offsets[MEGDNN_MAX_NDIM * 2],
uint32_t mode, cudaStream_t stream) {
ShapeParams params;
for (size_t i = 0; i < src.layout.ndim; ++i) {
params.src_shape[i] = src.layout.shape[i];
params.dst_shape[i] = dst.layout.shape[i];
params.src_stride[i] = src.layout.stride[i];
params.dst_stride[i] = dst.layout.stride[i];
params.offsets[i * 2] = offsets[i * 2];
params.offsets[i * 2 + 1] = offsets[i * 2 + 1];
}
cudaMemset(dst.raw_ptr, 0, dst.layout.access_bytes());
void (*bwd_kern)(const size_t, const size_t, const T* const, T* const, ShapeParams);
switch (mode) {
case param_enumv::Padding::PaddingMode::CONSTANT:
bwd_kern = paddingConstBackward_kernel<T>;
break;
case param_enumv::Padding::PaddingMode::REPLICATE:
bwd_kern = paddingReplicateBackward_kernel<T>;
break;
case param_enumv::Padding::PaddingMode::REFLECT:
bwd_kern = paddingReflectBackward_kernel<T>;
break;
default:
megdnn_assert(false, "invalid padding mode");
}
size_t total_nr = src.layout.total_nr_elems();
uint32_t nr_threads = query_blocksize_for_kernel(bwd_kern);
dim3 threads(nr_threads);
dim3 blocks(DIVUP(total_nr, nr_threads));
bwd_kern<<<blocks, threads, 0, stream>>>(
src.layout.ndim, total_nr, src.ptr<T>(), dst.ptr<T>(), params);
after_kernel_launch();
}
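// Editorial note (not part of the original megdnn source): the cudaMemset
// above zero-initializes the gradient buffer because the backward kernels only
// write positions that are actually hit; REPLICATE and REFLECT additionally
// accumulate with atomic_add, since several padded positions can map to the
// same un-padded position.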
#define INST(T) \
template void padding_forward_proxy<T>( \
const TensorND& src, const TensorND& dst, \
size_t offsets[MEGDNN_MAX_NDIM * 2], uint32_t mode, \
const float_t padding_val, cudaStream_t stream);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#undef INST
#define INST(T) \
template void padding_backward_proxy<T>( \
const TensorND& src, const TensorND& dst, \
size_t offsets[MEGDNN_MAX_NDIM * 2], uint32_t mode, cudaStream_t stream);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE_FLOAT(cb)
#undef cb
#undef INST
} // namespace padding
} // namespace cuda
} // namespace megdnn |
9a6f19152685e45b24ac07c376c5060f340971e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2023 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "ActiveForceComputeGPU.cuh"
#include "hoomd/RNGIdentifiers.h"
#include "hoomd/RandomNumbers.h"
#include "hoomd/TextureTools.h"
#include <assert.h>
/*! \file ActiveForceComputeGPU.cu
    \brief Declares GPU kernel code for calculating active forces on the GPU. Used by
ActiveForceComputeGPU.
*/
namespace hoomd
{
namespace md
{
namespace kernel
{
//! Kernel for setting active force vectors on the GPU
/*! \param group_size number of particles
\param d_index_array stores list to convert group index to global tag
\param d_force particle force on device
\param d_torque particle torque on device
\param d_orientation particle orientation on device
\param d_f_act particle active force unit vector
\param d_t_act particle active torque unit vector
    \param d_pos particle positions on device
    \param N total number of particles
*/
__global__ void gpu_compute_active_force_set_forces_kernel(const unsigned int group_size,
unsigned int* d_index_array,
Scalar4* d_force,
Scalar4* d_torque,
const Scalar4* d_pos,
const Scalar4* d_orientation,
const Scalar4* d_f_act,
const Scalar4* d_t_act,
const unsigned int N)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_index_array[group_idx];
Scalar4 posidx = __ldg(d_pos + idx);
unsigned int type = __scalar_as_int(posidx.w);
Scalar4 fact = __ldg(d_f_act + type);
vec3<Scalar> f(fact.w * fact.x, fact.w * fact.y, fact.w * fact.z);
quat<Scalar> quati(__ldg(d_orientation + idx));
vec3<Scalar> fi = rotate(quati, f);
d_force[idx] = vec_to_scalar4(fi, 0);
Scalar4 tact = __ldg(d_t_act + type);
vec3<Scalar> t(tact.w * tact.x, tact.w * tact.y, tact.w * tact.z);
vec3<Scalar> ti = rotate(quati, t);
d_torque[idx] = vec_to_scalar4(ti, 0);
}
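// Editorial note (illustrative, not part of the original HOOMD source): with
// the orientation quaternion q = (s, u) and the per-type active force
// direction f scaled by its magnitude fact.w, rotate(quati, f) above applies
// the usual quaternion sandwich q f q*, which expands to
//     f_rot = f + 2 s (u x f) + 2 u x (u x f);
// the per-type active torque t is transformed with the same identity.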
//! Kernel for applying rotational diffusion to active force vectors on the GPU
/*! \param group_size number of particles
\param d_index_array stores list to convert group index to global tag
\param d_pos particle positions on device
\param d_f_act particle active force unit vector
\param is2D check if simulation is 2D or 3D
\param rotationConst particle rotational diffusion constant
\param seed seed for random number generator
*/
__global__ void gpu_compute_active_force_rotational_diffusion_kernel(const unsigned int group_size,
unsigned int* d_tag,
unsigned int* d_index_array,
const Scalar4* d_pos,
Scalar4* d_orientation,
const Scalar4* d_f_act,
bool is2D,
const Scalar rotationConst,
const uint64_t timestep,
const uint16_t seed)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_index_array[group_idx];
Scalar4 posidx = __ldg(d_pos + idx);
unsigned int type = __scalar_as_int(posidx.w);
Scalar4 fact = __ldg(d_f_act + type);
if (fact.w != 0)
{
unsigned int ptag = d_tag[group_idx];
quat<Scalar> quati(__ldg(d_orientation + idx));
hoomd::RandomGenerator rng(
hoomd::Seed(hoomd::RNGIdentifier::ActiveForceCompute, timestep, seed),
hoomd::Counter(ptag));
if (is2D) // 2D
{
Scalar delta_theta = hoomd::NormalDistribution<Scalar>(rotationConst)(rng);
vec3<Scalar> b(0, 0, 1.0);
quat<Scalar> rot_quat = quat<Scalar>::fromAxisAngle(b, delta_theta);
quati = rot_quat * quati;
quati = quati * (Scalar(1.0) / slow::sqrt(norm2(quati)));
d_orientation[idx] = quat_to_scalar4(quati);
// in 2D there is only one meaningful direction for torque
}
else // 3D: Following Stenhammar, Soft Matter, 2014
{
hoomd::SpherePointGenerator<Scalar> unit_vec;
vec3<Scalar> rand_vec;
unit_vec(rng, rand_vec);
vec3<Scalar> f(fact.x, fact.y, fact.z);
vec3<Scalar> fi = rotate(quati, f);
vec3<Scalar> aux_vec = cross(fi, rand_vec); // rotation axis
Scalar aux_vec_mag = slow::rsqrt(dot(aux_vec, aux_vec));
aux_vec *= aux_vec_mag;
Scalar delta_theta = hoomd::NormalDistribution<Scalar>(rotationConst)(rng);
quat<Scalar> rot_quat = quat<Scalar>::fromAxisAngle(aux_vec, delta_theta);
quati = rot_quat * quati;
quati = quati * (Scalar(1.0) / slow::sqrt(norm2(quati)));
d_orientation[idx] = quat_to_scalar4(quati);
}
}
}
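// Editorial note (illustrative, not part of the original HOOMD source): in 3D
// the kernel draws a random point on the unit sphere, crosses it with the
// current active-force direction to get a rotation axis perpendicular to fi,
// and rotates the orientation by a normally distributed angle whose standard
// deviation is rotationConst (presumably sqrt(2 * D_rot * dt) for a rotational
// diffusion coefficient D_rot and timestep dt, per the Stenhammar 2014
// reference cited above); the quaternion is renormalized after each update to
// limit numerical drift.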
hipError_t gpu_compute_active_force_set_forces(const unsigned int group_size,
unsigned int* d_index_array,
Scalar4* d_force,
Scalar4* d_torque,
const Scalar4* d_pos,
const Scalar4* d_orientation,
const Scalar4* d_f_act,
const Scalar4* d_t_act,
const unsigned int N,
unsigned int block_size)
{
// setup the grid to run the kernel
dim3 grid(group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipMemset(d_force, 0, sizeof(Scalar4) * N);
hipLaunchKernelGGL((gpu_compute_active_force_set_forces_kernel),
dim3(grid),
dim3(threads),
0,
0,
group_size,
d_index_array,
d_force,
d_torque,
d_pos,
d_orientation,
d_f_act,
d_t_act,
N);
return hipSuccess;
}
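// Editorial note (illustrative, not part of the original HOOMD source): the
// hipMemset above clears the force array for all N particles before the kernel
// runs, so particles outside the active group keep a zero active force instead
// of stale values from a previous call.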
hipError_t gpu_compute_active_force_rotational_diffusion(const unsigned int group_size,
unsigned int* d_tag,
unsigned int* d_index_array,
const Scalar4* d_pos,
Scalar4* d_orientation,
const Scalar4* d_f_act,
bool is2D,
const Scalar rotationConst,
const uint64_t timestep,
const uint16_t seed,
unsigned int block_size)
{
// setup the grid to run the kernel
dim3 grid(group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL((gpu_compute_active_force_rotational_diffusion_kernel),
dim3(grid),
dim3(threads),
0,
0,
group_size,
d_tag,
d_index_array,
d_pos,
d_orientation,
d_f_act,
is2D,
rotationConst,
timestep,
seed);
return hipSuccess;
}
} // end namespace kernel
} // end namespace md
} // end namespace hoomd
| 9a6f19152685e45b24ac07c376c5060f340971e0.cu | // Copyright (c) 2009-2023 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "ActiveForceComputeGPU.cuh"
#include "hoomd/RNGIdentifiers.h"
#include "hoomd/RandomNumbers.h"
#include "hoomd/TextureTools.h"
#include <assert.h>
/*! \file ActiveForceComputeGPU.cu
    \brief Declares GPU kernel code for calculating active forces on the GPU. Used by
ActiveForceComputeGPU.
*/
namespace hoomd
{
namespace md
{
namespace kernel
{
//! Kernel for setting active force vectors on the GPU
/*! \param group_size number of particles
\param d_index_array stores list to convert group index to global tag
\param d_force particle force on device
\param d_torque particle torque on device
\param d_orientation particle orientation on device
\param d_f_act particle active force unit vector
\param d_t_act particle active torque unit vector
    \param d_pos particle positions on device
    \param N total number of particles
*/
__global__ void gpu_compute_active_force_set_forces_kernel(const unsigned int group_size,
unsigned int* d_index_array,
Scalar4* d_force,
Scalar4* d_torque,
const Scalar4* d_pos,
const Scalar4* d_orientation,
const Scalar4* d_f_act,
const Scalar4* d_t_act,
const unsigned int N)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_index_array[group_idx];
Scalar4 posidx = __ldg(d_pos + idx);
unsigned int type = __scalar_as_int(posidx.w);
Scalar4 fact = __ldg(d_f_act + type);
vec3<Scalar> f(fact.w * fact.x, fact.w * fact.y, fact.w * fact.z);
quat<Scalar> quati(__ldg(d_orientation + idx));
vec3<Scalar> fi = rotate(quati, f);
d_force[idx] = vec_to_scalar4(fi, 0);
Scalar4 tact = __ldg(d_t_act + type);
vec3<Scalar> t(tact.w * tact.x, tact.w * tact.y, tact.w * tact.z);
vec3<Scalar> ti = rotate(quati, t);
d_torque[idx] = vec_to_scalar4(ti, 0);
}
//! Kernel for applying rotational diffusion to active force vectors on the GPU
/*! \param group_size number of particles
\param d_index_array stores list to convert group index to global tag
\param d_pos particle positions on device
\param d_f_act particle active force unit vector
\param is2D check if simulation is 2D or 3D
\param rotationConst particle rotational diffusion constant
\param seed seed for random number generator
*/
__global__ void gpu_compute_active_force_rotational_diffusion_kernel(const unsigned int group_size,
unsigned int* d_tag,
unsigned int* d_index_array,
const Scalar4* d_pos,
Scalar4* d_orientation,
const Scalar4* d_f_act,
bool is2D,
const Scalar rotationConst,
const uint64_t timestep,
const uint16_t seed)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_index_array[group_idx];
Scalar4 posidx = __ldg(d_pos + idx);
unsigned int type = __scalar_as_int(posidx.w);
Scalar4 fact = __ldg(d_f_act + type);
if (fact.w != 0)
{
unsigned int ptag = d_tag[group_idx];
quat<Scalar> quati(__ldg(d_orientation + idx));
hoomd::RandomGenerator rng(
hoomd::Seed(hoomd::RNGIdentifier::ActiveForceCompute, timestep, seed),
hoomd::Counter(ptag));
if (is2D) // 2D
{
Scalar delta_theta = hoomd::NormalDistribution<Scalar>(rotationConst)(rng);
vec3<Scalar> b(0, 0, 1.0);
quat<Scalar> rot_quat = quat<Scalar>::fromAxisAngle(b, delta_theta);
quati = rot_quat * quati;
quati = quati * (Scalar(1.0) / slow::sqrt(norm2(quati)));
d_orientation[idx] = quat_to_scalar4(quati);
// in 2D there is only one meaningful direction for torque
}
else // 3D: Following Stenhammar, Soft Matter, 2014
{
hoomd::SpherePointGenerator<Scalar> unit_vec;
vec3<Scalar> rand_vec;
unit_vec(rng, rand_vec);
vec3<Scalar> f(fact.x, fact.y, fact.z);
vec3<Scalar> fi = rotate(quati, f);
vec3<Scalar> aux_vec = cross(fi, rand_vec); // rotation axis
Scalar aux_vec_mag = slow::rsqrt(dot(aux_vec, aux_vec));
aux_vec *= aux_vec_mag;
Scalar delta_theta = hoomd::NormalDistribution<Scalar>(rotationConst)(rng);
quat<Scalar> rot_quat = quat<Scalar>::fromAxisAngle(aux_vec, delta_theta);
quati = rot_quat * quati;
quati = quati * (Scalar(1.0) / slow::sqrt(norm2(quati)));
d_orientation[idx] = quat_to_scalar4(quati);
}
}
}
hipError_t gpu_compute_active_force_set_forces(const unsigned int group_size,
unsigned int* d_index_array,
Scalar4* d_force,
Scalar4* d_torque,
const Scalar4* d_pos,
const Scalar4* d_orientation,
const Scalar4* d_f_act,
const Scalar4* d_t_act,
const unsigned int N,
unsigned int block_size)
{
// setup the grid to run the kernel
dim3 grid(group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipMemset(d_force, 0, sizeof(Scalar4) * N);
hipLaunchKernelGGL((gpu_compute_active_force_set_forces_kernel),
dim3(grid),
dim3(threads),
0,
0,
group_size,
d_index_array,
d_force,
d_torque,
d_pos,
d_orientation,
d_f_act,
d_t_act,
N);
return hipSuccess;
}
hipError_t gpu_compute_active_force_rotational_diffusion(const unsigned int group_size,
unsigned int* d_tag,
unsigned int* d_index_array,
const Scalar4* d_pos,
Scalar4* d_orientation,
const Scalar4* d_f_act,
bool is2D,
const Scalar rotationConst,
const uint64_t timestep,
const uint16_t seed,
unsigned int block_size)
{
// setup the grid to run the kernel
dim3 grid(group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL((gpu_compute_active_force_rotational_diffusion_kernel),
dim3(grid),
dim3(threads),
0,
0,
group_size,
d_tag,
d_index_array,
d_pos,
d_orientation,
d_f_act,
is2D,
rotationConst,
timestep,
seed);
return hipSuccess;
}
} // end namespace kernel
} // end namespace md
} // end namespace hoomd
|
6fbc6ff741810adcbe3f2a5fca25b37a84aa7f30.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _STEER_TO_AVOID_CLOSE_NEIGHBORS_CU_
#define _STEER_TO_AVOID_CLOSE_NEIGHBORS_CU_
#include <hip/hip_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "OpenSteer/NeighborData.h"
#include "CUDAKernelOptions.cu"
#include "CUDAVectorUtilities.cu"
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define N_I(i) (CUT_BANK_CHECKER(((int*)neighbor), i))
#define F_F(i) (CUT_BANK_CHECKER(((float*)forward), i))
#define P_F(i) (CUT_BANK_CHECKER(((float*)position), i))
#define S_F(i) (CUT_BANK_CHECKER(((float*)steering), i))
#define N(i) (CUT_BANK_CHECKER(neighbor, i))
#define F(i) (CUT_BANK_CHECKER(forward, i))
#define P(i) (CUT_BANK_CHECKER(position, i))
#define S(i) (CUT_BANK_CHECKER(steering, i))
#define SP(i) (CUT_BANK_CHECKER(speed, i))
#else
#define N_I(i) ((int*)neighbor)[i]
#define F_F(i) ((float*)forward)[i]
#define P_F(i) ((float*)position)[i]
#define S_F(i) ((float*)steering)[i]
#define N(i) neighbor[i]
#define F(i) forward[i]
#define P(i) position[i]
#define S(i) steering[i]
#define SP(i) speed[i]
#endif
__global__ void
steerToAvoidCloseNeighbors(VehicleData *vehicleData, VehicleConst *vehicleConst, float3 *steeringVectors, NeighborData *neighbors, float minSeparationDistance, float weight, kernel_options options)
{
int id = (blockIdx.x * blockDim.x + threadIdx.x);
int blockOffset = (blockDim.x * blockIdx.x * 3);
int blockOffsetNeighbors = (blockDim.x * blockIdx.x * (MAX_NEIGHBORS + 1));
// shared memory for NeighborData
__shared__ NeighborData neighbor[TPB];
// shared memory for steering vectors
__shared__ float3 steering[TPB];
S(threadIdx.x) = make_float3(0.f, 0.f, 0.f);
__syncthreads();
// copy neighbor data from global memory (coalesced)
int i;
for (i = 0; i < (sizeof(NeighborData) / sizeof(int)); i++) {
N_I(threadIdx.x + i*blockDim.x) = ((int*)neighbors)[blockOffsetNeighbors + threadIdx.x + i*blockDim.x];
}
__syncthreads();
// for each of the other vehicles...
for (i = 0; i < N(threadIdx.x).numOfNeighbors; i++)
{
int idOfNeighbor = N(threadIdx.x).idsOfNeighbors[i];
float sumOfRadii = (*vehicleConst).radius[id] + (*vehicleConst).radius[idOfNeighbor];
float minCenterToCenter = minSeparationDistance + sumOfRadii;
float3 offset = float3Sub((*vehicleData).position[idOfNeighbor], (*vehicleData).position[id]);
float currentDistance = float3Length(offset);
if (currentDistance < minCenterToCenter)
{
offset = float3Mul(offset, -1.f);
S(threadIdx.x) = float3PerpendicularComponent(offset, (*vehicleData).forward[id]);
break;
}
}
// multiply by weight
S(threadIdx.x) = float3Mul(S(threadIdx.x), weight);
if ((options & IGNORE_UNLESS_ZERO) != 0
&& (steeringVectors[id].x != 0.f
|| steeringVectors[id].y != 0.f
|| steeringVectors[id].z != 0.f))
{
S(threadIdx.x) = steeringVectors[id];
} else {
S(threadIdx.x) = float3Add(S(threadIdx.x), steeringVectors[id]);
}
__syncthreads();
// writing back to global memory (coalesced)
((float*)steeringVectors)[blockOffset + threadIdx.x] = S_F(threadIdx.x);
((float*)steeringVectors)[blockOffset + threadIdx.x + blockDim.x] = S_F(threadIdx.x + blockDim.x);
((float*)steeringVectors)[blockOffset + threadIdx.x + 2*blockDim.x] = S_F(threadIdx.x + 2*blockDim.x);
}
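// Editorial note (illustrative, not part of the original OpenSteer source):
// only the component of the repulsive offset perpendicular to the vehicle's
// forward vector is kept, so a too-close neighbor produces lateral steering
// rather than a change along the direction of travel; with IGNORE_UNLESS_ZERO
// set, an already accumulated non-zero steering vector takes priority and the
// value computed here is discarded for that vehicle.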
#endif // _STEER_TO_AVOID_CLOSE_NEIGHBORS_CU_ | 6fbc6ff741810adcbe3f2a5fca25b37a84aa7f30.cu | #ifndef _STEER_TO_AVOID_CLOSE_NEIGHBORS_CU_
#define _STEER_TO_AVOID_CLOSE_NEIGHBORS_CU_
#include <cuda_runtime.h>
#include "OpenSteer/VehicleData.h"
#include "OpenSteer/NeighborData.h"
#include "CUDAKernelOptions.cu"
#include "CUDAVectorUtilities.cu"
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define N_I(i) (CUT_BANK_CHECKER(((int*)neighbor), i))
#define F_F(i) (CUT_BANK_CHECKER(((float*)forward), i))
#define P_F(i) (CUT_BANK_CHECKER(((float*)position), i))
#define S_F(i) (CUT_BANK_CHECKER(((float*)steering), i))
#define N(i) (CUT_BANK_CHECKER(neighbor, i))
#define F(i) (CUT_BANK_CHECKER(forward, i))
#define P(i) (CUT_BANK_CHECKER(position, i))
#define S(i) (CUT_BANK_CHECKER(steering, i))
#define SP(i) (CUT_BANK_CHECKER(speed, i))
#else
#define N_I(i) ((int*)neighbor)[i]
#define F_F(i) ((float*)forward)[i]
#define P_F(i) ((float*)position)[i]
#define S_F(i) ((float*)steering)[i]
#define N(i) neighbor[i]
#define F(i) forward[i]
#define P(i) position[i]
#define S(i) steering[i]
#define SP(i) speed[i]
#endif
__global__ void
steerToAvoidCloseNeighbors(VehicleData *vehicleData, VehicleConst *vehicleConst, float3 *steeringVectors, NeighborData *neighbors, float minSeparationDistance, float weight, kernel_options options)
{
int id = (blockIdx.x * blockDim.x + threadIdx.x);
int blockOffset = (blockDim.x * blockIdx.x * 3);
int blockOffsetNeighbors = (blockDim.x * blockIdx.x * (MAX_NEIGHBORS + 1));
// shared memory for NeighborData
__shared__ NeighborData neighbor[TPB];
// shared memory for steering vectors
__shared__ float3 steering[TPB];
S(threadIdx.x) = make_float3(0.f, 0.f, 0.f);
__syncthreads();
// copy neighbor data from global memory (coalesced)
int i;
for (i = 0; i < (sizeof(NeighborData) / sizeof(int)); i++) {
N_I(threadIdx.x + i*blockDim.x) = ((int*)neighbors)[blockOffsetNeighbors + threadIdx.x + i*blockDim.x];
}
__syncthreads();
// for each of the other vehicles...
for (i = 0; i < N(threadIdx.x).numOfNeighbors; i++)
{
int idOfNeighbor = N(threadIdx.x).idsOfNeighbors[i];
float sumOfRadii = (*vehicleConst).radius[id] + (*vehicleConst).radius[idOfNeighbor];
float minCenterToCenter = minSeparationDistance + sumOfRadii;
float3 offset = float3Sub((*vehicleData).position[idOfNeighbor], (*vehicleData).position[id]);
float currentDistance = float3Length(offset);
if (currentDistance < minCenterToCenter)
{
offset = float3Mul(offset, -1.f);
S(threadIdx.x) = float3PerpendicularComponent(offset, (*vehicleData).forward[id]);
break;
}
}
// multiply by weight
S(threadIdx.x) = float3Mul(S(threadIdx.x), weight);
if ((options & IGNORE_UNLESS_ZERO) != 0
&& (steeringVectors[id].x != 0.f
|| steeringVectors[id].y != 0.f
|| steeringVectors[id].z != 0.f))
{
S(threadIdx.x) = steeringVectors[id];
} else {
S(threadIdx.x) = float3Add(S(threadIdx.x), steeringVectors[id]);
}
__syncthreads();
// writing back to global memory (coalesced)
((float*)steeringVectors)[blockOffset + threadIdx.x] = S_F(threadIdx.x);
((float*)steeringVectors)[blockOffset + threadIdx.x + blockDim.x] = S_F(threadIdx.x + blockDim.x);
((float*)steeringVectors)[blockOffset + threadIdx.x + 2*blockDim.x] = S_F(threadIdx.x + 2*blockDim.x);
}
#endif // _STEER_TO_AVOID_CLOSE_NEIGHBORS_CU_ |
7c4201f53a419ebc2413e95e9c87716afc7235ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <transform.h>
__device__ double op(double d1,double *params) {
return -d1;
}
extern "C"
__global__ void neg_strided_double(int n,int idx,double *dy,int incy,double *params,double *result) {
transform(n,idx,dy,incy,params,result);
}
| 7c4201f53a419ebc2413e95e9c87716afc7235ab.cu | #include <transform.h>
__device__ double op(double d1,double *params) {
return -d1;
}
extern "C"
__global__ void neg_strided_double(int n,int idx,double *dy,int incy,double *params,double *result) {
transform(n,idx,dy,incy,params,result);
}
|
33ede4ef504dbf35660bea402ac053102d85f42c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal z -> c d s
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
This file implements upper case, and is called by ztrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "common_magma.h"
#include "ztrtri.h"
/*
This inverts the diagonal IB by IB inner blocks of A,
and stores the results in d_dinvA.
Each thread block with IB threads does one inner block.
Each thread deals with one row of the inner block.
*/
static __device__ void
ztrtri_diag_upper_device(
magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int blk_ind = bx*IB;
//int ind = blk_ind + tx;
A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind)
// TODO sB should be [IB][IB+1] to avoid bank conflicts, right?
__shared__ magmaDoubleComplex sB[IB*IB];
magmaDoubleComplex y_tx;
// load upper triangle of inner block of A; zero lower triangle & outside matrix
#pragma unroll
for( int j=0; j < IB; j++ ) {
if (tx <= j && blk_ind + j < n) {
sB[tx + j*IB] = A[tx + j*lda];
}
else {
sB[tx + j*IB] = MAGMA_Z_ZERO;
}
}
__syncthreads();
// invert the diagonal
if (diag == MagmaUnit) {
sB[tx + tx*IB] = MAGMA_Z_ONE;
}
else {
if ( sB[tx + tx*IB] == MAGMA_Z_ZERO ) { // singular or outside matrix
sB[tx + tx*IB] = MAGMA_Z_ONE;
}
else {
sB[tx + tx*IB] = MAGMA_Z_ONE / sB[tx + tx*IB];
}
}
// compute elements 0:j-1 of j-th column.
for( int j=1; j < IB; j++ ) {
if ( tx < j ) {
// trmv: y = sB(0:j-1, 0:j-1) * sB(0:j-1, j)
// each thread sums one element, y[tx]
y_tx = MAGMA_Z_ZERO;
#pragma unroll
for( int k=0; k < j; k++ )
y_tx += sB[tx + k*IB] * sB[k + j*IB];
// scal: sB(0:j-1, j) = -sB(j,j) * y
sB[tx + j*IB] = -sB[j + j*IB] * y_tx;
}
__syncthreads();
}
// go to the (bx / ib_per_NB) outer NB*NB block,
// then the (bx % ib_per_NB) inner IB*IB block inside that.
int ib_per_NB = NB/IB;
d_dinvA += (bx / ib_per_NB)*NB*NB
+ (bx % ib_per_NB)*(NB*IB + IB);
// write result
#pragma unroll
for( int j=0; j < IB; j++ ) {
d_dinvA[tx + j*NB] = sB[tx + j*IB];
}
}
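// Editorial worked example (illustrative, not part of the original MAGMA
// source): for a 2x2 unit-diagonal block [1 a; 0 1] the column loop above
// yields [1 -a; 0 1]. In general, column j of the inverse is built from the
// already inverted leading block via the trmv/scal pair:
//     B(0:j-1, j) = -B(j,j) * B(0:j-1, 0:j-1) * A(0:j-1, j).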
/*
Let A be an NB*NB upper triangular matrix, and B its inverse.
Then the block decomposition
    [ A11 A12 ] * [ B11 B12 ] = [ I 0 ]
    [  0  A22 ]   [  0  B22 ]   [ 0 I ]
yields
A11*B11 = I ==> B11 = A11^{-1},
A22*B22 = I ==> B22 = A22^{-1},
A11*B12 + A12*B22 = 0 ==> B12 = -A11^{-1}*A12*B22 = -B11*A12*B22.
ztrtri_diag_kernel inverts A11 and A22.
triple_zgemm16 routines multiply:
part 1: B12 = A12 * B22,
part 2: B12 = -B11 * B12.
At this level, inner block is jb=16, with one 4x4 thread block per inner block.
Each submatrix Aij and Bij is jb x jb.
The submatrix dimension is multiplied by 2 at each level,
so the next level is jb*2 = 32.
A "page" is the next bigger block, here jb*2=32,
                   [ B11 B12 ]
    which contains [  0  B22 ].
Outer blocks are NB x NB.
A12 may have < jb cols, but is guaranteed to have jb rows since A22 is on
the bottom. Unfortunately, this means checking every single reference. We
could easily verify that A12 is full, and select between a fast version
without checks and a slow version with checks.
B is stored in workspace that is a full multiple of NB x NB; no checks needed.
We split this into part1 & part2 to synchronize all blocks and make sure
that writes to B12 are observed by all blocks.
*/
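/*
    Editorial note (illustrative, not part of the original MAGMA source):
    assuming, for illustration, IB = 16 and NB = 128, one sweep looks like
        ztrtri_diag_upper_kernel inverts the eight 16x16 diagonal blocks,
        triple_zgemm16 part1+part2 combine them into 32x32 inverses,
        triple_zgemm32 part1+part2 combine those into 64x64 inverses,
        triple_zgemm64 part1+part2 combine those into the full 128x128
        inverse of each outer NB x NB block, doubling jb at every level.
*/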
/*
* B12 = A12 * B22
*/
static __device__ void
triple_zgemm16_part1_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_zgemm16_part2_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int lda = NB; // shadows lda argument
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
static __device__ void
triple_zgemm32_part1_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_zgemm32_part2_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
//int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
static __device__ void
triple_zgemm64_part1_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_zgemm64_part2_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
//int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
static __device__ void
triple_zgemm_above64_part1_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int ldb = NB;
int ldc = NB;
// For jb > 64, we process B12 as gridDim.x sections of 64 rows each, with gridDim.x > 1.
// Each section needs all of the B matrix, so C cannot overwrite B.
        // Therefore, store B12 temporarily in the previously unused B21 matrix
        // (i.e., below diagonal), then in part 3, zero out B21.
//
// Kernels with jb <= 64 don't have this problem, because only the
// NT x 16 section of C that overwrites the same section of B depends
// on that section of B.
//
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb; // B12; write to B21 temp location
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_zgemm_above64_part2_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
//int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
B = d_dinvA + jb; // B12, read from B21 temp location
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* zero out B21 temp location
*/
static __device__ void
triple_zgemm_above64_part3_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part three---------------------------//
{
// zero out B21 temp location
magmaDoubleComplex *B21;
int ldb = NB;
B21 = d_dinvA + jb;
B21 += ibx + id + iby*ldb;
#pragma unroll
for( int i = 0; i < 16; i++ ) {
B21[i*ldb] = MAGMA_Z_ZERO;
}
}
}
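// Editorial note (illustrative, not part of the original MAGMA source): for
// jb > 64 the three routines above form one logical update: part 1 writes
// A12*B22 into the otherwise unused B21 slot of the workspace, part 2 reads it
// back and stores -B11*(A12*B22) into the real B12 slot, and part 3 clears the
// B21 scratch area so the workspace stays upper triangular for the next level.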
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
ztrtri_diag_upper_kernel(
magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA)
{
ztrtri_diag_upper_device(diag, n, A, lda, d_dinvA);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm16_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm16_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm16_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm16_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm32_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm32_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm32_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm32_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm64_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm64_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm_above64_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm_above64_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm_above64_part3_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part3_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
ztrtri_diag_upper_kernel_batched(
magma_diag_t diag, int n, magmaDoubleComplex const * const * dA_array, int lda, magmaDoubleComplex **dinvA_array)
{
int batchid = blockIdx.z;
ztrtri_diag_upper_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm16_part1_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm16_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm16_part2_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm16_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm32_part1_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm32_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm32_part2_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm32_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm64_part1_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm64_part2_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm_above64_part1_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm_above64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm_above64_part2_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm_above64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm_above64_part3_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm_above64_part3_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
| 33ede4ef504dbf35660bea402ac053102d85f42c.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal z -> c d s
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
This file implements upper case, and is called by ztrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "common_magma.h"
#include "ztrtri.h"
/*
This inverts the diagonal IB by IB inner blocks of A,
and stores the results in d_dinvA.
Each thread block with IB threads does one inner block.
Each thread deals with one row of the inner block.
*/
static __device__ void
ztrtri_diag_upper_device(
magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int blk_ind = bx*IB;
//int ind = blk_ind + tx;
A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind)
// TODO sB should be [IB][IB+1] to avoid bank conflicts, right?
__shared__ magmaDoubleComplex sB[IB*IB];
magmaDoubleComplex y_tx;
// load upper triangle of inner block of A; zero lower triangle & outside matrix
#pragma unroll
for( int j=0; j < IB; j++ ) {
if (tx <= j && blk_ind + j < n) {
sB[tx + j*IB] = A[tx + j*lda];
}
else {
sB[tx + j*IB] = MAGMA_Z_ZERO;
}
}
__syncthreads();
// invert the diagonal
if (diag == MagmaUnit) {
sB[tx + tx*IB] = MAGMA_Z_ONE;
}
else {
if ( sB[tx + tx*IB] == MAGMA_Z_ZERO ) { // singular or outside matrix
sB[tx + tx*IB] = MAGMA_Z_ONE;
}
else {
sB[tx + tx*IB] = MAGMA_Z_ONE / sB[tx + tx*IB];
}
}
// compute elements 0:j-1 of j-th column.
for( int j=1; j < IB; j++ ) {
if ( tx < j ) {
// trmv: y = sB(0:j-1, 0:j-1) * sB(0:j-1, j)
// each thread sums one element, y[tx]
y_tx = MAGMA_Z_ZERO;
#pragma unroll
for( int k=0; k < j; k++ )
y_tx += sB[tx + k*IB] * sB[k + j*IB];
// scal: sB(0:j-1, j) = -sB(j,j) * y
sB[tx + j*IB] = -sB[j + j*IB] * y_tx;
}
__syncthreads();
}
// go to the (bx / ib_per_NB) outer NB*NB block,
// then the (bx % ib_per_NB) inner IB*IB block inside that.
int ib_per_NB = NB/IB;
d_dinvA += (bx / ib_per_NB)*NB*NB
+ (bx % ib_per_NB)*(NB*IB + IB);
// write result
#pragma unroll
for( int j=0; j < IB; j++ ) {
d_dinvA[tx + j*NB] = sB[tx + j*IB];
}
}
/*
Let A be an NB*NB upper triangular matrix, and B its inverse.
Then the block decomposition
[ A11 A12 ] * [ B11 B12 ] = [ I 0 ]
[ 0 A22 ] [ 0 B22 ] [ 0 I ]
yields
A11*B11 = I ==> B11 = A11^{-1},
A22*B22 = I ==> B22 = A22^{-1},
A11*B12 + A12*B22 = 0 ==> B12 = -A11^{-1}*A12*B22 = -B11*A12*B22.
ztrtri_diag_kernel inverts A11 and A22.
triple_zgemm16 routines multiply:
part 1: B12 = A12 * B22,
part 2: B12 = -B11 * B12.
At this level, inner block is jb=16, with one 4x4 thread block per inner block.
Each submatrix Aij and Bij is jb x jb.
The submatrix dimension is multiplied by 2 at each level,
so the next level is jb*2 = 32.
A "page" is the next bigger block, here jb*2=32,
[ B11 B12 ]
which contains [ 0 B22 ].
Outer blocks are NB x NB.
A12 may have < jb cols, but is guaranteed to have jb rows since A22 is on
the bottom. Unfortunately, this means checking every single reference. We
could easily verify that A12 is full, and select between a fast version
without checks and a slow version with checks.
B is stored in workspace that is a full multiple of NB x NB; no checks needed.
We split this into part1 & part2 to synchronize all blocks and make sure
that writes to B12 are observed by all blocks.
*/
/*
* B12 = A12 * B22
*/
static __device__ void
triple_zgemm16_part1_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_zgemm16_part2_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int lda = NB; // shadows lda argument
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
static __device__ void
triple_zgemm32_part1_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_zgemm32_part2_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
//int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
static __device__ void
triple_zgemm64_part1_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_zgemm64_part2_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
//int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
static __device__ void
triple_zgemm_above64_part1_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int ldb = NB;
int ldc = NB;
// For jb > 64, we process B12 as gridDim.x sections of 64 rows each, with gridDim.x > 1.
// Each section needs all of the B matrix, so C cannot overwrite B.
// Therefore, store B21 temporarily in the previously unused B12 matrix
// (i.e., above diagonal), then in part 3, zero out B12.
//
// Kernels with jb <= 64 don't have this problem, because only the
// NT x 16 section of C that overwrites the same section of B depends
// on that section of B.
//
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb; // B12; write to B21 temp location
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
zaxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
zaxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
zaxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
zaxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
zaxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
zaxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
zaxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
zaxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
zaxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
zaxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
zaxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_zgemm_above64_part2_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
//int col = page*jb*2 + jb;
__shared__ magmaDoubleComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaDoubleComplex *A, *B;
magmaDoubleComplex *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
B = d_dinvA + jb; // B12, read from B21 temp location
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaDoubleComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaDoubleComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaDoubleComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
zaxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
zaxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
zaxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
zaxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
zaxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
zaxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
zaxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
zaxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
zaxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
zaxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
zaxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
zaxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
zaxpy16( rA[0], &sB[12][0], rC );
zaxpy16( rA[1], &sB[13][0], rC );
zaxpy16( rA[2], &sB[14][0], rC );
zaxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* zero out B21 temp location
*/
static __device__ void
triple_zgemm_above64_part3_upper_device(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part three---------------------------//
{
// zero out B21 temp location
magmaDoubleComplex *B21;
int ldb = NB;
B21 = d_dinvA + jb;
B21 += ibx + id + iby*ldb;
#pragma unroll
for( int i = 0; i < 16; i++ ) {
B21[i*ldb] = MAGMA_Z_ZERO;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
ztrtri_diag_upper_kernel(
magma_diag_t diag, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *d_dinvA)
{
ztrtri_diag_upper_device(diag, n, A, lda, d_dinvA);
}
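////////////////////////////////////////////////////////////////////////////////////////////////////
// Minimal host-side launch sketch, added for illustration only. The function
// name and its use are assumptions, NOT the MAGMA driver (magmablas_ztrtri_diag),
// which also sizes d_dinvA and runs the triple_zgemm sweeps afterwards.
static void
launch_ztrtri_diag_upper_example(
    magma_diag_t diag, int n, const magmaDoubleComplex *dA, int ldda,
    magmaDoubleComplex *d_dinvA)
{
    int nblocks = (n + IB - 1) / IB;  // one thread block per IB x IB diagonal tile
    ztrtri_diag_upper_kernel<<< nblocks, IB >>>( diag, n, dA, ldda, d_dinvA );
}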
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm16_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm16_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm16_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm16_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm32_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm32_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm32_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm32_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm64_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm64_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm_above64_part1_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm_above64_part2_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm_above64_part3_upper_kernel(
int n, const magmaDoubleComplex *Ain, int lda, magmaDoubleComplex *d_dinvA, int jb, int npages)
{
triple_zgemm_above64_part3_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
ztrtri_diag_upper_kernel_batched(
magma_diag_t diag, int n, magmaDoubleComplex const * const * dA_array, int lda, magmaDoubleComplex **dinvA_array)
{
int batchid = blockIdx.z;
ztrtri_diag_upper_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm16_part1_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm16_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm16_part2_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm16_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm32_part1_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm32_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm32_part2_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm32_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm64_part1_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm64_part2_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm_above64_part1_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm_above64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm_above64_part2_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm_above64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_zgemm_above64_part3_upper_kernel_batched(
int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_zgemm_above64_part3_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
|
0ad62a6c6c8dc584772629997c79bca0822b3cc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <omp.h>
#include <stdio.h>
#include <iostream>  // std::cout / std::endl used for kernel timing output
#include <cstring>   // memcpy used in the skipper variants
#include <algorithm> // std::min used for chunk boundaries
using namespace std;
template <class T>
double cpu_perman64_sparse(int* cptrs, int* rows, T* cvals, double x[], int nov, long long start, long long end, int threads) {
double p = 0; //accumulated permanent contribution, summed over all threads
long long one = 1;
long long chunk_size = (end - start) / threads + 1;
omp_set_num_threads(threads);
#pragma omp parallel
{
double my_x[nov];
for (int i = 0; i < nov; i++) {
my_x[i] = x[i];
}
int tid = omp_get_thread_num();
long long my_start = start + tid * chunk_size;
long long my_end = min(start + ((tid+1) * chunk_size), end);
int s; //+1 or -1
double prod; //product of the elements in vector 'x'
double my_p = 0;
long long i = my_start;
long long gray = (i-1) ^ ((i-1) >> 1);
for (int k = 0; k < (nov-1); k++) {
if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
my_x[rows[j]] += cvals[j]; // see Nijenhuis and Wilf - update x vector entries
}
}
}
prod = 1.0;
int zero_num = 0;
for (int j = 0; j < nov; j++) {
if (my_x[j] == 0) {
zero_num++;
} else {
prod *= my_x[j]; //product of the elements in vector 'x'
}
}
int k;
int prodSign = 1;
if(i & 1LL) {
prodSign = -1;
}
while (i < my_end) {
//compute the gray code
k = __builtin_ctzll(i);
gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,...
//decide whether to add or subtract - if the kth bit of gray is one then +1, otherwise -1
s = ((one << k) & gray) ? 1 : -1;
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
if (my_x[rows[j]] == 0) {
zero_num--;
my_x[rows[j]] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
prod *= my_x[rows[j]]; //product of the elements in vector 'x'
} else {
prod /= my_x[rows[j]];
my_x[rows[j]] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
if (my_x[rows[j]] == 0) {
zero_num++;
} else {
prod *= my_x[rows[j]]; //product of the elements in vector 'x'
}
}
}
if(zero_num == 0) {
my_p += prodSign * prod;
}
prodSign *= -1;
i++;
}
#pragma omp atomic
p += my_p;
}
return p;
}
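// Note added for clarity (not part of the original code): the loop above walks
// column subsets in Gray-code order, so consecutive iterates differ in exactly
// one column. At step i the flipped column is k = __builtin_ctzll(i), and its
// new state is bit k of the updated gray word. Example: going from i = 5
// (gray = 111b) to i = 6 (gray = 101b), k = ctz(6) = 1, so only column 1
// changes (here it is removed, s = -1) and only the rows listed between
// cptrs[1] and cptrs[2] of my_x are touched.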
template <class T>
double cpu_perman64_skipper(int *rptrs, int *cols, int* cptrs, int* rows, T* cvals, double x[], int nov, long long start, long long end, int threads) {
//the initial x vector is copied into each thread's private my_x below
double p;
int j, ptr;
unsigned long long ci, chunk_size, change_j;
p = 0;
int no_chunks = 512;
chunk_size = (end - start + 1) / no_chunks + 1;
#pragma omp parallel num_threads(threads) private(j, ci, change_j)
{
double my_x[nov];
#pragma omp for schedule(dynamic, 1)
for(int cid = 0; cid < no_chunks; cid++) {
// int tid = omp_get_thread_num();
unsigned long long my_start = start + cid * chunk_size;
unsigned long long my_end = min(start + ((cid+1) * chunk_size), end);
//update if neccessary
double my_p = 0;
unsigned long long my_gray;
unsigned long long my_prev_gray = 0;
memcpy(my_x, x, sizeof(double) * nov);
int ptr, last_zero;
unsigned long long period, steps, step_start;
unsigned long long i = my_start;
while (i < my_end) {
//k = __builtin_ctzll(i + 1);
my_gray = i ^ (i >> 1);
unsigned long long gray_diff = my_prev_gray ^ my_gray;
j = 0;
while(gray_diff > 0) { // this contains the bit to be updated
unsigned long long onej = 1ULL << j;
if(gray_diff & onej) { // if bit l is changed
gray_diff ^= onej; // unset bit
if(my_gray & onej) { // do the update
for (ptr = cptrs[j]; ptr < cptrs[j + 1]; ptr++) {
my_x[rows[ptr]] += cvals[ptr];
}
}
else {
for (ptr = cptrs[j]; ptr < cptrs[j + 1]; ptr++) {
my_x[rows[ptr]] -= cvals[ptr];
}
}
}
j++;
}
//counter++;
my_prev_gray = my_gray;
last_zero = -1;
double my_prod = 1;
for(j = nov - 1; j >= 0; j--) {
my_prod *= my_x[j];
if(my_x[j] == 0) {
last_zero = j;
break;
}
}
if(my_prod != 0) {
my_p += ((i&1ULL)? -1.0:1.0) * my_prod;
i++;
}
else {
change_j = -1; // wraps to ULLONG_MAX - sentinel meaning 'no skip target found yet'
for (ptr = rptrs[last_zero]; ptr < rptrs[last_zero + 1]; ptr++) {
step_start = 1ULL << cols[ptr];
period = step_start << 1;
ci = step_start;
if(i >= step_start) {
steps = (i - step_start) / period;
ci = step_start + ((steps + 1) * period);
}
if(ci < change_j) {
change_j = ci;
}
}
i++;
if(change_j > i) {
i = change_j;
}
}
}
#pragma omp critical
p += my_p;
}
}
return p;
}
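// Note added for clarity (not part of the original code): the skipping logic
// relies on the fact that column c's membership in the Gray code flips exactly
// at i = 2^c, 3*2^c, 5*2^c, ... (period 2^(c+1), first flip at 2^c); the
// step_start/period/ci arithmetic above finds the first such flip strictly
// after the current i. Example: for c = 2 and i = 13, step_start = 4,
// period = 8, steps = 1, ci = 20. Since the zeroed row can only become nonzero
// when one of its columns flips, everything up to the smallest ci over that
// row's nonzero columns is skipped.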
template <class T>
__global__ void kernel_xlocal_sparse(int* cptrs, int* rows, T* cvals, double* x, double* p, int nov) {
float my_x[40]; // thread-local copy of x; the fixed size assumes nov <= 40
for (int k = 0; k < nov; k++) {
my_x[k] = x[k];
}
long long number_of_threads = blockDim.x * gridDim.x;
long long one = 1;
long long start = 1;
long long end = (1LL << (nov-1));
long long chunk_size = end / number_of_threads + 1;
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
long long my_start = start + tid * chunk_size;
long long my_end = min(start + ((tid+1) * chunk_size), end);
int s; //+1 or -1
double prod; //product of the elements in vector 'x'
double my_p = 0;
long long i = my_start;
long long gray = (i-1) ^ ((i-1) >> 1);
for (int k = 0; k < (nov-1); k++) {
if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
my_x[rows[j]] += cvals[j]; // see Nijenhuis and Wilf - update x vector entries
}
}
}
prod = 1.0;
int zero_num = 0;
for (int j = 0; j < nov; j++) {
if (my_x[j] == 0) {
zero_num++;
} else {
prod *= my_x[j]; //product of the elements in vector 'x'
}
}
long long gray_diff;
int k;
int prodSign = 1;
if(i & 1LL) {
prodSign = -1;
}
while (i < my_end) {
gray_diff = (i ^ (i >> 1)) ^ gray;
k = __ffsll(gray_diff) - 1;
gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,...
//decide whether to add or subtract - if the kth bit of gray is one then +1, otherwise -1
s = ((one << k) & gray) ? 1 : -1;
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
if (my_x[rows[j]] == 0) {
zero_num--;
my_x[rows[j]] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
prod *= my_x[rows[j]]; //product of the elements in vector 'x'
} else {
prod /= my_x[rows[j]];
my_x[rows[j]] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
if (my_x[rows[j]] == 0) {
zero_num++;
} else {
prod *= my_x[rows[j]]; //product of the elements in vector 'x'
}
}
}
if(zero_num == 0) {
my_p += prodSign * prod;
}
prodSign *= -1;
i++;
}
p[tid] = my_p;
}
template <class T>
__global__ void kernel_xshared_sparse(int* cptrs, int* rows, T* cvals, double* x, double* p, int nov) {
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int thread_id = threadIdx.x;
extern __shared__ float shared_mem[];
float *my_x = shared_mem; // size = nov * BLOCK_SIZE
for (int k = 0; k < nov; k++) {
my_x[thread_id*nov + k] = x[k];
}
long long number_of_threads = blockDim.x * gridDim.x;
long long one = 1;
long long start = 1;
long long end = (1LL << (nov-1));
long long chunk_size = end / number_of_threads + 1;
long long my_start = start + tid * chunk_size;
long long my_end = min(start + ((tid+1) * chunk_size), end);
int s; //+1 or -1
double prod; //product of the elements in vector 'x'
double my_p = 0;
long long i = my_start;
long long gray = (i-1) ^ ((i-1) >> 1);
for (int k = 0; k < (nov-1); k++) {
if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
my_x[thread_id*nov + rows[j]] += cvals[j]; // see Nijenhuis and Wilf - update x vector entries
}
}
}
prod = 1.0;
int zero_num = 0;
for (int j = 0; j < nov; j++) {
if (my_x[thread_id*nov + j] == 0) {
zero_num++;
} else {
prod *= my_x[thread_id*nov + j]; //product of the elements in vector 'x'
}
}
long long gray_diff;
int k;
int prodSign = 1;
if(i & 1LL) {
prodSign = -1;
}
while (i < my_end) {
gray_diff = (i ^ (i >> 1)) ^ gray;
k = __ffsll(gray_diff) - 1;
gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,...
//decide whether to add or subtract - if the kth bit of gray is one then +1, otherwise -1
s = ((one << k) & gray) ? 1 : -1;
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
if (my_x[thread_id*nov + rows[j]] == 0) {
zero_num--;
my_x[thread_id*nov + rows[j]] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
prod *= my_x[thread_id*nov + rows[j]]; //product of the elements in vector 'x'
} else {
prod /= my_x[thread_id*nov + rows[j]];
my_x[thread_id*nov + rows[j]] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
if (my_x[thread_id*nov + rows[j]] == 0) {
zero_num++;
} else {
prod *= my_x[thread_id*nov + rows[j]]; //product of the elements in vector 'x'
}
}
}
if(zero_num == 0) {
my_p += prodSign * prod;
}
prodSign *= -1;
i++;
}
p[tid] = my_p;
}
template <class T>
__global__ void kernel_xshared_coalescing_sparse(int* cptrs, int* rows, T* cvals, double* x, double* p, int nov) {
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int thread_id = threadIdx.x;
int block_dim = blockDim.x;
extern __shared__ float shared_mem[];
float *my_x = shared_mem; // size = nov * BLOCK_SIZE
for (int k = 0; k < nov; k++) {
my_x[block_dim*k + thread_id] = x[k];
}
long long number_of_threads = blockDim.x * gridDim.x;
long long one = 1;
long long start = 1;
long long end = (1LL << (nov-1));
long long chunk_size = end / number_of_threads + 1;
long long my_start = start + tid * chunk_size;
long long my_end = min(start + ((tid+1) * chunk_size), end);
int s; //+1 or -1
double prod; //product of the elements in vector 'x'
double my_p = 0;
long long i = my_start;
long long gray = (i-1) ^ ((i-1) >> 1);
for (int k = 0; k < (nov-1); k++) {
if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
my_x[block_dim*rows[j] + thread_id] += cvals[j]; // see Nijenhuis and Wilf - update x vector entries
}
}
}
prod = 1.0;
int zero_num = 0;
for (int j = 0; j < nov; j++) {
if (my_x[block_dim*j + thread_id] == 0) {
zero_num++;
} else {
prod *= my_x[block_dim*j + thread_id]; //product of the elements in vector 'x'
}
}
long long gray_diff;
int k;
int prodSign = 1;
if(i & 1LL) {
prodSign = -1;
}
while (i < my_end) {
gray_diff = (i ^ (i >> 1)) ^ gray;
k = __ffsll(gray_diff) - 1;
gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,...
//decide whether to add or subtract - if the kth bit of gray is one then +1, otherwise -1
s = ((one << k) & gray) ? 1 : -1;
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
if (my_x[block_dim*rows[j] + thread_id] == 0) {
zero_num--;
my_x[block_dim*rows[j] + thread_id] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
prod *= my_x[block_dim*rows[j] + thread_id]; //product of the elements in vector 'x'
} else {
prod /= my_x[block_dim*rows[j] + thread_id];
my_x[block_dim*rows[j] + thread_id] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
if (my_x[block_dim*rows[j] + thread_id] == 0) {
zero_num++;
} else {
prod *= my_x[block_dim*rows[j] + thread_id]; //product of the elements in vector 'x'
}
}
}
if(zero_num == 0) {
my_p += prodSign * prod;
}
prodSign *= -1;
i++;
}
p[tid] = my_p;
}
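// Note added for clarity (not part of the original code): the only change from
// kernel_xshared_sparse is the shared-memory layout. Entry k of thread t is
// stored at my_x[blockDim.x*k + t] instead of my_x[t*nov + k], so when the
// threads of a warp read or update the same logical index k they touch
// consecutive shared-memory words, which reduces bank conflicts.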
template <class T>
__global__ void kernel_xshared_coalescing_mshared_sparse(int* cptrs, int* rows, T* cvals, double* x, double* p, int nov, int total, long long start, long long end) {
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int thread_id = threadIdx.x;
int block_dim = blockDim.x;
extern __shared__ float shared_mem[];
float *my_x = shared_mem; // size = nov * BLOCK_SIZE
int *shared_cptrs = (int*) &my_x[nov * block_dim]; // size = nov + 1
int *shared_rows = (int*) &shared_cptrs[nov + 1]; // size = total num of elts
T *shared_cvals = (T*) &shared_rows[total]; // size = total num of elts
for (int k = 0; k < nov; k++) {
my_x[block_dim*k + thread_id] = x[k];
shared_cptrs[k] = cptrs[k];
}
shared_cptrs[nov] = cptrs[nov];
for (int k = 0; k < total; k++) {
shared_rows[k] = rows[k];
shared_cvals[k] = cvals[k];
}
__syncthreads();
long long number_of_threads = blockDim.x * gridDim.x;
long long one = 1;
long long chunk_size = (end - start) / number_of_threads + 1;
long long my_start = start + tid * chunk_size;
long long my_end = min(start + ((tid+1) * chunk_size), end);
int s; //+1 or -1
double prod; //product of the elements in vector 'x'
double my_p = 0;
long long i = my_start;
long long gray = (i-1) ^ ((i-1) >> 1);
for (int k = 0; k < (nov-1); k++) {
if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not
for (int j = shared_cptrs[k]; j < shared_cptrs[k+1]; j++) {
my_x[block_dim*shared_rows[j] + thread_id] += shared_cvals[j]; // see Nijenhuis and Wilf - update x vector entries
}
}
}
prod = 1.0;
int zero_num = 0;
for (int j = 0; j < nov; j++) {
if (my_x[block_dim*j + thread_id] == 0) {
zero_num++;
} else {
prod *= my_x[block_dim*j + thread_id]; //product of the elements in vector 'x'
}
}
long long gray_diff;
int k;
int prodSign = 1;
if(i & 1LL) {
prodSign = -1;
}
while (i < my_end) {
gray_diff = (i ^ (i >> 1)) ^ gray;
k = __ffsll(gray_diff) - 1;
gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,...
//decide whether to add or subtract - if the kth bit of gray is one then +1, otherwise -1
s = ((one << k) & gray) ? 1 : -1;
for (int j = shared_cptrs[k]; j < shared_cptrs[k+1]; j++) {
if (my_x[block_dim*shared_rows[j] + thread_id] == 0) {
zero_num--;
my_x[block_dim*shared_rows[j] + thread_id] += s * shared_cvals[j]; // see Nijenhuis and Wilf - update x vector entries
prod *= my_x[block_dim*shared_rows[j] + thread_id]; //product of the elements in vector 'x'
} else {
prod /= my_x[block_dim*shared_rows[j] + thread_id];
my_x[block_dim*shared_rows[j] + thread_id] += s * shared_cvals[j]; // see Nijenhuis and Wilf - update x vector entries
if (my_x[block_dim*shared_rows[j] + thread_id] == 0) {
zero_num++;
} else {
prod *= my_x[block_dim*shared_rows[j] + thread_id]; //product of the elements in vector 'x'
}
}
}
if(zero_num == 0) {
my_p += prodSign * prod;
}
prodSign *= -1;
i++;
}
p[tid] = my_p;
}
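// Sizing note added for illustration (the variable name below is an assumption,
// not taken from the original host code): the dynamic shared-memory size passed
// at launch must cover at least the four arrays carved out of shared_mem above,
//
//   size_t shared_bytes = nov * block_dim * sizeof(float)   // my_x
//                       + (nov + 1) * sizeof(int)           // shared_cptrs
//                       + total * sizeof(int)               // shared_rows
//                       + total * sizeof(T);                // shared_cvals
//
// and is supplied as the shared-memory argument of hipLaunchKernelGGL in the
// corresponding host wrapper.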
template <class T>
__global__ void kernel_xshared_coalescing_mshared_skipper(int* rptrs, int* cols, int* cptrs, int* rows, T* cvals, double* x, double* p, int nov, int total, long long start, long long end) {
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int thread_id = threadIdx.x;
int block_dim = blockDim.x;
extern __shared__ float shared_mem[];
float *my_x = shared_mem; // size = nov * BLOCK_SIZE
int *shared_rptrs = (int*) &my_x[nov * block_dim]; // size = nov + 1
int *shared_cols = (int*) &shared_rptrs[nov + 1]; // size = total num of elts
int *shared_cptrs = (int*) &shared_cols[total]; // size = nov + 1
int *shared_rows = (int*) &shared_cptrs[nov + 1]; // size = total num of elts
T *shared_cvals = (T*) &shared_rows[total]; // size = total num of elts
for (int k = 0; k < nov; k++) {
my_x[block_dim*k + thread_id] = x[k];
shared_rptrs[k] = rptrs[k];
shared_cptrs[k] = cptrs[k];
}
shared_rptrs[nov] = rptrs[nov];
shared_cptrs[nov] = cptrs[nov];
for (int k = 0; k < total; k++) {
shared_cols[k] = cols[k];
shared_rows[k] = rows[k];
shared_cvals[k] = cvals[k];
}
__syncthreads();
long long number_of_threads = blockDim.x * gridDim.x;
long long chunk_size = (end - start) / number_of_threads + 1;
long long my_start = start + tid * chunk_size;
long long my_end = min(start + ((tid+1) * chunk_size), end);
double prod = 1.0; //product of the elements in vector 'x' (initialized to avoid an uninitialized read below)
double my_p = 0;
long long i = my_start;
long long prev_gray = 0;
long long gray;
int zero_num = 0;
for (int j = 0; j < nov; j++) {
if (my_x[block_dim*j + thread_id] == 0) {
zero_num++;
} else {
prod *= my_x[block_dim*j + thread_id]; //product of the elements in vector 'x'
}
}
long long gray_diff;
unsigned long long change_j, ci, period, steps, step_start;
int j = 0;
while (i < my_end) {
gray = i ^ (i >> 1);
gray_diff = prev_gray ^ gray;
j = 0;
while(gray_diff > 0) { // this contains the bit to be updated
long long onej = 1LL << j;
if(gray_diff & onej) { // if bit l is changed
gray_diff ^= onej; // unset bit
if(gray & onej) { // do the update
for (int ptr = shared_cptrs[j]; ptr < shared_cptrs[j + 1]; ptr++) {
my_x[block_dim*shared_rows[ptr] + thread_id] += shared_cvals[ptr];
}
}
else {
for (int ptr = shared_cptrs[j]; ptr < shared_cptrs[j + 1]; ptr++) {
my_x[block_dim*shared_rows[ptr] + thread_id] -= shared_cvals[ptr];
}
}
}
j++;
}
prev_gray = gray;
int last_zero = -1;
prod = 1.0;
for(j = nov - 1; j >= 0; j--) {
prod *= my_x[block_dim*j + thread_id];
if(my_x[block_dim*j + thread_id] == 0) {
last_zero = j;
break;
}
}
if(prod != 0) {
my_p += ((i&1LL)? -1.0:1.0) * prod;
i++;
}
else {
change_j = -1; // wraps to ULLONG_MAX - sentinel meaning 'no skip target found yet'
for (int ptr = shared_rptrs[last_zero]; ptr < shared_rptrs[last_zero + 1]; ptr++) {
step_start = 1ULL << shared_cols[ptr];
period = step_start << 1;
ci = step_start;
if(i >= step_start) {
steps = (i - step_start) / period;
ci = step_start + ((steps + 1) * period);
}
if(ci < change_j) {
change_j = ci;
}
}
i++;
if(change_j > i) {
i = change_j;
}
}
}
p[tid] = my_p;
}
template <class T>
double gpu_perman64_xlocal_sparse(T* mat, int* cptrs, int* rows, T* cvals, int nov, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
hipSetDevice(1);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
hipMalloc( &d_x, (nov) * sizeof(double));
hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
hipMalloc( &d_cptrs, (nov + 1) * sizeof(int));
hipMalloc( &d_rows, (total) * sizeof(int));
hipMalloc( &d_cvals, (total) * sizeof(T));
hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_rows, rows, (total) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_cvals, cvals, (total) * sizeof(T), hipMemcpyHostToDevice);
double stt = omp_get_wtime();
hipLaunchKernelGGL(( kernel_xlocal_sparse), dim3(grid_dim) , dim3(block_dim) , 0, 0, d_cptrs, d_rows, d_cvals, d_x, d_p, nov);
hipDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << " in " << (enn - stt) << endl;
hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_x);
hipFree(d_p);
hipFree(d_cptrs);
hipFree(d_rows);
hipFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p += h_p[i];
}
delete[] h_p;
return((4*(nov&1)-2) * p); // Nijenhuis-Wilf scaling: 2*(-1)^(nov-1)
}
template <class T>
double gpu_perman64_xshared_sparse(T* mat, int* cptrs, int* rows, T* cvals, int nov, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
hipSetDevice(1);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
hipMalloc( &d_x, (nov) * sizeof(double));
hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
hipMalloc( &d_cptrs, (nov + 1) * sizeof(int));
hipMalloc( &d_rows, (total) * sizeof(int));
hipMalloc( &d_cvals, (total) * sizeof(T));
hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_rows, rows, (total) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_cvals, cvals, (total) * sizeof(T), hipMemcpyHostToDevice);
double stt = omp_get_wtime();
hipLaunchKernelGGL(( kernel_xshared_sparse), dim3(grid_dim) , dim3(block_dim) , nov*block_dim*sizeof(float) , 0, d_cptrs, d_rows, d_cvals, d_x, d_p, nov);
hipDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << " in " << (enn - stt) << endl;
hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_x);
hipFree(d_p);
hipFree(d_cptrs);
hipFree(d_rows);
hipFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p += h_p[i];
}
delete[] h_p;
return((4*(nov&1)-2) * p);
}
template <class T>
double gpu_perman64_xshared_coalescing_sparse(T* mat, int* cptrs, int* rows, T* cvals, int nov, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
hipSetDevice(1);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
hipMalloc( &d_x, (nov) * sizeof(double));
hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
hipMalloc( &d_cptrs, (nov + 1) * sizeof(int));
hipMalloc( &d_rows, (total) * sizeof(int));
hipMalloc( &d_cvals, (total) * sizeof(T));
hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_rows, rows, (total) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_cvals, cvals, (total) * sizeof(T), hipMemcpyHostToDevice);
double stt = omp_get_wtime();
hipLaunchKernelGGL(( kernel_xshared_coalescing_sparse), dim3(grid_dim) , dim3(block_dim) , nov*block_dim*sizeof(float) , 0, d_cptrs, d_rows, d_cvals, d_x, d_p, nov);
hipDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << " in " << (enn - stt) << endl;
hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_x);
hipFree(d_p);
hipFree(d_cptrs);
hipFree(d_rows);
hipFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p += h_p[i];
}
delete[] h_p;
return((4*(nov&1)-2) * p);
}
template <class T>
double gpu_perman64_xshared_coalescing_mshared_sparse(T* mat, int* cptrs, int* rows, T* cvals, int nov, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
hipSetDevice(1);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
hipMalloc( &d_x, (nov) * sizeof(double));
hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
hipMalloc( &d_cptrs, (nov + 1) * sizeof(int));
hipMalloc( &d_rows, (total) * sizeof(int));
hipMalloc( &d_cvals, (total) * sizeof(T));
hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_rows, rows, (total) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_cvals, cvals, (total) * sizeof(T), hipMemcpyHostToDevice);
long long start = 1;
long long end = (1LL << (nov-1));
double stt = omp_get_wtime();
hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared_sparse), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) , 0, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, start, end);
hipDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << " in " << (enn - stt) << endl;
hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_x);
hipFree(d_p);
hipFree(d_cptrs);
hipFree(d_rows);
hipFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p += h_p[i];
}
delete[] h_p;
return((4*(nov&1)-2) * p);
}
template <class T>
double gpu_perman64_xshared_coalescing_mshared_multigpu_sparse(T* mat, int* cptrs, int* rows, T* cvals, int nov, int gpu_num, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
double p_partial[gpu_num];
for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) {
p_partial[gpu_id] = 0;
}
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
long long start = 1;
long long end = (1LL << (nov-1));
long long offset = (end - start) / gpu_num;
#pragma omp parallel for num_threads(gpu_num)
for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) {
hipSetDevice(gpu_id);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
hipMalloc( &d_x, (nov) * sizeof(double));
hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
hipMalloc( &d_cptrs, (nov + 1) * sizeof(int));
hipMalloc( &d_rows, (total) * sizeof(int));
hipMalloc( &d_cvals, (total) * sizeof(T));
hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_rows, rows, (total) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_cvals, cvals, (total) * sizeof(T), hipMemcpyHostToDevice);
double stt = omp_get_wtime();
if (gpu_id == gpu_num-1) {
hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared_sparse), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) , 0, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, (start + gpu_id*offset), end);
} else {
hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared_sparse), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) , 0, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, (start + gpu_id*offset), (start + (gpu_id+1)*offset));
}
hipDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << gpu_id << " in " << (enn - stt) << endl;
hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_x);
hipFree(d_p);
hipFree(d_cptrs);
hipFree(d_rows);
hipFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p_partial[gpu_id] += h_p[i];
}
delete[] h_p;
}
for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) {
p += p_partial[gpu_id];
}
return((4*(nov&1)-2) * p);
}
template <class T>
double gpu_perman64_xshared_coalescing_mshared_multigpucpu_chunks_sparse(T* mat, int* cptrs, int* rows, T* cvals, int nov, int gpu_num, bool cpu, int threads, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
double p_partial[gpu_num+1];
for (int id = 0; id < gpu_num+1; id++) {
p_partial[id] = 0;
}
int number_of_chunks = 1;
for (int i = 30; i < nov; i++) {
number_of_chunks *= 2;
}
int chunk_id = 0;
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
long long start = 1;
long long end = (1LL << (nov-1));
long long offset = (end - start) / number_of_chunks;
omp_set_nested(1);
omp_set_dynamic(0);
#pragma omp parallel for num_threads(gpu_num+1)
for (int id = 0; id < gpu_num+1; id++) {
if (id == gpu_num) {
if (cpu) {
int curr_chunk_id;
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
while (curr_chunk_id < number_of_chunks) {
double stt = omp_get_wtime();
if (curr_chunk_id == number_of_chunks - 1) {
p_partial[id] += cpu_perman64_sparse(cptrs, rows, cvals, x, nov, (start + curr_chunk_id*offset), end, threads);
} else {
p_partial[id] += cpu_perman64_sparse(cptrs, rows, cvals, x, nov, (start + curr_chunk_id*offset), (start + (curr_chunk_id+1)*offset), threads);
}
double enn = omp_get_wtime();
cout << "ChunkID " << curr_chunk_id << "is DONE by CPU" << " in " << (enn - stt) << endl;
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
}
}
} else {
hipSetDevice(id);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
hipMalloc( &d_x, (nov) * sizeof(double));
hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
hipMalloc( &d_cptrs, (nov + 1) * sizeof(int));
hipMalloc( &d_rows, (total) * sizeof(int));
hipMalloc( &d_cvals, (total) * sizeof(T));
hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_rows, rows, (total) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_cvals, cvals, (total) * sizeof(T), hipMemcpyHostToDevice);
int curr_chunk_id;
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
while (curr_chunk_id < number_of_chunks) {
double stt = omp_get_wtime();
if (curr_chunk_id == number_of_chunks - 1) {
hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared_sparse), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) , 0, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, (start + curr_chunk_id*offset), end);
} else {
hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared_sparse), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) , 0, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, (start + curr_chunk_id*offset), (start + (curr_chunk_id+1)*offset));
}
hipDeviceSynchronize();
double enn = omp_get_wtime();
cout << "ChunkID " << curr_chunk_id << "is DONE by kernel" << id << " in " << (enn - stt) << endl;
hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost);
for (int i = 0; i < grid_dim * block_dim; i++) {
p_partial[id] += h_p[i];
}
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
}
hipFree(d_x);
hipFree(d_p);
hipFree(d_cptrs);
hipFree(d_rows);
hipFree(d_cvals);
delete[] h_p;
}
}
for (int id = 0; id < gpu_num+1; id++) {
p += p_partial[id];
}
return((4*(nov&1)-2) * p);
}
template <class T>
double gpu_perman64_xshared_coalescing_mshared_skipper(T* mat, int* rptrs, int* cols, int* cptrs, int* rows, T* cvals, int nov, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
hipSetDevice(1);
T *d_cvals;
int *d_rptrs, *d_cols, *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
hipMalloc( &d_x, (nov) * sizeof(double));
hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
hipMalloc( &d_rptrs, (nov + 1) * sizeof(int));
hipMalloc( &d_cols, (total) * sizeof(int));
hipMalloc( &d_cptrs, (nov + 1) * sizeof(int));
hipMalloc( &d_rows, (total) * sizeof(int));
hipMalloc( &d_cvals, (total) * sizeof(T));
hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_rptrs, rptrs, (nov + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_cols, cols, (total) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_rows, rows, (total) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_cvals, cvals, (total) * sizeof(T), hipMemcpyHostToDevice);
long long start = 1;
long long end = (1LL << (nov-1));
double stt = omp_get_wtime();
hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared_skipper), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + 2*(nov+1)*sizeof(int) + 2*total*sizeof(int) + total*sizeof(T)) , 0, d_rptrs, d_cols, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, start, end);
hipDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << " in " << (enn - stt) << endl;
hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_x);
hipFree(d_p);
hipFree(d_rptrs);
hipFree(d_cols);
hipFree(d_cptrs);
hipFree(d_rows);
hipFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p += h_p[i];
}
delete[] h_p;
return((4*(nov&1)-2) * p);
}
template <class T>
double gpu_perman64_xshared_coalescing_mshared_multigpucpu_chunks_skipper(T* mat, int* rptrs, int* cols, int* cptrs, int* rows, T* cvals, int nov, int gpu_num, bool cpu, int threads, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
double p_partial[gpu_num+1];
for (int id = 0; id < gpu_num+1; id++) {
p_partial[id] = 0;
}
int number_of_chunks = 1;
for (int i = 30; i < nov; i++) {
number_of_chunks *= 2;
}
int chunk_id = 0;
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
long long start = 1;
long long end = (1LL << (nov-1));
long long offset = (end - start) / number_of_chunks;
omp_set_nested(1);
omp_set_dynamic(0);
#pragma omp parallel for num_threads(gpu_num+1)
for (int id = 0; id < gpu_num+1; id++) {
if (id == gpu_num) {
if (cpu) {
int curr_chunk_id;
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
while (curr_chunk_id < number_of_chunks) {
double stt = omp_get_wtime();
if (curr_chunk_id == number_of_chunks - 1) {
p_partial[id] += cpu_perman64_skipper(rptrs, cols, cptrs, rows, cvals, x, nov, (start + curr_chunk_id*offset), end, threads);
} else {
p_partial[id] += cpu_perman64_skipper(rptrs, cols, cptrs, rows, cvals, x, nov, (start + curr_chunk_id*offset), (start + (curr_chunk_id+1)*offset), threads);
}
double enn = omp_get_wtime();
cout << "ChunkID " << curr_chunk_id << "is DONE by CPU" << " in " << (enn - stt) << endl;
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
}
}
} else {
hipSetDevice(id);
T *d_cvals;
int *d_rptrs, *d_cols, *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
hipMalloc( &d_x, (nov) * sizeof(double));
hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
hipMalloc( &d_rptrs, (nov + 1) * sizeof(int));
hipMalloc( &d_cols, (total) * sizeof(int));
hipMalloc( &d_cptrs, (nov + 1) * sizeof(int));
hipMalloc( &d_rows, (total) * sizeof(int));
hipMalloc( &d_cvals, (total) * sizeof(T));
hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_rptrs, rptrs, (nov + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_cols, cols, (total) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_rows, rows, (total) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_cvals, cvals, (total) * sizeof(T), hipMemcpyHostToDevice);
int curr_chunk_id;
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
while (curr_chunk_id < number_of_chunks) {
double stt = omp_get_wtime();
if (curr_chunk_id == number_of_chunks - 1) {
hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared_skipper), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + 2*(nov+1)*sizeof(int) + 2*total*sizeof(int) + total*sizeof(T)) , 0, d_rptrs, d_cols, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, (start + curr_chunk_id*offset), end);
} else {
hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared_skipper), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + 2*(nov+1)*sizeof(int) + 2*total*sizeof(int) + total*sizeof(T)) , 0, d_rptrs, d_cols, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, (start + curr_chunk_id*offset), (start + (curr_chunk_id+1)*offset));
}
hipDeviceSynchronize();
double enn = omp_get_wtime();
cout << "ChunkID " << curr_chunk_id << "is DONE by kernel" << id << " in " << (enn - stt) << endl;
hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost);
for (int i = 0; i < grid_dim * block_dim; i++) {
p_partial[id] += h_p[i];
}
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
}
hipFree(d_x);
hipFree(d_p);
hipFree(d_rptrs);
hipFree(d_cols);
hipFree(d_cptrs);
hipFree(d_rows);
hipFree(d_cvals);
delete[] h_p;
}
}
for (int id = 0; id < gpu_num+1; id++) {
p += p_partial[id];
}
return((4*(nov&1)-2) * p);
}
template <class T>
double gpu_perman64_xshared_coalescing_mshared_multigpu_sparse_manual_distribution(T* mat, int* cptrs, int* rows, T* cvals, int nov, int gpu_num, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
double p_partial[gpu_num];
for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) {
p_partial[gpu_id] = 0;
}
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
long long start = 1;
long long end = (1LL << (nov-1));
long long offset = (end - start) / 8;
#pragma omp parallel for num_threads(gpu_num)
for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) {
hipSetDevice(gpu_id);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
hipMalloc( &d_x, (nov) * sizeof(double));
hipMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
hipMalloc( &d_cptrs, (nov + 1) * sizeof(int));
hipMalloc( &d_rows, (total) * sizeof(int));
hipMalloc( &d_cvals, (total) * sizeof(T));
hipMemcpy( d_x, x, (nov) * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_rows, rows, (total) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy( d_cvals, cvals, (total) * sizeof(T), hipMemcpyHostToDevice);
double stt = omp_get_wtime();
if (gpu_id == 0) {
hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared_sparse), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) , 0, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, start, start + 3*offset);
} else if (gpu_id == 1) {
hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared_sparse), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) , 0, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, start + 3*offset, start + 6*offset);
} else if (gpu_id == 2) {
hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared_sparse), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) , 0, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, start + 6*offset, start + 7*offset);
} else if (gpu_id == 3) {
hipLaunchKernelGGL(( kernel_xshared_coalescing_mshared_sparse), dim3(grid_dim) , dim3(block_dim) , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) , 0, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, start + 7*offset, end);
}
hipDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << gpu_id << " in " << (enn - stt) << endl;
hipMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_x);
hipFree(d_p);
hipFree(d_cptrs);
hipFree(d_rows);
hipFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p_partial[gpu_id] += h_p[i];
}
delete[] h_p;
}
for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) {
p += p_partial[gpu_id];
}
return((4*(nov&1)-2) * p);
} | 0ad62a6c6c8dc584772629997c79bca0822b3cc6.cu | #include <omp.h>
#include <stdio.h>
#include <iostream>  // std::cout / std::endl used in the timing printouts below
#include <cstring>   // memcpy
#include <algorithm> // std::min
using namespace std;
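// CPU reference: Nijenhuis-Wilf / Ryser-style permanent with Gray-code updates over a
// sparse matrix given in CCS form (cptrs/rows/cvals). The Gray-code range [start, end)
// is split into equal chunks, one per OpenMP thread; each thread keeps a private copy of
// the x vector plus a running product and a count of zero entries so the product can be
// updated incrementally instead of being recomputed from scratch.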
template <class T>
double cpu_perman64_sparse(int* cptrs, int* rows, T* cvals, double x[], int nov, long long start, long long end, int threads) {
double p = 0; //product of the elements in vector 'x'
long long one = 1;
long long chunk_size = (end - start) / threads + 1;
omp_set_num_threads(threads);
#pragma omp parallel
{
double my_x[nov];
for (int i = 0; i < nov; i++) {
my_x[i] = x[i];
}
int tid = omp_get_thread_num();
long long my_start = start + tid * chunk_size;
long long my_end = min(start + ((tid+1) * chunk_size), end);
int s; //+1 or -1
double prod; //product of the elements in vector 'x'
double my_p = 0;
long long i = my_start;
long long gray = (i-1) ^ ((i-1) >> 1);
for (int k = 0; k < (nov-1); k++) {
if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
my_x[rows[j]] += cvals[j]; // see Nijenhuis and Wilf - update x vector entries
}
}
}
prod = 1.0;
int zero_num = 0;
for (int j = 0; j < nov; j++) {
if (my_x[j] == 0) {
zero_num++;
} else {
prod *= my_x[j]; //product of the elements in vector 'x'
}
}
int k;
int prodSign = 1;
if(i & 1LL) {
prodSign = -1;
}
while (i < my_end) {
//compute the gray code
k = __builtin_ctzll(i);
gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,...
//decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1
s = ((one << k) & gray) ? 1 : -1;
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
if (my_x[rows[j]] == 0) {
zero_num--;
my_x[rows[j]] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
prod *= my_x[rows[j]]; //product of the elements in vector 'x'
} else {
prod /= my_x[rows[j]];
my_x[rows[j]] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
if (my_x[rows[j]] == 0) {
zero_num++;
} else {
prod *= my_x[rows[j]]; //product of the elements in vector 'x'
}
}
}
if(zero_num == 0) {
my_p += prodSign * prod;
}
prodSign *= -1;
i++;
}
#pragma omp atomic
p += my_p;
}
return p;
}
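// CPU "skipper" variant: same Gray-code recurrence, but it also takes the CRS arrays
// (rptrs/cols). When the current x vector has a zero entry, the row of that zero is
// scanned to find the next Gray-code index at which the entry can change, and the
// iteration jumps there directly instead of stepping one subset at a time.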
template <class T>
double cpu_perman64_skipper(int *rptrs, int *cols, int* cptrs, int* rows, T* cvals, double x[], int nov, long long start, long long end, int threads) {
//first initialize the vector then we will copy it to ourselves
double p;
int j, ptr;
unsigned long long ci, chunk_size, change_j;
p = 0;
int no_chunks = 512;
chunk_size = (end - start + 1) / no_chunks + 1;
#pragma omp parallel num_threads(threads) private(j, ci, change_j)
{
double my_x[nov];
#pragma omp for schedule(dynamic, 1)
for(int cid = 0; cid < no_chunks; cid++) {
// int tid = omp_get_thread_num();
unsigned long long my_start = start + cid * chunk_size;
unsigned long long my_end = min(start + ((cid+1) * chunk_size), end);
//update if neccessary
double my_p = 0;
unsigned long long my_gray;
unsigned long long my_prev_gray = 0;
memcpy(my_x, x, sizeof(double) * nov);
int ptr, last_zero;
unsigned long long period, steps, step_start;
unsigned long long i = my_start;
while (i < my_end) {
//k = __builtin_ctzll(i + 1);
my_gray = i ^ (i >> 1);
unsigned long long gray_diff = my_prev_gray ^ my_gray;
j = 0;
while(gray_diff > 0) { // this contains the bit to be updated
unsigned long long onej = 1ULL << j;
if(gray_diff & onej) { // if bit l is changed
gray_diff ^= onej; // unset bit
if(my_gray & onej) { // do the update
for (ptr = cptrs[j]; ptr < cptrs[j + 1]; ptr++) {
my_x[rows[ptr]] += cvals[ptr];
}
}
else {
for (ptr = cptrs[j]; ptr < cptrs[j + 1]; ptr++) {
my_x[rows[ptr]] -= cvals[ptr];
}
}
}
j++;
}
//counter++;
my_prev_gray = my_gray;
last_zero = -1;
double my_prod = 1;
for(j = nov - 1; j >= 0; j--) {
my_prod *= my_x[j];
if(my_x[j] == 0) {
last_zero = j;
break;
}
}
if(my_prod != 0) {
my_p += ((i&1ULL)? -1.0:1.0) * my_prod;
i++;
}
else {
change_j = -1;
for (ptr = rptrs[last_zero]; ptr < rptrs[last_zero + 1]; ptr++) {
step_start = 1ULL << cols[ptr];
period = step_start << 1;
ci = step_start;
if(i >= step_start) {
steps = (i - step_start) / period;
ci = step_start + ((steps + 1) * period);
}
if(ci < change_j) {
change_j = ci;
}
}
i++;
if(change_j > i) {
i = change_j;
}
}
}
#pragma omp critical
p += my_p;
}
}
return p;
}
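// GPU kernel: each thread keeps its own copy of x in a fixed-size local array
// (float my_x[40], so nov must be at most 40) and walks its chunk of the Gray-code
// range, accumulating a signed product into p[tid].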
template <class T>
__global__ void kernel_xlocal_sparse(int* cptrs, int* rows, T* cvals, double* x, double* p, int nov) {
float my_x[40];
for (int k = 0; k < nov; k++) {
my_x[k] = x[k];
}
long long number_of_threads = blockDim.x * gridDim.x;
long long one = 1;
long long start = 1;
long long end = (1LL << (nov-1));
long long chunk_size = end / number_of_threads + 1;
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
long long my_start = start + tid * chunk_size;
long long my_end = min(start + ((tid+1) * chunk_size), end);
int s; //+1 or -1
double prod; //product of the elements in vector 'x'
double my_p = 0;
long long i = my_start;
long long gray = (i-1) ^ ((i-1) >> 1);
for (int k = 0; k < (nov-1); k++) {
if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
my_x[rows[j]] += cvals[j]; // see Nijenhuis and Wilf - update x vector entries
}
}
}
prod = 1.0;
int zero_num = 0;
for (int j = 0; j < nov; j++) {
if (my_x[j] == 0) {
zero_num++;
} else {
prod *= my_x[j]; //product of the elements in vector 'x'
}
}
long long gray_diff;
int k;
int prodSign = 1;
if(i & 1LL) {
prodSign = -1;
}
while (i < my_end) {
gray_diff = (i ^ (i >> 1)) ^ gray;
k = __ffsll(gray_diff) - 1;
gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,...
//decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1
s = ((one << k) & gray) ? 1 : -1;
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
if (my_x[rows[j]] == 0) {
zero_num--;
my_x[rows[j]] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
prod *= my_x[rows[j]]; //product of the elements in vector 'x'
} else {
prod /= my_x[rows[j]];
my_x[rows[j]] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
if (my_x[rows[j]] == 0) {
zero_num++;
} else {
prod *= my_x[rows[j]]; //product of the elements in vector 'x'
}
}
}
if(zero_num == 0) {
my_p += prodSign * prod;
}
prodSign *= -1;
i++;
}
p[tid] = my_p;
}
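// Same algorithm as kernel_xlocal_sparse, but the per-thread x copies live in
// dynamically sized shared memory, laid out as one contiguous nov-length slice per
// thread (my_x[thread_id*nov + k]).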
template <class T>
__global__ void kernel_xshared_sparse(int* cptrs, int* rows, T* cvals, double* x, double* p, int nov) {
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int thread_id = threadIdx.x;
extern __shared__ float shared_mem[];
float *my_x = shared_mem; // size = nov * BLOCK_SIZE
for (int k = 0; k < nov; k++) {
my_x[thread_id*nov + k] = x[k];
}
long long number_of_threads = blockDim.x * gridDim.x;
long long one = 1;
long long start = 1;
long long end = (1LL << (nov-1));
long long chunk_size = end / number_of_threads + 1;
long long my_start = start + tid * chunk_size;
long long my_end = min(start + ((tid+1) * chunk_size), end);
int s; //+1 or -1
double prod; //product of the elements in vector 'x'
double my_p = 0;
long long i = my_start;
long long gray = (i-1) ^ ((i-1) >> 1);
for (int k = 0; k < (nov-1); k++) {
if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
my_x[thread_id*nov + rows[j]] += cvals[j]; // see Nijenhuis and Wilf - update x vector entries
}
}
}
prod = 1.0;
int zero_num = 0;
for (int j = 0; j < nov; j++) {
if (my_x[thread_id*nov + j] == 0) {
zero_num++;
} else {
prod *= my_x[thread_id*nov + j]; //product of the elements in vector 'x'
}
}
long long gray_diff;
int k;
int prodSign = 1;
if(i & 1LL) {
prodSign = -1;
}
while (i < my_end) {
gray_diff = (i ^ (i >> 1)) ^ gray;
k = __ffsll(gray_diff) - 1;
gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,...
//decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1
s = ((one << k) & gray) ? 1 : -1;
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
if (my_x[thread_id*nov + rows[j]] == 0) {
zero_num--;
my_x[thread_id*nov + rows[j]] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
prod *= my_x[thread_id*nov + rows[j]]; //product of the elements in vector 'x'
} else {
prod /= my_x[thread_id*nov + rows[j]];
my_x[thread_id*nov + rows[j]] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
if (my_x[thread_id*nov + rows[j]] == 0) {
zero_num++;
} else {
prod *= my_x[thread_id*nov + rows[j]]; //product of the elements in vector 'x'
}
}
}
if(zero_num == 0) {
my_p += prodSign * prod;
}
prodSign *= -1;
i++;
}
p[tid] = my_p;
}
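// Shared-memory variant with an interleaved layout (my_x[block_dim*k + thread_id]):
// threads of a warp touch consecutive addresses for the same row index k, which the
// function name suggests is meant to give a bank-conflict-free ("coalesced") access
// pattern in shared memory.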
template <class T>
__global__ void kernel_xshared_coalescing_sparse(int* cptrs, int* rows, T* cvals, double* x, double* p, int nov) {
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int thread_id = threadIdx.x;
int block_dim = blockDim.x;
extern __shared__ float shared_mem[];
float *my_x = shared_mem; // size = nov * BLOCK_SIZE
for (int k = 0; k < nov; k++) {
my_x[block_dim*k + thread_id] = x[k];
}
long long number_of_threads = blockDim.x * gridDim.x;
long long one = 1;
long long start = 1;
long long end = (1LL << (nov-1));
long long chunk_size = end / number_of_threads + 1;
long long my_start = start + tid * chunk_size;
long long my_end = min(start + ((tid+1) * chunk_size), end);
int s; //+1 or -1
double prod; //product of the elements in vector 'x'
double my_p = 0;
long long i = my_start;
long long gray = (i-1) ^ ((i-1) >> 1);
for (int k = 0; k < (nov-1); k++) {
if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
my_x[block_dim*rows[j] + thread_id] += cvals[j]; // see Nijenhuis and Wilf - update x vector entries
}
}
}
prod = 1.0;
int zero_num = 0;
for (int j = 0; j < nov; j++) {
if (my_x[block_dim*j + thread_id] == 0) {
zero_num++;
} else {
prod *= my_x[block_dim*j + thread_id]; //product of the elements in vector 'x'
}
}
long long gray_diff;
int k;
int prodSign = 1;
if(i & 1LL) {
prodSign = -1;
}
while (i < my_end) {
gray_diff = (i ^ (i >> 1)) ^ gray;
k = __ffsll(gray_diff) - 1;
gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,...
//decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1
s = ((one << k) & gray) ? 1 : -1;
for (int j = cptrs[k]; j < cptrs[k+1]; j++) {
if (my_x[block_dim*rows[j] + thread_id] == 0) {
zero_num--;
my_x[block_dim*rows[j] + thread_id] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
prod *= my_x[block_dim*rows[j] + thread_id]; //product of the elements in vector 'x'
} else {
prod /= my_x[block_dim*rows[j] + thread_id];
my_x[block_dim*rows[j] + thread_id] += s * cvals[j]; // see Nijenhuis and Wilf - update x vector entries
if (my_x[block_dim*rows[j] + thread_id] == 0) {
zero_num++;
} else {
prod *= my_x[block_dim*rows[j] + thread_id]; //product of the elements in vector 'x'
}
}
}
if(zero_num == 0) {
my_p += prodSign * prod;
}
prodSign *= -1;
i++;
}
p[tid] = my_p;
}
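// In addition to the interleaved x copies, this kernel stages the whole sparse matrix
// (cptrs, rows, cvals) in shared memory. The dynamic shared-memory block is carved up as:
// nov*blockDim floats for x, then nov+1 ints, then total ints, then total values of type T;
// the launch sites size the allocation accordingly. The [start, end) range is passed in so
// the host can split the work across GPUs or chunks.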
template <class T>
__global__ void kernel_xshared_coalescing_mshared_sparse(int* cptrs, int* rows, T* cvals, double* x, double* p, int nov, int total, long long start, long long end) {
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int thread_id = threadIdx.x;
int block_dim = blockDim.x;
extern __shared__ float shared_mem[];
float *my_x = shared_mem; // size = nov * BLOCK_SIZE
int *shared_cptrs = (int*) &my_x[nov * block_dim]; // size = nov + 1
int *shared_rows = (int*) &shared_cptrs[nov + 1]; // size = total num of elts
T *shared_cvals = (T*) &shared_rows[total]; // size = total num of elts
for (int k = 0; k < nov; k++) {
my_x[block_dim*k + thread_id] = x[k];
shared_cptrs[k] = cptrs[k];
}
shared_cptrs[nov] = cptrs[nov];
for (int k = 0; k < total; k++) {
shared_rows[k] = rows[k];
shared_cvals[k] = cvals[k];
}
__syncthreads();
long long number_of_threads = blockDim.x * gridDim.x;
long long one = 1;
long long chunk_size = (end - start) / number_of_threads + 1;
long long my_start = start + tid * chunk_size;
long long my_end = min(start + ((tid+1) * chunk_size), end);
int s; //+1 or -1
double prod; //product of the elements in vector 'x'
double my_p = 0;
long long i = my_start;
long long gray = (i-1) ^ ((i-1) >> 1);
for (int k = 0; k < (nov-1); k++) {
if ((gray >> k) & 1LL) { // whether kth column should be added to x vector or not
for (int j = shared_cptrs[k]; j < shared_cptrs[k+1]; j++) {
my_x[block_dim*shared_rows[j] + thread_id] += shared_cvals[j]; // see Nijenhuis and Wilf - update x vector entries
}
}
}
prod = 1.0;
int zero_num = 0;
for (int j = 0; j < nov; j++) {
if (my_x[block_dim*j + thread_id] == 0) {
zero_num++;
} else {
prod *= my_x[block_dim*j + thread_id]; //product of the elements in vector 'x'
}
}
long long gray_diff;
int k;
int prodSign = 1;
if(i & 1LL) {
prodSign = -1;
}
while (i < my_end) {
gray_diff = (i ^ (i >> 1)) ^ gray;
k = __ffsll(gray_diff) - 1;
gray ^= (one << k); // Gray-code order: 1,3,2,6,7,5,4,12,13,15,...
//decide if subtract of not - if the kth bit of gray is one then 1, otherwise -1
s = ((one << k) & gray) ? 1 : -1;
for (int j = shared_cptrs[k]; j < shared_cptrs[k+1]; j++) {
if (my_x[block_dim*shared_rows[j] + thread_id] == 0) {
zero_num--;
my_x[block_dim*shared_rows[j] + thread_id] += s * shared_cvals[j]; // see Nijenhuis and Wilf - update x vector entries
prod *= my_x[block_dim*shared_rows[j] + thread_id]; //product of the elements in vector 'x'
} else {
prod /= my_x[block_dim*shared_rows[j] + thread_id];
my_x[block_dim*shared_rows[j] + thread_id] += s * shared_cvals[j]; // see Nijenhuis and Wilf - update x vector entries
if (my_x[block_dim*shared_rows[j] + thread_id] == 0) {
zero_num++;
} else {
prod *= my_x[block_dim*shared_rows[j] + thread_id]; //product of the elements in vector 'x'
}
}
}
if(zero_num == 0) {
my_p += prodSign * prod;
}
prodSign *= -1;
i++;
}
p[tid] = my_p;
}
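// GPU version of the skipper: it needs both the CRS (rptrs/cols) and CCS (cptrs/rows/cvals)
// representations in shared memory so a thread can recompute x from the Gray-code delta and,
// on a zero entry, jump ahead to the next index at which that entry can change.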
template <class T>
__global__ void kernel_xshared_coalescing_mshared_skipper(int* rptrs, int* cols, int* cptrs, int* rows, T* cvals, double* x, double* p, int nov, int total, long long start, long long end) {
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int thread_id = threadIdx.x;
int block_dim = blockDim.x;
extern __shared__ float shared_mem[];
float *my_x = shared_mem; // size = nov * BLOCK_SIZE
int *shared_rptrs = (int*) &my_x[nov * block_dim]; // size = nov + 1
int *shared_cols = (int*) &shared_rptrs[nov + 1]; // size = total num of elts
int *shared_cptrs = (int*) &shared_cols[total]; // size = nov + 1
int *shared_rows = (int*) &shared_cptrs[nov + 1]; // size = total num of elts
T *shared_cvals = (T*) &shared_rows[total]; // size = total num of elts
for (int k = 0; k < nov; k++) {
my_x[block_dim*k + thread_id] = x[k];
shared_rptrs[k] = rptrs[k];
shared_cptrs[k] = cptrs[k];
}
shared_rptrs[nov] = rptrs[nov];
shared_cptrs[nov] = cptrs[nov];
for (int k = 0; k < total; k++) {
shared_cols[k] = cols[k];
shared_rows[k] = rows[k];
shared_cvals[k] = cvals[k];
}
__syncthreads();
long long number_of_threads = blockDim.x * gridDim.x;
long long chunk_size = (end - start) / number_of_threads + 1;
long long my_start = start + tid * chunk_size;
long long my_end = min(start + ((tid+1) * chunk_size), end);
  double prod = 1.0; //product of the elements in vector 'x'; initialized here because the accumulation loop below multiplies into it
double my_p = 0;
long long i = my_start;
long long prev_gray = 0;
long long gray;
int zero_num = 0;
for (int j = 0; j < nov; j++) {
if (my_x[block_dim*j + thread_id] == 0) {
zero_num++;
} else {
prod *= my_x[block_dim*j + thread_id]; //product of the elements in vector 'x'
}
}
long long gray_diff;
unsigned long long change_j, ci, period, steps, step_start;
int j = 0;
while (i < my_end) {
gray = i ^ (i >> 1);
gray_diff = prev_gray ^ gray;
j = 0;
while(gray_diff > 0) { // this contains the bit to be updated
long long onej = 1LL << j;
if(gray_diff & onej) { // if bit l is changed
gray_diff ^= onej; // unset bit
if(gray & onej) { // do the update
for (int ptr = shared_cptrs[j]; ptr < shared_cptrs[j + 1]; ptr++) {
my_x[block_dim*shared_rows[ptr] + thread_id] += shared_cvals[ptr];
}
}
else {
for (int ptr = shared_cptrs[j]; ptr < shared_cptrs[j + 1]; ptr++) {
my_x[block_dim*shared_rows[ptr] + thread_id] -= shared_cvals[ptr];
}
}
}
j++;
}
prev_gray = gray;
int last_zero = -1;
prod = 1.0;
for(j = nov - 1; j >= 0; j--) {
prod *= my_x[block_dim*j + thread_id];
if(my_x[block_dim*j + thread_id] == 0) {
last_zero = j;
break;
}
}
if(prod != 0) {
my_p += ((i&1LL)? -1.0:1.0) * prod;
i++;
}
else {
change_j = -1;
for (int ptr = shared_rptrs[last_zero]; ptr < shared_rptrs[last_zero + 1]; ptr++) {
step_start = 1ULL << shared_cols[ptr];
period = step_start << 1;
ci = step_start;
if(i >= step_start) {
steps = (i - step_start) / period;
ci = step_start + ((steps + 1) * period);
}
if(ci < change_j) {
change_j = ci;
}
}
i++;
if(change_j > i) {
i = change_j;
}
}
}
p[tid] = my_p;
}
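// Host wrappers. Each builds the x vector on the host (counting the nonzeros as it goes),
// copies the sparse arrays to a hard-coded device (cudaSetDevice(1) on the single-GPU paths),
// launches the corresponding kernel over [1, 2^(nov-1)), reduces the grid_dim*block_dim
// partial sums, and returns (4*(nov&1)-2) * p. The wrappers below differ mainly in which
// kernel they launch and how much dynamic shared memory they request.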
template <class T>
double gpu_perman64_xlocal_sparse(T* mat, int* cptrs, int* rows, T* cvals, int nov, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
cudaSetDevice(1);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
cudaMalloc( &d_x, (nov) * sizeof(double));
cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
cudaMalloc( &d_cptrs, (nov + 1) * sizeof(int));
cudaMalloc( &d_rows, (total) * sizeof(int));
cudaMalloc( &d_cvals, (total) * sizeof(T));
cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_rows, rows, (total) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_cvals, cvals, (total) * sizeof(T), cudaMemcpyHostToDevice);
double stt = omp_get_wtime();
kernel_xlocal_sparse<<< grid_dim , block_dim >>> (d_cptrs, d_rows, d_cvals, d_x, d_p, nov);
cudaDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << " in " << (enn - stt) << endl;
cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_p);
cudaFree(d_cptrs);
cudaFree(d_rows);
cudaFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p += h_p[i];
}
delete[] h_p;
return((4*(nov&1)-2) * p);
}
template <class T>
double gpu_perman64_xshared_sparse(T* mat, int* cptrs, int* rows, T* cvals, int nov, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
cudaSetDevice(1);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
cudaMalloc( &d_x, (nov) * sizeof(double));
cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
cudaMalloc( &d_cptrs, (nov + 1) * sizeof(int));
cudaMalloc( &d_rows, (total) * sizeof(int));
cudaMalloc( &d_cvals, (total) * sizeof(T));
cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_rows, rows, (total) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_cvals, cvals, (total) * sizeof(T), cudaMemcpyHostToDevice);
double stt = omp_get_wtime();
kernel_xshared_sparse<<< grid_dim , block_dim , nov*block_dim*sizeof(float) >>> (d_cptrs, d_rows, d_cvals, d_x, d_p, nov);
cudaDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << " in " << (enn - stt) << endl;
cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_p);
cudaFree(d_cptrs);
cudaFree(d_rows);
cudaFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p += h_p[i];
}
delete[] h_p;
return((4*(nov&1)-2) * p);
}
template <class T>
double gpu_perman64_xshared_coalescing_sparse(T* mat, int* cptrs, int* rows, T* cvals, int nov, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
cudaSetDevice(1);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
cudaMalloc( &d_x, (nov) * sizeof(double));
cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
cudaMalloc( &d_cptrs, (nov + 1) * sizeof(int));
cudaMalloc( &d_rows, (total) * sizeof(int));
cudaMalloc( &d_cvals, (total) * sizeof(T));
cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_rows, rows, (total) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_cvals, cvals, (total) * sizeof(T), cudaMemcpyHostToDevice);
double stt = omp_get_wtime();
kernel_xshared_coalescing_sparse<<< grid_dim , block_dim , nov*block_dim*sizeof(float) >>> (d_cptrs, d_rows, d_cvals, d_x, d_p, nov);
cudaDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << " in " << (enn - stt) << endl;
cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_p);
cudaFree(d_cptrs);
cudaFree(d_rows);
cudaFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p += h_p[i];
}
delete[] h_p;
return((4*(nov&1)-2) * p);
}
template <class T>
double gpu_perman64_xshared_coalescing_mshared_sparse(T* mat, int* cptrs, int* rows, T* cvals, int nov, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
cudaSetDevice(1);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
cudaMalloc( &d_x, (nov) * sizeof(double));
cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
cudaMalloc( &d_cptrs, (nov + 1) * sizeof(int));
cudaMalloc( &d_rows, (total) * sizeof(int));
cudaMalloc( &d_cvals, (total) * sizeof(T));
cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_rows, rows, (total) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_cvals, cvals, (total) * sizeof(T), cudaMemcpyHostToDevice);
long long start = 1;
long long end = (1LL << (nov-1));
double stt = omp_get_wtime();
kernel_xshared_coalescing_mshared_sparse<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) >>> (d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, start, end);
cudaDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << " in " << (enn - stt) << endl;
cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_p);
cudaFree(d_cptrs);
cudaFree(d_rows);
cudaFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p += h_p[i];
}
delete[] h_p;
return((4*(nov&1)-2) * p);
}
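// Multi-GPU wrapper: the Gray-code range is split into gpu_num equal slices, one OpenMP
// thread per device; the last device also takes the remainder up to `end`. Partial sums
// are accumulated per GPU and combined at the end.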
template <class T>
double gpu_perman64_xshared_coalescing_mshared_multigpu_sparse(T* mat, int* cptrs, int* rows, T* cvals, int nov, int gpu_num, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
double p_partial[gpu_num];
for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) {
p_partial[gpu_id] = 0;
}
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
long long start = 1;
long long end = (1LL << (nov-1));
long long offset = (end - start) / gpu_num;
#pragma omp parallel for num_threads(gpu_num)
for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) {
cudaSetDevice(gpu_id);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
cudaMalloc( &d_x, (nov) * sizeof(double));
cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
cudaMalloc( &d_cptrs, (nov + 1) * sizeof(int));
cudaMalloc( &d_rows, (total) * sizeof(int));
cudaMalloc( &d_cvals, (total) * sizeof(T));
cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_rows, rows, (total) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_cvals, cvals, (total) * sizeof(T), cudaMemcpyHostToDevice);
double stt = omp_get_wtime();
if (gpu_id == gpu_num-1) {
kernel_xshared_coalescing_mshared_sparse<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) >>> (d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, (start + gpu_id*offset), end);
} else {
kernel_xshared_coalescing_mshared_sparse<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) >>> (d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, (start + gpu_id*offset), (start + (gpu_id+1)*offset));
}
cudaDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << gpu_id << " in " << (enn - stt) << endl;
cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_p);
cudaFree(d_cptrs);
cudaFree(d_rows);
cudaFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p_partial[gpu_id] += h_p[i];
}
delete[] h_p;
}
for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) {
p += p_partial[gpu_id];
}
return((4*(nov&1)-2) * p);
}
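// Hybrid GPU+CPU wrapper with dynamic chunking: the range is cut into 2^(nov-30) chunks
// (at least one) and a shared chunk counter, protected by omp critical, hands them out.
// The thread with id == gpu_num optionally processes chunks on the CPU via
// cpu_perman64_sparse; the other threads each drive one GPU and keep their device buffers
// alive across chunks.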
template <class T>
double gpu_perman64_xshared_coalescing_mshared_multigpucpu_chunks_sparse(T* mat, int* cptrs, int* rows, T* cvals, int nov, int gpu_num, bool cpu, int threads, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
double p_partial[gpu_num+1];
for (int id = 0; id < gpu_num+1; id++) {
p_partial[id] = 0;
}
int number_of_chunks = 1;
for (int i = 30; i < nov; i++) {
number_of_chunks *= 2;
}
int chunk_id = 0;
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
long long start = 1;
long long end = (1LL << (nov-1));
long long offset = (end - start) / number_of_chunks;
omp_set_nested(1);
omp_set_dynamic(0);
#pragma omp parallel for num_threads(gpu_num+1)
for (int id = 0; id < gpu_num+1; id++) {
if (id == gpu_num) {
if (cpu) {
int curr_chunk_id;
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
while (curr_chunk_id < number_of_chunks) {
double stt = omp_get_wtime();
if (curr_chunk_id == number_of_chunks - 1) {
p_partial[id] += cpu_perman64_sparse(cptrs, rows, cvals, x, nov, (start + curr_chunk_id*offset), end, threads);
} else {
p_partial[id] += cpu_perman64_sparse(cptrs, rows, cvals, x, nov, (start + curr_chunk_id*offset), (start + (curr_chunk_id+1)*offset), threads);
}
double enn = omp_get_wtime();
cout << "ChunkID " << curr_chunk_id << "is DONE by CPU" << " in " << (enn - stt) << endl;
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
}
}
} else {
cudaSetDevice(id);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
cudaMalloc( &d_x, (nov) * sizeof(double));
cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
cudaMalloc( &d_cptrs, (nov + 1) * sizeof(int));
cudaMalloc( &d_rows, (total) * sizeof(int));
cudaMalloc( &d_cvals, (total) * sizeof(T));
cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_rows, rows, (total) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_cvals, cvals, (total) * sizeof(T), cudaMemcpyHostToDevice);
int curr_chunk_id;
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
while (curr_chunk_id < number_of_chunks) {
double stt = omp_get_wtime();
if (curr_chunk_id == number_of_chunks - 1) {
kernel_xshared_coalescing_mshared_sparse<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) >>> (d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, (start + curr_chunk_id*offset), end);
} else {
kernel_xshared_coalescing_mshared_sparse<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) >>> (d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, (start + curr_chunk_id*offset), (start + (curr_chunk_id+1)*offset));
}
cudaDeviceSynchronize();
double enn = omp_get_wtime();
cout << "ChunkID " << curr_chunk_id << "is DONE by kernel" << id << " in " << (enn - stt) << endl;
cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost);
for (int i = 0; i < grid_dim * block_dim; i++) {
p_partial[id] += h_p[i];
}
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
}
cudaFree(d_x);
cudaFree(d_p);
cudaFree(d_cptrs);
cudaFree(d_rows);
cudaFree(d_cvals);
delete[] h_p;
}
}
for (int id = 0; id < gpu_num+1; id++) {
p += p_partial[id];
}
return((4*(nov&1)-2) * p);
}
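// Single-GPU wrapper for the skipper kernel; it additionally uploads the CRS arrays and
// enlarges the dynamic shared-memory request to hold both representations.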
template <class T>
double gpu_perman64_xshared_coalescing_mshared_skipper(T* mat, int* rptrs, int* cols, int* cptrs, int* rows, T* cvals, int nov, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
cudaSetDevice(1);
T *d_cvals;
int *d_rptrs, *d_cols, *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
cudaMalloc( &d_x, (nov) * sizeof(double));
cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
cudaMalloc( &d_rptrs, (nov + 1) * sizeof(int));
cudaMalloc( &d_cols, (total) * sizeof(int));
cudaMalloc( &d_cptrs, (nov + 1) * sizeof(int));
cudaMalloc( &d_rows, (total) * sizeof(int));
cudaMalloc( &d_cvals, (total) * sizeof(T));
cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_rptrs, rptrs, (nov + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_cols, cols, (total) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_rows, rows, (total) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_cvals, cvals, (total) * sizeof(T), cudaMemcpyHostToDevice);
long long start = 1;
long long end = (1LL << (nov-1));
double stt = omp_get_wtime();
kernel_xshared_coalescing_mshared_skipper<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + 2*(nov+1)*sizeof(int) + 2*total*sizeof(int) + total*sizeof(T)) >>> (d_rptrs, d_cols, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, start, end);
cudaDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << " in " << (enn - stt) << endl;
cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_p);
cudaFree(d_rptrs);
cudaFree(d_cols);
cudaFree(d_cptrs);
cudaFree(d_rows);
cudaFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p += h_p[i];
}
delete[] h_p;
return((4*(nov&1)-2) * p);
}
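// Hybrid GPU+CPU chunked wrapper for the skipper variant; the scheduling is identical to
// the plain sparse version above, but it launches the skipper kernel and falls back to
// cpu_perman64_skipper on the CPU.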
template <class T>
double gpu_perman64_xshared_coalescing_mshared_multigpucpu_chunks_skipper(T* mat, int* rptrs, int* cols, int* cptrs, int* rows, T* cvals, int nov, int gpu_num, bool cpu, int threads, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
double p_partial[gpu_num+1];
for (int id = 0; id < gpu_num+1; id++) {
p_partial[id] = 0;
}
int number_of_chunks = 1;
for (int i = 30; i < nov; i++) {
number_of_chunks *= 2;
}
int chunk_id = 0;
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
long long start = 1;
long long end = (1LL << (nov-1));
long long offset = (end - start) / number_of_chunks;
omp_set_nested(1);
omp_set_dynamic(0);
#pragma omp parallel for num_threads(gpu_num+1)
for (int id = 0; id < gpu_num+1; id++) {
if (id == gpu_num) {
if (cpu) {
int curr_chunk_id;
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
while (curr_chunk_id < number_of_chunks) {
double stt = omp_get_wtime();
if (curr_chunk_id == number_of_chunks - 1) {
p_partial[id] += cpu_perman64_skipper(rptrs, cols, cptrs, rows, cvals, x, nov, (start + curr_chunk_id*offset), end, threads);
} else {
p_partial[id] += cpu_perman64_skipper(rptrs, cols, cptrs, rows, cvals, x, nov, (start + curr_chunk_id*offset), (start + (curr_chunk_id+1)*offset), threads);
}
double enn = omp_get_wtime();
cout << "ChunkID " << curr_chunk_id << "is DONE by CPU" << " in " << (enn - stt) << endl;
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
}
}
} else {
cudaSetDevice(id);
T *d_cvals;
int *d_rptrs, *d_cols, *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
cudaMalloc( &d_x, (nov) * sizeof(double));
cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
cudaMalloc( &d_rptrs, (nov + 1) * sizeof(int));
cudaMalloc( &d_cols, (total) * sizeof(int));
cudaMalloc( &d_cptrs, (nov + 1) * sizeof(int));
cudaMalloc( &d_rows, (total) * sizeof(int));
cudaMalloc( &d_cvals, (total) * sizeof(T));
cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_rptrs, rptrs, (nov + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_cols, cols, (total) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_rows, rows, (total) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_cvals, cvals, (total) * sizeof(T), cudaMemcpyHostToDevice);
int curr_chunk_id;
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
while (curr_chunk_id < number_of_chunks) {
double stt = omp_get_wtime();
if (curr_chunk_id == number_of_chunks - 1) {
kernel_xshared_coalescing_mshared_skipper<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + 2*(nov+1)*sizeof(int) + 2*total*sizeof(int) + total*sizeof(T)) >>> (d_rptrs, d_cols, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, (start + curr_chunk_id*offset), end);
} else {
kernel_xshared_coalescing_mshared_skipper<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + 2*(nov+1)*sizeof(int) + 2*total*sizeof(int) + total*sizeof(T)) >>> (d_rptrs, d_cols, d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, (start + curr_chunk_id*offset), (start + (curr_chunk_id+1)*offset));
}
cudaDeviceSynchronize();
double enn = omp_get_wtime();
cout << "ChunkID " << curr_chunk_id << "is DONE by kernel" << id << " in " << (enn - stt) << endl;
cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost);
for (int i = 0; i < grid_dim * block_dim; i++) {
p_partial[id] += h_p[i];
}
#pragma omp critical
{
curr_chunk_id = chunk_id;
chunk_id++;
}
}
cudaFree(d_x);
cudaFree(d_p);
cudaFree(d_rptrs);
cudaFree(d_cols);
cudaFree(d_cptrs);
cudaFree(d_rows);
cudaFree(d_cvals);
delete[] h_p;
}
}
for (int id = 0; id < gpu_num+1; id++) {
p += p_partial[id];
}
return((4*(nov&1)-2) * p);
}
template <class T>
double gpu_perman64_xshared_coalescing_mshared_multigpu_sparse_manual_distribution(T* mat, int* cptrs, int* rows, T* cvals, int nov, int gpu_num, int grid_dim, int block_dim) {
double x[nov];
double rs; //row sum
double p = 1; //product of the elements in vector 'x'
double p_partial[gpu_num];
for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) {
p_partial[gpu_id] = 0;
}
int total = 0;
//create the x vector and initiate the permanent
for (int j = 0; j < nov; j++) {
rs = .0f;
for (int k = 0; k < nov; k++) {
if (mat[(j * nov) + k] != 0) {
total++;
rs += mat[(j * nov) + k]; // sum of row j
}
}
x[j] = mat[(j * nov) + (nov-1)] - rs/2; // see Nijenhuis and Wilf - x vector entry
p *= x[j]; // product of the elements in vector 'x'
}
long long start = 1;
long long end = (1LL << (nov-1));
long long offset = (end - start) / 8;
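// manual static split of the iteration range into eighths: GPU 0 takes 3/8, GPU 1 takes 3/8, GPUs 2 and 3 take 1/8 each (written for gpu_num == 4)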
#pragma omp parallel for num_threads(gpu_num)
for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) {
cudaSetDevice(gpu_id);
T *d_cvals;
int *d_cptrs, *d_rows;
double *d_x, *d_p;
double *h_p = new double[grid_dim * block_dim];
cudaMalloc( &d_x, (nov) * sizeof(double));
cudaMalloc( &d_p, (grid_dim * block_dim) * sizeof(double));
cudaMalloc( &d_cptrs, (nov + 1) * sizeof(int));
cudaMalloc( &d_rows, (total) * sizeof(int));
cudaMalloc( &d_cvals, (total) * sizeof(T));
cudaMemcpy( d_x, x, (nov) * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_cptrs, cptrs, (nov + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_rows, rows, (total) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy( d_cvals, cvals, (total) * sizeof(T), cudaMemcpyHostToDevice);
double stt = omp_get_wtime();
if (gpu_id == 0) {
kernel_xshared_coalescing_mshared_sparse<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) >>> (d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, start, start + 3*offset);
} else if (gpu_id == 1) {
kernel_xshared_coalescing_mshared_sparse<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) >>> (d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, start + 3*offset, start + 6*offset);
} else if (gpu_id == 2) {
kernel_xshared_coalescing_mshared_sparse<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) >>> (d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, start + 6*offset, start + 7*offset);
} else if (gpu_id == 3) {
kernel_xshared_coalescing_mshared_sparse<<< grid_dim , block_dim , (nov*block_dim*sizeof(float) + (nov+1)*sizeof(int) + total*sizeof(int) + total*sizeof(T)) >>> (d_cptrs, d_rows, d_cvals, d_x, d_p, nov, total, start + 7*offset, end);
}
cudaDeviceSynchronize();
double enn = omp_get_wtime();
cout << "kernel" << gpu_id << " in " << (enn - stt) << endl;
cudaMemcpy( h_p, d_p, grid_dim * block_dim * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_p);
cudaFree(d_cptrs);
cudaFree(d_rows);
cudaFree(d_cvals);
for (int i = 0; i < grid_dim * block_dim; i++) {
p_partial[gpu_id] += h_p[i];
}
delete[] h_p;
}
for (int gpu_id = 0; gpu_id < gpu_num; gpu_id++) {
p += p_partial[gpu_id];
}
return((4*(nov&1)-2) * p);
} |
585f4d129ad943a8a467ac3fdf6208dfa81ed373.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA codes for Projection calculation
#ifndef __BACKPROJ_CAL_CU
#define __BACKPROJ_CAL_CU
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include <cutil.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cutil_math.h>
#include "ParallelBeamEM.h"
texture<float, 3, hipReadModeElementType> tex_proj; // 3D texture
__global__ void backproj_cal_kernel_2D( float*, float*,
float* ,
dim3, dim3, dim3,
float, float, float, float, float,
float, float, float, float, float);
__global__ void backproj_cal_kernel_3D( float*, float*,
float* ,
dim3, dim3, dim3,
float, float, float, float, float,
float, float, float, float, float);
extern "C"
void backproj_cal_wrapper( hipArray* d_array_proj, float* d_image_prev, float* d_image_iszero,
float* d_image_curr, // output
int num_depth, int num_height, int num_width, // parameters
int num_proj, int num_elevation, int num_ray,
float lambda, float voxel_size, float proj_pixel_size,
float SOD, float inv_rot, float xoffset, float yoffset,
float zoffset, float start_rot, float end_rot){
// setup execution parameters
int blockWidth, blockHeight, blockDepth, nBlockX, nBlockY, nBlockZ;
// Setting block size
if(num_depth == 1 ) {
blockWidth = BLOCK_2DIM_X;
blockHeight = BLOCK_2DIM_Y;
blockDepth = BLOCK_2DIM_Z;
}
else {
blockWidth = BLOCK_3DIM_X;
blockHeight = BLOCK_3DIM_Y;
blockDepth = BLOCK_3DIM_Z;
}
// compute how many blocks are needed
nBlockX = (int) ceil((float)num_width / (float)blockWidth);
nBlockY = (int) ceil((float)num_height / (float)blockHeight);
nBlockZ = (int) ceil((float)num_depth / (float)blockDepth);
dim3 dimGrid(nBlockX, nBlockY*nBlockZ); // 3D grid is not supported on G80
dim3 dimBlock(blockWidth, blockHeight, blockDepth);
dim3 projdim(num_ray, num_elevation, num_proj);
dim3 griddim(nBlockX, nBlockY, nBlockZ);
dim3 imagedim( num_width, num_height, num_depth);
// set texture parameters
tex_proj.normalized = false; // access with normalized texture coordinates
tex_proj.filterMode = hipFilterModeLinear; // linear interpolation
tex_proj.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates
tex_proj.addressMode[1] = hipAddressModeClamp;
tex_proj.addressMode[2] = hipAddressModeClamp;
// bind array to 3D texture
hipChannelFormatDesc float1Desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); // float
CUDA_SAFE_CALL(hipBindTextureToArray(tex_proj, d_array_proj, float1Desc));
// execute the kernel
if( num_depth == 1 ){
hipLaunchKernelGGL(( backproj_cal_kernel_2D), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_image_prev, d_image_iszero, // input
d_image_curr, // output
projdim, griddim, imagedim,
lambda, voxel_size, proj_pixel_size, SOD,
inv_rot, xoffset, yoffset, zoffset,
start_rot, end_rot); // parameters
}
else{
hipLaunchKernelGGL(( backproj_cal_kernel_3D), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_image_prev, d_image_iszero, // input
d_image_curr, // output
projdim, griddim, imagedim,
lambda, voxel_size, proj_pixel_size, SOD,
inv_rot, xoffset, yoffset, zoffset,
start_rot, end_rot); // parameters
}
CUDA_SAFE_CALL( hipUnbindTexture( tex_proj ) );
// check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed");
}
__global__ void backproj_cal_kernel_2D( float* d_image_prev, float* d_image_iszero,
float* d_image_curr,
dim3 projdim, dim3 griddim, dim3 imagedim, float lambda,
float voxel_size, float proj_pixel_size, float SOD,
float inv_rot, float xoffset, float yoffset,
float zoffset, float start_rot, float end_rot){
// 1. initialize shared memory
dim3 bidx;
uint idx, idy, idz;
bidx.x = blockIdx.x;
bidx.z = blockIdx.y / griddim.y;
bidx.y = blockIdx.y - bidx.z*griddim.y;
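// recover the 3D block index (x, y, z) from the flattened (x, y*z) launch grid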
idx = bidx.x * blockDim.x + threadIdx.x;
idy = bidx.y * blockDim.y + threadIdx.y;
idz = bidx.z * blockDim.z + threadIdx.z;
// shared memory
__shared__ float shmem_d_image_prev[BLOCK_2DIM_X][BLOCK_2DIM_Y][BLOCK_2DIM_Z];
#ifndef WEIGHT_CAL
__shared__ float shmem_d_image_iszero[BLOCK_2DIM_X][BLOCK_2DIM_Y][BLOCK_2DIM_Z];
#endif
dim3 cidx;
cidx.x = threadIdx.x;
cidx.y = threadIdx.y;
cidx.z = threadIdx.z;
if(idx < imagedim.x && idy < imagedim.y && idz < imagedim.z)
{
// load shared memory
shmem_d_image_prev[cidx.x][cidx.y][cidx.z] = d_image_prev[((idz)*imagedim.y + (idy))*imagedim.x + idx];
#ifndef WEIGHT_CAL
shmem_d_image_iszero[cidx.x][cidx.y][cidx.z] = d_image_iszero[((idz)*imagedim.y + (idy))*imagedim.x + idx];
#endif
}
__syncthreads();
// 2. apply kernel
if(idx < imagedim.x && idy < imagedim.y && idz < imagedim.z)
{
float pixel = 0.0f;
float i;
float x, y, z, t, u, v, angle;
float weight_voxel = 0.0f;
x = (1.0f * idx - 1.0f * imagedim.x / 2.0f) * voxel_size;
y = (1.0f * idy - 1.0f * imagedim.y / 2.0f) * voxel_size;
z = (1.0f * idz - 1.0f * imagedim.z / 2.0f) * voxel_size;
// i = projdim.z/2;
for( i = 0.0f; i < 1.0f * projdim.z; i = i + 1.0f )
{
// rotation angle (Parallel beam)
#ifdef ROTATION_CLOCKWISE
// angle = -PI * i / projdim.z;
angle = -(start_rot + i * inv_rot) * PI / 180.0f;
#else
// angle = PI * i / projdim.z + PI / 2.0f;
angle = (start_rot + i * inv_rot) * PI / 180.0f + PI / 2.0f;
#endif
u = -x * sinf( angle ) + y * cosf( angle ) ;
v = z;
u = u / proj_pixel_size + 1.0f * projdim.x / 2.0f;
v = v / proj_pixel_size + 1.0f * projdim.y / 2.0f;
if( u >= 0.0f && u < 1.0f * projdim.x && v >= 0.0f && v < 1.0f * projdim.y ){
pixel += tex3D( tex_proj, u + 0.5f, v + 0.5f, 1.0f * i + 0.5f );
float pixel_tmp1 = tex3D( tex_proj, floorf( u ) + 0.5f, floorf( v ) + 0.5f, 1.0f * i + 0.5f );
float pixel_tmp2 = tex3D( tex_proj, floorf( u ) + 1.0f + 0.5f, floorf( v ) + 0.5f, 1.0f * i + 0.5f );
// interpolation in v direction is not required for parallel beam
if( fabsf( pixel_tmp1 ) > 1e-5f )
weight_voxel += 1.0f - (u - floorf( u )) ;
if( fabsf( pixel_tmp2 ) > 1e-5f )
weight_voxel += u - floorf( u );
}
}
if( weight_voxel > 0.0f )
pixel = shmem_d_image_prev[cidx.x][cidx.y][cidx.z] * pixel / weight_voxel;
else
pixel = 0.0;
#ifndef WEIGHT_CAL
pixel *= shmem_d_image_iszero[cidx.x][cidx.y][cidx.z];
#endif
#ifdef STEVE_DATA
// thresholding
if (pixel > 0.3f)
pixel = 0.3f;
#endif
__syncthreads();
// store the result
uint outidx = ((idz)* imagedim.y + (idy))*imagedim.x + idx;
d_image_curr[ outidx ] = pixel;
}
}
__global__ void backproj_cal_kernel_3D( float* d_image_prev, float* d_image_iszero,
float* d_image_curr,
dim3 projdim, dim3 griddim, dim3 imagedim, float lambda,
float voxel_size, float proj_pixel_size, float SOD,
float inv_rot, float xoffset, float yoffset,
float zoffset, float start_rot, float end_rot){
// 1. initialize shared memory
dim3 bidx;
uint idx, idy, idz;
bidx.x = blockIdx.x;
bidx.z = blockIdx.y / griddim.y;
bidx.y = blockIdx.y - bidx.z*griddim.y;
idx = bidx.x * blockDim.x + threadIdx.x;
idy = bidx.y * blockDim.y + threadIdx.y;
idz = bidx.z * blockDim.z + threadIdx.z;
// shared memory
__shared__ float shmem_d_image_prev[BLOCK_3DIM_X][BLOCK_3DIM_Y][BLOCK_3DIM_Z];
#ifndef WEIGHT_CAL
__shared__ float shmem_d_image_iszero[BLOCK_3DIM_X][BLOCK_3DIM_Y][BLOCK_3DIM_Z];
#endif
dim3 cidx;
cidx.x = threadIdx.x;
cidx.y = threadIdx.y;
cidx.z = threadIdx.z;
if(idx < imagedim.x && idy < imagedim.y && idz < imagedim.z)
{
// load shared memory
shmem_d_image_prev[cidx.x][cidx.y][cidx.z] = d_image_prev[((idz)*imagedim.y + (idy))*imagedim.x + idx];
#ifndef WEIGHT_CAL
shmem_d_image_iszero[cidx.x][cidx.y][cidx.z] = d_image_iszero[((idz)*imagedim.y + (idy))*imagedim.x + idx];
#endif
}
__syncthreads();
// 2. apply kernel
if(idx < imagedim.x && idy < imagedim.y && idz < imagedim.z)
{
float pixel = 0.0f;
float i;
float x, y, z, t, u, v, angle;
float weight_voxel = 0.0f;
x = (1.0f * idx - 1.0f * imagedim.x / 2.0f) * voxel_size;
y = (1.0f * idy - 1.0f * imagedim.y / 2.0f) * voxel_size;
z = (1.0f * idz - 1.0f * imagedim.z / 2.0f) * voxel_size;
// i = projdim.z/2;
for( i = 0.0f; i < 1.0f * projdim.z; i = i + 1.0f )
{
// rotation angle (Parallel beam)
#ifdef ROTATION_CLOCKWISE
// angle = -PI * i / projdim.z;
angle = -(start_rot + i * inv_rot) * PI / 180.0f;
#else
// angle = PI * i / projdim.z + PI / 2.0f;
angle = (start_rot + i * inv_rot) * PI / 180.0f + PI / 2.0f;
#endif
u = -x * sinf( angle ) + y * cosf( angle ) ;
v = z;
u = u / proj_pixel_size + 1.0f * projdim.x / 2.0f;
v = v / proj_pixel_size + 1.0f * projdim.y / 2.0f;
if( u >= 0.0f && u < 1.0f * projdim.x && v >= 0.0f && v < 1.0f * projdim.y ){
pixel += tex3D( tex_proj, u + 0.5f, v + 0.5f, 1.0f * i + 0.5f );
float pixel_tmp1 = tex3D( tex_proj, floorf( u ) + 0.5f, floorf( v ) + 0.5f, 1.0f * i + 0.5f );
float pixel_tmp2 = tex3D( tex_proj, floorf( u ) + 1.0f + 0.5f, floorf( v ) + 0.5f, 1.0f * i + 0.5f );
// interpolation in v direction is not required for parallel beam
if( fabsf( pixel_tmp1 ) > 1e-5f )
weight_voxel += 1.0f - (u - floorf( u )) ;
if( fabsf( pixel_tmp2 ) > 1e-5f )
weight_voxel += u - floorf( u );
}
}
if( weight_voxel > 0.0f )
pixel = shmem_d_image_prev[cidx.x][cidx.y][cidx.z] * pixel / weight_voxel;
else
pixel = 0.0;
#ifndef WEIGHT_CAL
pixel *= shmem_d_image_iszero[cidx.x][cidx.y][cidx.z];
#endif
#ifdef STEVE_DATA
// thresholding
if (pixel > 0.3f)
pixel = 0.3f;
#endif
#ifdef PETER_DATA
// thresholding
if (pixel > 0.01f)
pixel = 0.01f;
#endif
__syncthreads();
// store the result
uint outidx = ((idz)* imagedim.y + (idy))*imagedim.x + idx;
d_image_curr[ outidx ] = pixel;
}
}
#endif // __BACKPROJ_CAL_CU
| 585f4d129ad943a8a467ac3fdf6208dfa81ed373.cu |
// CUDA codes for Projection calculation
#ifndef __BACKPROJ_CAL_CU
#define __BACKPROJ_CAL_CU
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include <cutil.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cutil_math.h>
#include "ParallelBeamEM.h"
texture<float, 3, cudaReadModeElementType> tex_proj; // 3D texture
__global__ void backproj_cal_kernel_2D( float*, float*,
float* ,
dim3, dim3, dim3,
float, float, float, float, float,
float, float, float, float, float);
__global__ void backproj_cal_kernel_3D( float*, float*,
float* ,
dim3, dim3, dim3,
float, float, float, float, float,
float, float, float, float, float);
extern "C"
void backproj_cal_wrapper( cudaArray* d_array_proj, float* d_image_prev, float* d_image_iszero,
float* d_image_curr, // output
int num_depth, int num_height, int num_width, // parameters
int num_proj, int num_elevation, int num_ray,
float lambda, float voxel_size, float proj_pixel_size,
float SOD, float inv_rot, float xoffset, float yoffset,
float zoffset, float start_rot, float end_rot){
// setup execution parameters
int blockWidth, blockHeight, blockDepth, nBlockX, nBlockY, nBlockZ;
// Setting block size
if(num_depth == 1 ) {
blockWidth = BLOCK_2DIM_X;
blockHeight = BLOCK_2DIM_Y;
blockDepth = BLOCK_2DIM_Z;
}
else {
blockWidth = BLOCK_3DIM_X;
blockHeight = BLOCK_3DIM_Y;
blockDepth = BLOCK_3DIM_Z;
}
// compute how many blocks are needed
nBlockX = (int) ceil((float)num_width / (float)blockWidth);
nBlockY = (int) ceil((float)num_height / (float)blockHeight);
nBlockZ = (int) ceil((float)num_depth / (float)blockDepth);
dim3 dimGrid(nBlockX, nBlockY*nBlockZ); // 3D grid is not supported on G80
dim3 dimBlock(blockWidth, blockHeight, blockDepth);
dim3 projdim(num_ray, num_elevation, num_proj);
dim3 griddim(nBlockX, nBlockY, nBlockZ);
dim3 imagedim( num_width, num_height, num_depth);
// set texture parameters
tex_proj.normalized = false; // access with normalized texture coordinates
tex_proj.filterMode = cudaFilterModeLinear; // linear interpolation
tex_proj.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
tex_proj.addressMode[1] = cudaAddressModeClamp;
tex_proj.addressMode[2] = cudaAddressModeClamp;
// bind array to 3D texture
cudaChannelFormatDesc float1Desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); // float
CUDA_SAFE_CALL(cudaBindTextureToArray(tex_proj, d_array_proj, float1Desc));
// execute the kernel
if( num_depth == 1 ){
backproj_cal_kernel_2D<<< dimGrid, dimBlock >>>( d_image_prev, d_image_iszero, // input
d_image_curr, // output
projdim, griddim, imagedim,
lambda, voxel_size, proj_pixel_size, SOD,
inv_rot, xoffset, yoffset, zoffset,
start_rot, end_rot); // parameters
}
else{
backproj_cal_kernel_3D<<< dimGrid, dimBlock >>>( d_image_prev, d_image_iszero, // input
d_image_curr, // output
projdim, griddim, imagedim,
lambda, voxel_size, proj_pixel_size, SOD,
inv_rot, xoffset, yoffset, zoffset,
start_rot, end_rot); // parameters
}
CUDA_SAFE_CALL( cudaUnbindTexture( tex_proj ) );
// check if kernel execution generated an error
CUT_CHECK_ERROR("Kernel execution failed");
}
__global__ void backproj_cal_kernel_2D( float* d_image_prev, float* d_image_iszero,
float* d_image_curr,
dim3 projdim, dim3 griddim, dim3 imagedim, float lambda,
float voxel_size, float proj_pixel_size, float SOD,
float inv_rot, float xoffset, float yoffset,
float zoffset, float start_rot, float end_rot){
// 1. initialize shared memory
dim3 bidx;
uint idx, idy, idz;
bidx.x = blockIdx.x;
bidx.z = blockIdx.y / griddim.y;
bidx.y = blockIdx.y - bidx.z*griddim.y;
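// recover the 3D block index (x, y, z) from the flattened (x, y*z) launch grid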
idx = bidx.x * blockDim.x + threadIdx.x;
idy = bidx.y * blockDim.y + threadIdx.y;
idz = bidx.z * blockDim.z + threadIdx.z;
// shared memory
__shared__ float shmem_d_image_prev[BLOCK_2DIM_X][BLOCK_2DIM_Y][BLOCK_2DIM_Z];
#ifndef WEIGHT_CAL
__shared__ float shmem_d_image_iszero[BLOCK_2DIM_X][BLOCK_2DIM_Y][BLOCK_2DIM_Z];
#endif
dim3 cidx;
cidx.x = threadIdx.x;
cidx.y = threadIdx.y;
cidx.z = threadIdx.z;
if(idx < imagedim.x && idy < imagedim.y && idz < imagedim.z)
{
// load shared memory
shmem_d_image_prev[cidx.x][cidx.y][cidx.z] = d_image_prev[((idz)*imagedim.y + (idy))*imagedim.x + idx];
#ifndef WEIGHT_CAL
shmem_d_image_iszero[cidx.x][cidx.y][cidx.z] = d_image_iszero[((idz)*imagedim.y + (idy))*imagedim.x + idx];
#endif
}
__syncthreads();
// 2. apply kernel
if(idx < imagedim.x && idy < imagedim.y && idz < imagedim.z)
{
float pixel = 0.0f;
float i;
float x, y, z, t, u, v, angle;
float weight_voxel = 0.0f;
x = (1.0f * idx - 1.0f * imagedim.x / 2.0f) * voxel_size;
y = (1.0f * idy - 1.0f * imagedim.y / 2.0f) * voxel_size;
z = (1.0f * idz - 1.0f * imagedim.z / 2.0f) * voxel_size;
// i = projdim.z/2;
for( i = 0.0f; i < 1.0f * projdim.z; i = i + 1.0f )
{
// rotation angle (Parallel beam)
#ifdef ROTATION_CLOCKWISE
// angle = -PI * i / projdim.z;
angle = -(start_rot + i * inv_rot) * PI / 180.0f;
#else
// angle = PI * i / projdim.z + PI / 2.0f;
angle = (start_rot + i * inv_rot) * PI / 180.0f + PI / 2.0f;
#endif
u = -x * sinf( angle ) + y * cosf( angle ) ;
v = z;
u = u / proj_pixel_size + 1.0f * projdim.x / 2.0f;
v = v / proj_pixel_size + 1.0f * projdim.y / 2.0f;
if( u >= 0.0f && u < 1.0f * projdim.x && v >= 0.0f && v < 1.0f * projdim.y ){
pixel += tex3D( tex_proj, u + 0.5f, v + 0.5f, 1.0f * i + 0.5f );
float pixel_tmp1 = tex3D( tex_proj, floorf( u ) + 0.5f, floorf( v ) + 0.5f, 1.0f * i + 0.5f );
float pixel_tmp2 = tex3D( tex_proj, floorf( u ) + 1.0f + 0.5f, floorf( v ) + 0.5f, 1.0f * i + 0.5f );
// interpolation in v direction is not required for parallel beam
if( fabsf( pixel_tmp1 ) > 1e-5f )
weight_voxel += 1.0f - (u - floorf( u )) ;
if( fabsf( pixel_tmp2 ) > 1e-5f )
weight_voxel += u - floorf( u );
}
}
if( weight_voxel > 0.0f )
pixel = shmem_d_image_prev[cidx.x][cidx.y][cidx.z] * pixel / weight_voxel;
else
pixel = 0.0;
#ifndef WEIGHT_CAL
pixel *= shmem_d_image_iszero[cidx.x][cidx.y][cidx.z];
#endif
#ifdef STEVE_DATA
// thresholding
if (pixel > 0.3f)
pixel = 0.3f;
#endif
__syncthreads();
// store the result
uint outidx = ((idz)* imagedim.y + (idy))*imagedim.x + idx;
d_image_curr[ outidx ] = pixel;
}
}
__global__ void backproj_cal_kernel_3D( float* d_image_prev, float* d_image_iszero,
float* d_image_curr,
dim3 projdim, dim3 griddim, dim3 imagedim, float lambda,
float voxel_size, float proj_pixel_size, float SOD,
float inv_rot, float xoffset, float yoffset,
float zoffset, float start_rot, float end_rot){
// 1. initialize shared memory
dim3 bidx;
uint idx, idy, idz;
bidx.x = blockIdx.x;
bidx.z = blockIdx.y / griddim.y;
bidx.y = blockIdx.y - bidx.z*griddim.y;
idx = bidx.x * blockDim.x + threadIdx.x;
idy = bidx.y * blockDim.y + threadIdx.y;
idz = bidx.z * blockDim.z + threadIdx.z;
// shared memory
__shared__ float shmem_d_image_prev[BLOCK_3DIM_X][BLOCK_3DIM_Y][BLOCK_3DIM_Z];
#ifndef WEIGHT_CAL
__shared__ float shmem_d_image_iszero[BLOCK_3DIM_X][BLOCK_3DIM_Y][BLOCK_3DIM_Z];
#endif
dim3 cidx;
cidx.x = threadIdx.x;
cidx.y = threadIdx.y;
cidx.z = threadIdx.z;
if(idx < imagedim.x && idy < imagedim.y && idz < imagedim.z)
{
// load shared memory
shmem_d_image_prev[cidx.x][cidx.y][cidx.z] = d_image_prev[((idz)*imagedim.y + (idy))*imagedim.x + idx];
#ifndef WEIGHT_CAL
shmem_d_image_iszero[cidx.x][cidx.y][cidx.z] = d_image_iszero[((idz)*imagedim.y + (idy))*imagedim.x + idx];
#endif
}
__syncthreads();
// 2. apply kernel
if(idx < imagedim.x && idy < imagedim.y && idz < imagedim.z)
{
float pixel = 0.0f;
float i;
float x, y, z, t, u, v, angle;
float weight_voxel = 0.0f;
x = (1.0f * idx - 1.0f * imagedim.x / 2.0f) * voxel_size;
y = (1.0f * idy - 1.0f * imagedim.y / 2.0f) * voxel_size;
z = (1.0f * idz - 1.0f * imagedim.z / 2.0f) * voxel_size;
// i = projdim.z/2;
for( i = 0.0f; i < 1.0f * projdim.z; i = i + 1.0f )
{
// rotation angle (Parallel beam)
#ifdef ROTATION_CLOCKWISE
// angle = -PI * i / projdim.z;
angle = -(start_rot + i * inv_rot) * PI / 180.0f;
#else
// angle = PI * i / projdim.z + PI / 2.0f;
angle = (start_rot + i * inv_rot) * PI / 180.0f + PI / 2.0f;
#endif
u = -x * sinf( angle ) + y * cosf( angle ) ;
v = z;
u = u / proj_pixel_size + 1.0f * projdim.x / 2.0f;
v = v / proj_pixel_size + 1.0f * projdim.y / 2.0f;
if( u >= 0.0f && u < 1.0f * projdim.x && v >= 0.0f && v < 1.0f * projdim.y ){
pixel += tex3D( tex_proj, u + 0.5f, v + 0.5f, 1.0f * i + 0.5f );
float pixel_tmp1 = tex3D( tex_proj, floorf( u ) + 0.5f, floorf( v ) + 0.5f, 1.0f * i + 0.5f );
float pixel_tmp2 = tex3D( tex_proj, floorf( u ) + 1.0f + 0.5f, floorf( v ) + 0.5f, 1.0f * i + 0.5f );
// interpolation in v direction is not required for parallel beam
if( fabsf( pixel_tmp1 ) > 1e-5f )
weight_voxel += 1.0f - (u - floorf( u )) ;
if( fabsf( pixel_tmp2 ) > 1e-5f )
weight_voxel += u - floorf( u );
}
}
if( weight_voxel > 0.0f )
pixel = shmem_d_image_prev[cidx.x][cidx.y][cidx.z] * pixel / weight_voxel;
else
pixel = 0.0;
#ifndef WEIGHT_CAL
pixel *= shmem_d_image_iszero[cidx.x][cidx.y][cidx.z];
#endif
#ifdef STEVE_DATA
// thresholding
if (pixel > 0.3f)
pixel = 0.3f;
#endif
#ifdef PETER_DATA
// thresholding
if (pixel > 0.01f)
pixel = 0.01f;
#endif
__syncthreads();
// store the result
uint outidx = ((idz)* imagedim.y + (idy))*imagedim.x + idx;
d_image_curr[ outidx ] = pixel;
}
}
#endif // __BACKPROJ_CAL_CU
|
703bd70864241edbbfab0623a0abd73a00ae4cad.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "rsub_scalar_double.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int idx = 1;
double dx = 1;
double *dy = NULL;
hipMalloc(&dy, XSIZE*YSIZE);
int incy = 1;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((rsub_scalar_double), dim3(gridBlock), dim3(threadBlock), 0, 0, n, idx, dx, dy, incy, result);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((rsub_scalar_double), dim3(gridBlock), dim3(threadBlock), 0, 0, n, idx, dx, dy, incy, result);
}
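// the 10 launches above are untimed warm-up; the loop below is timed over 1000 launches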
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((rsub_scalar_double), dim3(gridBlock), dim3(threadBlock), 0, 0, n, idx, dx, dy, incy, result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 703bd70864241edbbfab0623a0abd73a00ae4cad.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "rsub_scalar_double.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int idx = 1;
double dx = 1;
double *dy = NULL;
cudaMalloc(&dy, XSIZE*YSIZE);
int incy = 1;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
rsub_scalar_double<<<gridBlock,threadBlock>>>(n,idx,dx,dy,incy,result);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
rsub_scalar_double<<<gridBlock,threadBlock>>>(n,idx,dx,dy,incy,result);
}
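// the 10 launches above are untimed warm-up; the loop below is timed over 1000 launches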
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
rsub_scalar_double<<<gridBlock,threadBlock>>>(n,idx,dx,dy,incy,result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e38d4dc9da9cb16ba1452fb6aecd9463e391b0d0.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <torch/extension.h>
#include "dropout.cuh"
#include "softmax.cuh"
#include "strided_batched_gemm_hip.cuh"
namespace multihead_attn {
namespace self_bias_additive_mask {
namespace cublas_gemmex {
std::vector<torch::Tensor> fwd_cuda(bool use_time_mask, bool is_training,
int heads, torch::Tensor const &inputs,
torch::Tensor const &input_weights,
torch::Tensor const &output_weights,
torch::Tensor const &input_biases,
torch::Tensor const &output_biases,
const half *pad_mask, float dropout_prob) {
const int embed_dim = inputs.size(2);
const int sequences = inputs.size(1);
const int q_seq_len = inputs.size(0);
const int k_seq_len = q_seq_len;
const int batches = sequences * q_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_dim = 3 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim = attn_batches * 3 * head_dim;
const int batch_stride = 3 * head_dim;
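// each head's Q, K and V occupy a contiguous 3*head_dim slice of the fused input projection, so consecutive heads are batch_stride = 3*head_dim apart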
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta_zero = 0.0;
const float beta_one = 1.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
// 3 Intermediate Results + Output (Note: dropout intermediates are generated
// by ATen library code)
auto act_options = inputs.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor input_lin_results =
torch::empty({q_seq_len, sequences, output_lin_dim}, act_options);
torch::Tensor bmm1_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask =
torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
torch::Tensor matmul2_results =
torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
torch::Tensor outputs = torch::empty_like(inputs, act_options);
// Input Linear Results Pointers to Q, K, and V of interleaved activations
void *q_lin_results_ptr = static_cast<void *>(input_lin_results.data_ptr());
void *k_lin_results_ptr = static_cast<void *>(
static_cast<half *>(input_lin_results.data_ptr()) + head_dim);
void *v_lin_results_ptr = static_cast<void *>(
static_cast<half *>(input_lin_results.data_ptr()) + 2 * head_dim);
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void *bmm1_results_ptr = static_cast<void *>(bmm1_results.data_ptr());
void *dropout_results_ptr = static_cast<void *>(dropout_results.data_ptr());
char a_layout_t{'t'};
char a_layout_n{'n'};
char b_layout_n{'n'};
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Input Linear Fwd
input_lin_results.copy_(input_biases);
TORCH_CUDABLAS_CHECK(hipblasGemmEx(
handle, HIPBLAS_OP_T, HIPBLAS_OP_N, output_lin_dim, batches, embed_dim,
static_cast<const void *>(&alpha),
static_cast<const void *>(input_weights.data_ptr()), HIP_R_16F,
embed_dim, static_cast<const void *>(inputs.data_ptr()), HIP_R_16F,
embed_dim, static_cast<const void *>(&beta_one), q_lin_results_ptr,
HIP_R_16F, output_lin_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size)
gemm_switch_fp32accum(
a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, scale,
static_cast<const half *>(k_lin_results_ptr), lead_dim, batch_stride,
static_cast<const half *>(q_lin_results_ptr), lead_dim, batch_stride,
beta_zero, static_cast<half *>(bmm1_results_ptr), k_seq_len,
k_seq_len * q_seq_len, attn_batches);
// Padded Softmax
bool softmax_success = false;
if (is_training) {
softmax_success =
dispatch_additive_masked_softmax_dropout<half, half, float>(
reinterpret_cast<half *>(dropout_results_ptr),
(is_training)
? reinterpret_cast<uint8_t *>(dropout_mask.data_ptr<uint8_t>())
: nullptr,
reinterpret_cast<const half *>(bmm1_results_ptr), pad_mask,
attn_batches * q_seq_len * q_seq_len, k_seq_len, k_seq_len,
attn_batches * q_seq_len, attn_batches * q_seq_len / sequences,
1.0f - dropout_prob, stream);
} else {
softmax_success = dispatch_additive_masked_softmax<half, half, float>(
reinterpret_cast<half *>(
dropout_results_ptr), // this is actually softmax results, but
// making it consistent for the next function
reinterpret_cast<const half *>(bmm1_results_ptr), pad_mask, k_seq_len,
k_seq_len, attn_batches * q_seq_len,
attn_batches * q_seq_len / sequences);
}
// Matmul2
gemm_switch_fp32accum(
a_layout_n, b_layout_n, head_dim, q_seq_len, k_seq_len, alpha,
static_cast<const half *>(v_lin_results_ptr), lead_dim, batch_stride,
static_cast<const half *>(dropout_results.data_ptr()), k_seq_len,
k_seq_len * q_seq_len, beta_zero,
static_cast<half *>(matmul2_results.data_ptr()), head_dim * attn_batches,
head_dim, attn_batches);
outputs.copy_(output_biases);
// Output Linear
TORCH_CUDABLAS_CHECK(hipblasGemmEx(
handle, HIPBLAS_OP_T, HIPBLAS_OP_N, embed_dim, batches, embed_dim,
static_cast<const void *>(&alpha),
static_cast<const void *>(output_weights.data_ptr()), HIP_R_16F,
embed_dim, static_cast<const void *>(matmul2_results.data_ptr()),
HIP_R_16F, embed_dim, static_cast<const void *>(&beta_one),
static_cast<void *>(outputs.data_ptr()), HIP_R_16F, embed_dim,
HIP_R_32F,
// CUBLAS_GEMM_ALGO1_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {input_lin_results, bmm1_results, dropout_results,
dropout_mask, matmul2_results, outputs};
}
std::vector<torch::Tensor> bwd_cuda(
int heads, torch::Tensor const &output_grads,
torch::Tensor const &matmul2_results, torch::Tensor const &dropout_results,
torch::Tensor const &bmm1_results, torch::Tensor const &pad_mask,
torch::Tensor const &input_lin_results, torch::Tensor const &inputs,
torch::Tensor const &input_weights, torch::Tensor const &output_weights,
torch::Tensor const &dropout_mask, float dropout_prob) {
const int embed_dim = inputs.size(2);
const int sequences = inputs.size(1);
const int q_seq_len = inputs.size(0);
const int k_seq_len = q_seq_len;
const int batches = sequences * q_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_dim = 3 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim = attn_batches * 3 * head_dim;
const int batch_stride = 3 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
// Output Tensor Allocations
torch::Tensor input_grads = torch::empty_like(inputs);
torch::Tensor input_weight_grads = torch::empty_like(input_weights);
torch::Tensor output_weight_grads = torch::empty_like(output_weights);
// Intermediate Tensor Allocations
at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_output_grads = torch::empty_like(input_lin_results);
auto q_lin_results_ptr = static_cast<half *>(input_lin_results.data_ptr());
auto k_lin_results_ptr =
static_cast<half *>(input_lin_results.data_ptr()) + head_dim;
auto v_lin_results_ptr =
static_cast<half *>(input_lin_results.data_ptr()) + 2 * head_dim;
auto q_lin_grads_ptr = static_cast<half *>(input_lin_output_grads.data_ptr());
auto k_lin_grads_ptr =
static_cast<half *>(input_lin_output_grads.data_ptr()) + head_dim;
auto v_lin_grads_ptr =
static_cast<half *>(input_lin_output_grads.data_ptr()) + 2 * head_dim;
char a_layout_n{'n'};
char a_layout_t{'t'};
char b_layout_n{'n'};
char b_layout_t{'t'};
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Output Linear Dgrad
TORCH_CUDABLAS_CHECK(hipblasGemmEx(
handle, HIPBLAS_OP_N, HIPBLAS_OP_N, embed_dim, batches, embed_dim,
static_cast<const void *>(&alpha),
static_cast<const void *>(output_weights.data_ptr()), HIP_R_16F,
embed_dim, static_cast<const void *>(output_grads.data_ptr()), HIP_R_16F,
embed_dim, static_cast<const void *>(&beta),
static_cast<void *>(output_lin_grads.data_ptr()), HIP_R_16F, embed_dim,
HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Output Linear Wgrad
TORCH_CUDABLAS_CHECK(hipblasGemmEx(
handle, HIPBLAS_OP_N, HIPBLAS_OP_T, embed_dim, embed_dim, batches,
static_cast<const void *>(&alpha),
static_cast<const void *>(matmul2_results.data_ptr()), HIP_R_16F,
embed_dim, static_cast<const void *>(output_grads.data_ptr()), HIP_R_16F,
embed_dim, static_cast<const void *>(&beta),
static_cast<void *>(output_weight_grads.data_ptr()), HIP_R_16F,
embed_dim, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
auto output_bias_grads = output_grads.view({-1, embed_dim}).sum(0, false);
// MatMul2 Dgrad1
gemm_switch_fp32accum(
a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, alpha,
static_cast<const half *>(v_lin_results_ptr), lead_dim, batch_stride,
static_cast<const half *>(output_lin_grads.data_ptr()),
head_dim * attn_batches, head_dim, beta,
static_cast<half *>(matmul2_grads.data_ptr()), k_seq_len,
k_seq_len * q_seq_len, attn_batches);
// Matmul2 Dgrad2
gemm_switch_fp32accum(a_layout_n, b_layout_t, head_dim, k_seq_len,
q_seq_len, alpha,
static_cast<const half *>(output_lin_grads.data_ptr()),
head_dim * attn_batches, head_dim,
static_cast<const half *>(dropout_results.data_ptr()),
k_seq_len, k_seq_len * q_seq_len, beta, v_lin_grads_ptr,
lead_dim, batch_stride, attn_batches);
// Apply Dropout Mask and Scale by Dropout Probability
// Softmax Grad
dispatch_masked_scale_softmax_backward_recompute<half, half, float, false>(
static_cast<half *>(matmul2_grads.data_ptr()),
static_cast<half *const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const *>(bmm1_results.data_ptr()),
reinterpret_cast<half const *>(pad_mask.data_ptr()),
static_cast<uint8_t const *>(dropout_mask.data_ptr()),
1.0 / (1.0 - dropout_prob), k_seq_len, k_seq_len,
attn_batches * q_seq_len / sequences, attn_batches * q_seq_len, stream);
// Matmul1 Dgrad1
gemm_switch_fp32accum(a_layout_n, b_layout_n, head_dim, q_seq_len,
k_seq_len, scale, k_lin_results_ptr, lead_dim,
batch_stride,
static_cast<half *>(matmul2_grads.data_ptr()),
k_seq_len, k_seq_len * q_seq_len, beta, q_lin_grads_ptr,
lead_dim, batch_stride, attn_batches);
// Matmul1 Dgrad2
gemm_switch_fp32accum(a_layout_n, b_layout_t, head_dim, k_seq_len,
q_seq_len, scale, q_lin_results_ptr, lead_dim,
batch_stride,
static_cast<half *>(matmul2_grads.data_ptr()),
k_seq_len, k_seq_len * q_seq_len, beta, k_lin_grads_ptr,
lead_dim, batch_stride, attn_batches);
// Input Linear Dgrad
TORCH_CUDABLAS_CHECK(hipblasGemmEx(
handle, HIPBLAS_OP_N, HIPBLAS_OP_N, embed_dim, batches, output_lin_dim,
static_cast<const void *>(&alpha),
static_cast<const void *>(input_weights.data_ptr()), HIP_R_16F,
embed_dim, static_cast<const void *>(input_lin_output_grads.data_ptr()),
// static_cast<const void*>(q_lin_grads_ptr),
HIP_R_16F, output_lin_dim, static_cast<const void *>(&beta),
static_cast<void *>(input_grads.data_ptr()), HIP_R_16F, embed_dim,
HIP_R_32F,
// CUBLAS_GEMM_ALGO10_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear Wgrad
TORCH_CUDABLAS_CHECK(hipblasGemmEx(
handle, HIPBLAS_OP_N, HIPBLAS_OP_T, embed_dim, output_lin_dim, batches,
static_cast<const void *>(&alpha),
static_cast<const void *>(inputs.data_ptr()), HIP_R_16F, embed_dim,
static_cast<const void *>(q_lin_grads_ptr), HIP_R_16F, output_lin_dim,
static_cast<const void *>(&beta),
static_cast<void *>(input_weight_grads.data_ptr()), HIP_R_16F, embed_dim,
HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
auto input_bias_grads =
input_lin_output_grads.view({-1, output_lin_dim}).sum(0, false);
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {input_grads, input_weight_grads, output_weight_grads,
input_bias_grads, output_bias_grads};
}
} // end namespace cublas_gemmex
} // namespace self_bias_additive_mask
} // end namespace multihead_attn
| e38d4dc9da9cb16ba1452fb6aecd9463e391b0d0.cu | #include <iostream>
#include <math.h>
#include <vector>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_profiler_api.h>
#include <cuda_runtime.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include "dropout.cuh"
#include "softmax.cuh"
#include "strided_batched_gemm.cuh"
namespace multihead_attn {
namespace self_bias_additive_mask {
namespace cublas_gemmex {
std::vector<torch::Tensor> fwd_cuda(bool use_time_mask, bool is_training,
int heads, torch::Tensor const &inputs,
torch::Tensor const &input_weights,
torch::Tensor const &output_weights,
torch::Tensor const &input_biases,
torch::Tensor const &output_biases,
const half *pad_mask, float dropout_prob) {
const int embed_dim = inputs.size(2);
const int sequences = inputs.size(1);
const int q_seq_len = inputs.size(0);
const int k_seq_len = q_seq_len;
const int batches = sequences * q_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_dim = 3 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim = attn_batches * 3 * head_dim;
const int batch_stride = 3 * head_dim;
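// each head's Q, K and V occupy a contiguous 3*head_dim slice of the fused input projection, so consecutive heads are batch_stride = 3*head_dim apart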
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta_zero = 0.0;
const float beta_one = 1.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// 3 Intermediate Results + Output (Note: dropout intermediates are generated
// by ATen library code)
auto act_options = inputs.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor input_lin_results =
torch::empty({q_seq_len, sequences, output_lin_dim}, act_options);
torch::Tensor bmm1_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results =
torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask =
torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
torch::Tensor matmul2_results =
torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
torch::Tensor outputs = torch::empty_like(inputs, act_options);
// Input Linear Results Pointers to Q, K, and V of interleaved activations
void *q_lin_results_ptr = static_cast<void *>(input_lin_results.data_ptr());
void *k_lin_results_ptr = static_cast<void *>(
static_cast<half *>(input_lin_results.data_ptr()) + head_dim);
void *v_lin_results_ptr = static_cast<void *>(
static_cast<half *>(input_lin_results.data_ptr()) + 2 * head_dim);
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void *bmm1_results_ptr = static_cast<void *>(bmm1_results.data_ptr());
void *dropout_results_ptr = static_cast<void *>(dropout_results.data_ptr());
char a_layout_t{'t'};
char a_layout_n{'n'};
char b_layout_n{'n'};
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Input Linear Fwd
input_lin_results.copy_(input_biases);
TORCH_CUDABLAS_CHECK(cublasGemmEx(
handle, CUBLAS_OP_T, CUBLAS_OP_N, output_lin_dim, batches, embed_dim,
static_cast<const void *>(&alpha),
static_cast<const void *>(input_weights.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(inputs.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(&beta_one), q_lin_results_ptr,
CUDA_R_16F, output_lin_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size)
gemm_switch_fp32accum(
a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, scale,
static_cast<const half *>(k_lin_results_ptr), lead_dim, batch_stride,
static_cast<const half *>(q_lin_results_ptr), lead_dim, batch_stride,
beta_zero, static_cast<half *>(bmm1_results_ptr), k_seq_len,
k_seq_len * q_seq_len, attn_batches);
// Padded Softmax
bool softmax_success = false;
if (is_training) {
softmax_success =
dispatch_additive_masked_softmax_dropout<half, half, float>(
reinterpret_cast<half *>(dropout_results_ptr),
(is_training)
? reinterpret_cast<uint8_t *>(dropout_mask.data_ptr<uint8_t>())
: nullptr,
reinterpret_cast<const half *>(bmm1_results_ptr), pad_mask,
attn_batches * q_seq_len * q_seq_len, k_seq_len, k_seq_len,
attn_batches * q_seq_len, attn_batches * q_seq_len / sequences,
1.0f - dropout_prob, stream);
} else {
softmax_success = dispatch_additive_masked_softmax<half, half, float>(
reinterpret_cast<half *>(
dropout_results_ptr), // this is actually softmax results, but
// making it consistent for the next function
reinterpret_cast<const half *>(bmm1_results_ptr), pad_mask, k_seq_len,
k_seq_len, attn_batches * q_seq_len,
attn_batches * q_seq_len / sequences);
}
// Matmul2
gemm_switch_fp32accum(
a_layout_n, b_layout_n, head_dim, q_seq_len, k_seq_len, alpha,
static_cast<const half *>(v_lin_results_ptr), lead_dim, batch_stride,
static_cast<const half *>(dropout_results.data_ptr()), k_seq_len,
k_seq_len * q_seq_len, beta_zero,
static_cast<half *>(matmul2_results.data_ptr()), head_dim * attn_batches,
head_dim, attn_batches);
outputs.copy_(output_biases);
// Output Linear
TORCH_CUDABLAS_CHECK(cublasGemmEx(
handle, CUBLAS_OP_T, CUBLAS_OP_N, embed_dim, batches, embed_dim,
static_cast<const void *>(&alpha),
static_cast<const void *>(output_weights.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(matmul2_results.data_ptr()),
CUDA_R_16F, embed_dim, static_cast<const void *>(&beta_one),
static_cast<void *>(outputs.data_ptr()), CUDA_R_16F, embed_dim,
CUDA_R_32F,
// CUBLAS_GEMM_ALGO1_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {input_lin_results, bmm1_results, dropout_results,
dropout_mask, matmul2_results, outputs};
}
std::vector<torch::Tensor> bwd_cuda(
int heads, torch::Tensor const &output_grads,
torch::Tensor const &matmul2_results, torch::Tensor const &dropout_results,
torch::Tensor const &bmm1_results, torch::Tensor const &pad_mask,
torch::Tensor const &input_lin_results, torch::Tensor const &inputs,
torch::Tensor const &input_weights, torch::Tensor const &output_weights,
torch::Tensor const &dropout_mask, float dropout_prob) {
const int embed_dim = inputs.size(2);
const int sequences = inputs.size(1);
const int q_seq_len = inputs.size(0);
const int k_seq_len = q_seq_len;
const int batches = sequences * q_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_dim = 3 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim = attn_batches * 3 * head_dim;
const int batch_stride = 3 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// Output Tensor Allocations
torch::Tensor input_grads = torch::empty_like(inputs);
torch::Tensor input_weight_grads = torch::empty_like(input_weights);
torch::Tensor output_weight_grads = torch::empty_like(output_weights);
// Intermediate Tensor Allocations
at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_output_grads = torch::empty_like(input_lin_results);
auto q_lin_results_ptr = static_cast<half *>(input_lin_results.data_ptr());
auto k_lin_results_ptr =
static_cast<half *>(input_lin_results.data_ptr()) + head_dim;
auto v_lin_results_ptr =
static_cast<half *>(input_lin_results.data_ptr()) + 2 * head_dim;
auto q_lin_grads_ptr = static_cast<half *>(input_lin_output_grads.data_ptr());
auto k_lin_grads_ptr =
static_cast<half *>(input_lin_output_grads.data_ptr()) + head_dim;
auto v_lin_grads_ptr =
static_cast<half *>(input_lin_output_grads.data_ptr()) + 2 * head_dim;
char a_layout_n{'n'};
char a_layout_t{'t'};
char b_layout_n{'n'};
char b_layout_t{'t'};
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Output Linear Dgrad
TORCH_CUDABLAS_CHECK(cublasGemmEx(
handle, CUBLAS_OP_N, CUBLAS_OP_N, embed_dim, batches, embed_dim,
static_cast<const void *>(&alpha),
static_cast<const void *>(output_weights.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(output_grads.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(&beta),
static_cast<void *>(output_lin_grads.data_ptr()), CUDA_R_16F, embed_dim,
CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Output Linear Wgrad
TORCH_CUDABLAS_CHECK(cublasGemmEx(
handle, CUBLAS_OP_N, CUBLAS_OP_T, embed_dim, embed_dim, batches,
static_cast<const void *>(&alpha),
static_cast<const void *>(matmul2_results.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(output_grads.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(&beta),
static_cast<void *>(output_weight_grads.data_ptr()), CUDA_R_16F,
embed_dim, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
auto output_bias_grads = output_grads.view({-1, embed_dim}).sum(0, false);
// MatMul2 Dgrad1
gemm_switch_fp32accum(
a_layout_t, b_layout_n, k_seq_len, q_seq_len, head_dim, alpha,
static_cast<const half *>(v_lin_results_ptr), lead_dim, batch_stride,
static_cast<const half *>(output_lin_grads.data_ptr()),
head_dim * attn_batches, head_dim, beta,
static_cast<half *>(matmul2_grads.data_ptr()), k_seq_len,
k_seq_len * q_seq_len, attn_batches);
// Matmul2 Dgrad2
gemm_switch_fp32accum(a_layout_n, b_layout_t, head_dim, k_seq_len,
q_seq_len, alpha,
static_cast<const half *>(output_lin_grads.data_ptr()),
head_dim * attn_batches, head_dim,
static_cast<const half *>(dropout_results.data_ptr()),
k_seq_len, k_seq_len * q_seq_len, beta, v_lin_grads_ptr,
lead_dim, batch_stride, attn_batches);
// Apply Dropout Mask and Scale by Dropout Probability
// Softmax Grad
dispatch_masked_scale_softmax_backward_recompute<half, half, float, false>(
static_cast<half *>(matmul2_grads.data_ptr()),
static_cast<half *const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const *>(bmm1_results.data_ptr()),
reinterpret_cast<half const *>(pad_mask.data_ptr()),
static_cast<uint8_t const *>(dropout_mask.data_ptr()),
1.0 / (1.0 - dropout_prob), k_seq_len, k_seq_len,
attn_batches * q_seq_len / sequences, attn_batches * q_seq_len, stream);
// Matmul1 Dgrad1
gemm_switch_fp32accum(a_layout_n, b_layout_n, head_dim, q_seq_len,
k_seq_len, scale, k_lin_results_ptr, lead_dim,
batch_stride,
static_cast<half *>(matmul2_grads.data_ptr()),
k_seq_len, k_seq_len * q_seq_len, beta, q_lin_grads_ptr,
lead_dim, batch_stride, attn_batches);
// Matmul1 Dgrad2
gemm_switch_fp32accum(a_layout_n, b_layout_t, head_dim, k_seq_len,
q_seq_len, scale, q_lin_results_ptr, lead_dim,
batch_stride,
static_cast<half *>(matmul2_grads.data_ptr()),
k_seq_len, k_seq_len * q_seq_len, beta, k_lin_grads_ptr,
lead_dim, batch_stride, attn_batches);
// Input Linear Dgrad
TORCH_CUDABLAS_CHECK(cublasGemmEx(
handle, CUBLAS_OP_N, CUBLAS_OP_N, embed_dim, batches, output_lin_dim,
static_cast<const void *>(&alpha),
static_cast<const void *>(input_weights.data_ptr()), CUDA_R_16F,
embed_dim, static_cast<const void *>(input_lin_output_grads.data_ptr()),
// static_cast<const void*>(q_lin_grads_ptr),
CUDA_R_16F, output_lin_dim, static_cast<const void *>(&beta),
static_cast<void *>(input_grads.data_ptr()), CUDA_R_16F, embed_dim,
CUDA_R_32F,
// CUBLAS_GEMM_ALGO10_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear Wgrad
TORCH_CUDABLAS_CHECK(cublasGemmEx(
handle, CUBLAS_OP_N, CUBLAS_OP_T, embed_dim, output_lin_dim, batches,
static_cast<const void *>(&alpha),
static_cast<const void *>(inputs.data_ptr()), CUDA_R_16F, embed_dim,
static_cast<const void *>(q_lin_grads_ptr), CUDA_R_16F, output_lin_dim,
static_cast<const void *>(&beta),
static_cast<void *>(input_weight_grads.data_ptr()), CUDA_R_16F, embed_dim,
CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
auto input_bias_grads =
input_lin_output_grads.view({-1, output_lin_dim}).sum(0, false);
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {input_grads, input_weight_grads, output_weight_grads,
input_bias_grads, output_bias_grads};
}
} // end namespace cublas_gemmex
} // namespace self_bias_additive_mask
} // end namespace multihead_attn
|
ab57d38e098cb64110df8c11c5244b423acd3d85.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <cmath>
#include "wfPad.h"
#include "timer.h"
extern "C"
{
typedef thrust::complex<float> fcomp;
__global__ void copyPadded(fcomp * paste, fcomp * copyied, int nf, int nx, int M)
{
int dim_x = nx+2*M;
int pixelIdx_x = blockIdx.x * blockDim.x + threadIdx.x;
int pixelIdx_y = blockIdx.y * blockDim.y + threadIdx.y;
if(pixelIdx_x < nx && pixelIdx_y < nf){
int pixelIdx = pixelIdx_y * dim_x + pixelIdx_x + M;
paste[pixelIdx] = copyied[pixelIdx];
}
}
__global__ void imaging(fcomp * image, fcomp * forw_pulse, fcomp * back_pulse, int nf, int nx, int M)
{
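// zero-lag cross-correlation imaging condition: accumulate forw_pulse * conj(back_pulse) over all frequencies at this lateral position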
int dim_x = nx+2*M;
int pixelIdx_x = blockIdx.x * blockDim.x + threadIdx.x;
fcomp conv(0.0f, 0.0f); // zero the accumulator explicitly rather than relying on default construction
for(int j=0; j<nf; j++){
int Idx = j * dim_x + pixelIdx_x + M;
conv += forw_pulse[Idx] * thrust::conj(back_pulse[Idx]);
}
image[pixelIdx_x] = conv;
}
__global__ void extrapDepths(fcomp * forw_pulse_new, fcomp * back_pulse_new, \
int nf, int nx, int M, \
fcomp * forw_w_op, fcomp * back_w_op, \
fcomp * forw_pulse_old, fcomp * back_pulse_old)
{
int length_M = 2*M+1;
int dim_x = nx+2*M;
int pixelIdx_x = blockIdx.x * blockDim.x + threadIdx.x;
int pixelIdx_y = blockIdx.y * blockDim.y + threadIdx.y;
fcomp pixel_forw = fcomp(0.0,0.0); //set pixels into register memory for faster access
fcomp pixel_back = fcomp(0.0,0.0);
if(pixelIdx_x < nx && pixelIdx_y < nf){
int op_loc = pixelIdx_x * length_M;// operator's spatial location index
int op_freq = pixelIdx_y * nx * length_M;// operator's frequency index
int opIdx = op_loc + op_freq; //operator's starting location
for (int k=0; k<length_M; ++k){
int elemIdx = pixelIdx_y * dim_x + pixelIdx_x + k;
pixel_forw += forw_w_op[opIdx + k] * forw_pulse_old[elemIdx];
pixel_back += back_w_op[opIdx + k] * back_pulse_old[elemIdx];
}
int pixelIdx = pixelIdx_y * dim_x + pixelIdx_x + M;
forw_pulse_new[pixelIdx] = pixel_forw;
back_pulse_new[pixelIdx] = pixel_back;
}
} // end extrapolation to next depth
void extrapolate(int ns, int nextrap, int nz, int nt, int nf, int nx, int M,\
fcomp * w_op_forw, fcomp * forw_pulse, fcomp * w_op_back, fcomp * back_pulse,\
float * image)
{
//define important dimensionality parameters
int length_M = 2*M+1;
int dim_x = nx+2*M;
size_t sizePulse = nf * dim_x;
size_t sizeAllSources = ns * sizePulse;
size_t sizeImage = nz * nx;
size_t sizeAllImages = ns * sizeImage;
size_t sizeOp = nextrap * nf * nx * length_M;
timer t0("EXTRAPOLATION");
timer t1("CONSTRUCT PADDED WAVEFIELDS");
timer t2("READ IMAGES");
//allocate host memory
t1.start();
fcomp * h_image = new fcomp[sizeAllImages];
std::vector<wfpad> h_forw_pulses(ns);
std::vector<wfpad> h_back_pulses(ns);
for(int is=0; is<ns; ++is){
h_forw_pulses[is] = wfpad(nf, nx, 1, M, 0, &forw_pulse[is*nt*nx]);
h_back_pulses[is] = wfpad(nf, nx, 1, M, 0, &back_pulse[is*nt*nx]);
}
t1.stop();
//define device pointers and allocate memory
fcomp * d_forw_pulse, * d_forw_pulse_new;
fcomp * d_back_pulse, * d_back_pulse_new;
fcomp * d_w_op_forw, * d_w_op_back;
fcomp * d_image;
hipMalloc(&d_forw_pulse, sizeAllSources * sizeof(fcomp));
hipMalloc(&d_forw_pulse_new, sizeAllSources * sizeof(fcomp));
hipMalloc(&d_back_pulse, sizeAllSources * sizeof(fcomp));
hipMalloc(&d_back_pulse_new, sizeAllSources * sizeof(fcomp));
hipMalloc(&d_w_op_forw, sizeOp * sizeof(fcomp));
hipMalloc(&d_w_op_back, sizeOp * sizeof(fcomp));
hipMalloc(&d_image, sizeAllImages * sizeof(fcomp));
//copy operators and wavefields on device
hipMemcpy(d_w_op_forw, w_op_forw, sizeOp * sizeof(fcomp), hipMemcpyHostToDevice);
hipMemcpy(d_w_op_back, w_op_back, sizeOp * sizeof(fcomp), hipMemcpyHostToDevice);
//define number of blocks and number of threads per block
dim3 nThreads(16, 1, 1);
size_t nBlocks_x = nx % nThreads.x == 0 ? size_t(nx/nThreads.x) : size_t(1 + nx/nThreads.x);
size_t nBlocks_y = nf % nThreads.y == 0 ? size_t(nf/nThreads.y) : size_t(1 + nf/nThreads.y);
size_t nBlocks_z = 1;
dim3 nBlocks(nBlocks_x, nBlocks_y, nBlocks_z);
hipStream_t streams[ns];
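// one stream per shot so that host<->device copies and per-shot kernels can overlap across sources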
std::cout << "nThreads: (" << nThreads.x << ", " << nThreads.y << ", " << nThreads.z << ")" << std::endl;
std::cout << "nBlocks: (" << nBlocks.x << ", " << nBlocks.y << ", " << nBlocks.z << ")" << std::endl;
t0.start();
for(int is=0; is<ns; ++is){
hipStreamCreate(&streams[is]);
hipMemcpyAsync(&d_forw_pulse[is*sizePulse], h_forw_pulses[is].wf, \
sizePulse*sizeof(fcomp), hipMemcpyHostToDevice, streams[is]);
hipMemcpyAsync(&d_back_pulse[is*sizePulse], h_back_pulses[is].wf, \
sizePulse*sizeof(fcomp), hipMemcpyHostToDevice, streams[is]);
for(int l=0; l<nextrap; ++l){
int depthIdx = l*nx*nf*length_M;
hipLaunchKernelGGL(( extrapDepths), dim3(nBlocks), dim3(nThreads), 0, streams[is], &d_forw_pulse_new[is*sizePulse], &d_back_pulse_new[is*sizePulse],\
nf, nx, M, &d_w_op_forw[depthIdx], &d_w_op_back[depthIdx], &d_forw_pulse[is*sizePulse], &d_back_pulse[is*sizePulse]);
hipLaunchKernelGGL(( imaging), dim3(1), dim3(nx), 0, streams[is], &d_image[is*sizeImage + l*nx], \
&d_forw_pulse_new[is*sizePulse], &d_back_pulse_new[is*sizePulse],\
nf, nx, M);
hipLaunchKernelGGL(( copyPadded), dim3(nBlocks), dim3(nThreads), 0, streams[is], &d_forw_pulse[is*sizePulse], &d_forw_pulse_new[is*sizePulse],\
nf, nx, M);
hipLaunchKernelGGL(( copyPadded), dim3(nBlocks), dim3(nThreads), 0, streams[is], &d_back_pulse[is*sizePulse], &d_back_pulse_new[is*sizePulse],\
nf, nx, M);
}
hipMemcpyAsync(&h_image[is*sizeImage], &d_image[is*sizeImage], \
sizeImage*sizeof(fcomp), hipMemcpyDeviceToHost, streams[is]);
hipStreamDestroy(streams[is]);
}
hipDeviceSynchronize();
t0.stop();
t2.start();
//take real part of images
for(int is=0; is<ns; ++is){
for(int l=0; l<nextrap; ++l){
for(int i=0; i<nx; ++i){
image[is*sizeImage + l*nx + i] = reinterpret_cast<float*>(h_image)[2*(is*sizeImage + l*nx + i)];
}
}
}
t2.stop();
//free device memory
hipFree(d_forw_pulse);
hipFree(d_forw_pulse_new);
hipFree(d_back_pulse);
hipFree(d_back_pulse_new);
hipFree(d_w_op_forw);
hipFree(d_w_op_back);
hipFree(d_image);
delete [] h_image;
t0.dispInfo();
t1.dispInfo();
t2.dispInfo();
} // end extrapolate
} //end extern "C"
| ab57d38e098cb64110df8c11c5244b423acd3d85.cu |
#include <iostream>
#include <vector>
#include <cmath>
#include "wfPad.h"
#include "timer.h"
extern "C"
{
typedef thrust::complex<float> fcomp;
__global__ void copyPadded(fcomp * paste, fcomp * copyied, int nf, int nx, int M)
{
int dim_x = nx+2*M;
int pixelIdx_x = blockIdx.x * blockDim.x + threadIdx.x;
int pixelIdx_y = blockIdx.y * blockDim.y + threadIdx.y;
if(pixelIdx_x < nx && pixelIdx_y < nf){
int pixelIdx = pixelIdx_y * dim_x + pixelIdx_x + M;
paste[pixelIdx] = copyied[pixelIdx];
}
}
__global__ void imaging(fcomp * image, fcomp * forw_pulse, fcomp * back_pulse, int nf, int nx, int M)
{
int dim_x = nx+2*M;
int pixelIdx_x = blockIdx.x * blockDim.x + threadIdx.x;
    fcomp conv = fcomp(0.0, 0.0); // start the accumulator at zero before summing over frequencies
for(int j=0; j<nf; j++){
int Idx = j * dim_x + pixelIdx_x + M;
conv += forw_pulse[Idx] * thrust::conj(back_pulse[Idx]);
}
image[pixelIdx_x] = conv;
}
__global__ void extrapDepths(fcomp * forw_pulse_new, fcomp * back_pulse_new, \
int nf, int nx, int M, \
fcomp * forw_w_op, fcomp * back_w_op, \
fcomp * forw_pulse_old, fcomp * back_pulse_old)
{
int length_M = 2*M+1;
int dim_x = nx+2*M;
int pixelIdx_x = blockIdx.x * blockDim.x + threadIdx.x;
int pixelIdx_y = blockIdx.y * blockDim.y + threadIdx.y;
fcomp pixel_forw = fcomp(0.0,0.0); //set pixels into register memory for faster access
fcomp pixel_back = fcomp(0.0,0.0);
if(pixelIdx_x < nx && pixelIdx_y < nf){
        int op_loc = pixelIdx_x * length_M; // operator's spatial location index
        int op_freq = pixelIdx_y * nx * length_M; // operator's frequency index
        int opIdx = op_loc + op_freq; // operator's starting location
for (int k=0; k<length_M; ++k){
int elemIdx = pixelIdx_y * dim_x + pixelIdx_x + k;
pixel_forw += forw_w_op[opIdx + k] * forw_pulse_old[elemIdx];
pixel_back += back_w_op[opIdx + k] * back_pulse_old[elemIdx];
}
int pixelIdx = pixelIdx_y * dim_x + pixelIdx_x + M;
forw_pulse_new[pixelIdx] = pixel_forw;
back_pulse_new[pixelIdx] = pixel_back;
}
} // end extrapolation to next depth
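// Reference sketch added for clarity (not part of the original file): a
// single-threaded host version of what the extrapDepths kernel above computes
// for one wavefield. For every frequency j and output sample x it applies a
// (2*M+1)-tap spatial convolution of the padded wavefield with the
// per-(frequency, x) operator, writing into the interior of the padded row.
// The name extrapDepthsReference and its signature are illustrative only.
inline void extrapDepthsReference(fcomp * pulse_new, int nf, int nx, int M,
                                  const fcomp * w_op, const fcomp * pulse_old)
{
    int length_M = 2*M+1;
    int dim_x = nx+2*M;
    for(int j=0; j<nf; ++j){ // frequencies
        for(int x=0; x<nx; ++x){ // output samples in the interior of the padded row
            fcomp acc = fcomp(0.0, 0.0);
            int opIdx = j*nx*length_M + x*length_M; // same operator layout as the kernel
            for(int k=0; k<length_M; ++k){
                acc += w_op[opIdx + k] * pulse_old[j*dim_x + x + k];
            }
            pulse_new[j*dim_x + x + M] = acc;
        }
    }
}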
void extrapolate(int ns, int nextrap, int nz, int nt, int nf, int nx, int M,\
fcomp * w_op_forw, fcomp * forw_pulse, fcomp * w_op_back, fcomp * back_pulse,\
float * image)
{
//define important dimensionality parameters
int length_M = 2*M+1;
int dim_x = nx+2*M;
size_t sizePulse = nf * dim_x;
size_t sizeAllSources = ns * sizePulse;
size_t sizeImage = nz * nx;
size_t sizeAllImages = ns * sizeImage;
size_t sizeOp = nextrap * nf * nx * length_M;
timer t0("EXTRAPOLATION");
timer t1("CONSTRUCT PADDED WAVEFIELDS");
timer t2("READ IMAGES");
//allocate host memory
t1.start();
fcomp * h_image = new fcomp[sizeAllImages];
std::vector<wfpad> h_forw_pulses(ns);
std::vector<wfpad> h_back_pulses(ns);
for(int is=0; is<ns; ++is){
h_forw_pulses[is] = wfpad(nf, nx, 1, M, 0, &forw_pulse[is*nt*nx]);
h_back_pulses[is] = wfpad(nf, nx, 1, M, 0, &back_pulse[is*nt*nx]);
}
t1.stop();
//define device pointers and allocate memory
fcomp * d_forw_pulse, * d_forw_pulse_new;
fcomp * d_back_pulse, * d_back_pulse_new;
fcomp * d_w_op_forw, * d_w_op_back;
fcomp * d_image;
cudaMalloc(&d_forw_pulse, sizeAllSources * sizeof(fcomp));
cudaMalloc(&d_forw_pulse_new, sizeAllSources * sizeof(fcomp));
cudaMalloc(&d_back_pulse, sizeAllSources * sizeof(fcomp));
cudaMalloc(&d_back_pulse_new, sizeAllSources * sizeof(fcomp));
cudaMalloc(&d_w_op_forw, sizeOp * sizeof(fcomp));
cudaMalloc(&d_w_op_back, sizeOp * sizeof(fcomp));
cudaMalloc(&d_image, sizeAllImages * sizeof(fcomp));
//copy operators and wavefields on device
cudaMemcpy(d_w_op_forw, w_op_forw, sizeOp * sizeof(fcomp), cudaMemcpyHostToDevice);
cudaMemcpy(d_w_op_back, w_op_back, sizeOp * sizeof(fcomp), cudaMemcpyHostToDevice);
//define number of blocks and number of threads per block
dim3 nThreads(16, 1, 1);
size_t nBlocks_x = nx % nThreads.x == 0 ? size_t(nx/nThreads.x) : size_t(1 + nx/nThreads.x);
size_t nBlocks_y = nf % nThreads.y == 0 ? size_t(nf/nThreads.y) : size_t(1 + nf/nThreads.y);
size_t nBlocks_z = 1;
dim3 nBlocks(nBlocks_x, nBlocks_y, nBlocks_z);
cudaStream_t streams[ns];
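    // Note added for clarity: one stream per source/shot, so the host-to-device
    // copies, the per-depth kernels (extrapolation, imaging, buffer copy) and the
    // device-to-host image copy of different shots can overlap on the device.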
std::cout << "nThreads: (" << nThreads.x << ", " << nThreads.y << ", " << nThreads.z << ")" << std::endl;
std::cout << "nBlocks: (" << nBlocks.x << ", " << nBlocks.y << ", " << nBlocks.z << ")" << std::endl;
t0.start();
for(int is=0; is<ns; ++is){
cudaStreamCreate(&streams[is]);
cudaMemcpyAsync(&d_forw_pulse[is*sizePulse], h_forw_pulses[is].wf, \
sizePulse*sizeof(fcomp), cudaMemcpyHostToDevice, streams[is]);
cudaMemcpyAsync(&d_back_pulse[is*sizePulse], h_back_pulses[is].wf, \
sizePulse*sizeof(fcomp), cudaMemcpyHostToDevice, streams[is]);
for(int l=0; l<nextrap; ++l){
int depthIdx = l*nx*nf*length_M;
extrapDepths<<<nBlocks, nThreads, 0, streams[is]>>>(&d_forw_pulse_new[is*sizePulse], &d_back_pulse_new[is*sizePulse],\
nf, nx, M, &d_w_op_forw[depthIdx], &d_w_op_back[depthIdx], &d_forw_pulse[is*sizePulse], &d_back_pulse[is*sizePulse]);
imaging<<<1, nx, 0, streams[is]>>>(&d_image[is*sizeImage + l*nx], \
&d_forw_pulse_new[is*sizePulse], &d_back_pulse_new[is*sizePulse],\
nf, nx, M);
copyPadded<<<nBlocks, nThreads, 0, streams[is]>>>(&d_forw_pulse[is*sizePulse], &d_forw_pulse_new[is*sizePulse],\
nf, nx, M);
copyPadded<<<nBlocks, nThreads, 0, streams[is]>>>(&d_back_pulse[is*sizePulse], &d_back_pulse_new[is*sizePulse],\
nf, nx, M);
}
cudaMemcpyAsync(&h_image[is*sizeImage], &d_image[is*sizeImage], \
sizeImage*sizeof(fcomp), cudaMemcpyDeviceToHost, streams[is]);
cudaStreamDestroy(streams[is]);
}
cudaDeviceSynchronize();
t0.stop();
t2.start();
//take real part of images
for(int is=0; is<ns; ++is){
for(int l=0; l<nextrap; ++l){
for(int i=0; i<nx; ++i){
image[is*sizeImage + l*nx + i] = reinterpret_cast<float*>(h_image)[2*(is*sizeImage + l*nx + i)];
}
}
}
t2.stop();
//free device memory
cudaFree(d_forw_pulse);
cudaFree(d_forw_pulse_new);
cudaFree(d_back_pulse);
cudaFree(d_back_pulse_new);
cudaFree(d_w_op_forw);
cudaFree(d_w_op_back);
cudaFree(d_image);
delete [] h_image;
t0.dispInfo();
t1.dispInfo();
t2.dispInfo();
} // end extrapolate
} //end extern "C"
|
a6003c0227610c866d6a0bc3360a3217b68b9593.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "softmax_op.h"
#include "softmax_with_loss_op.h"
#include "spatial_softmax_with_loss_op.h"
namespace caffe2 {
namespace {
__global__ void LabelCrossEntropyKernel(
const int N,
const int D,
const float* logPdata,
const int* labeldata,
const float* weights,
float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
float weight = weights ? weights[i] : 1.0;
Ydata[i] = -logPdata[i * D + labeldata[i]] * weight;
}
}
__global__ void LabelCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = Pdata[idx] - 1.;
}
}
__global__ void LabelCrossEntropyGradientKernelWeighted(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata,
const float* weights) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
int row = i / D;
int d = i % D;
float val = Pdata[i] - 1.0 * (d == labeldata[row]);
float weight = weights[row];
dXdata[i] = val * weight;
}
}
__global__ void ProbCrossEntropyKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
const float* weights,
float* Ydata) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float weight = weights ? weights[i] : 1.0;
float sum = 0.0;
float total_prob = 0.0;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
int idx = i * D + j;
CUDA_KERNEL_ASSERT(labeldata[idx] >= 0);
total_prob += labeldata[idx];
sum += -logf(max(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight;
}
float tot = BlockReduce(temp_storage).Sum(sum);
__syncthreads();
float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob);
if (threadIdx.x == 0) {
Ydata[i] = tot;
// Sanity check
CUDA_KERNEL_ASSERT(abs(1.0 - total_prob_sum) < 1e-5f);
}
__syncthreads();
}
}
__global__ void ProbCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
float* dXdata,
const float* weights) {
if (weights == NULL) {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = Pdata[idx] - labeldata[idx];
}
} else {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weights[idx / D];
}
}
}
__global__ void SpatialSoftmaxKernel(
const int num,
const int D,
const int W,
const int H,
const float* Xdata,
float* Pdata) {
CUDA_1D_KERNEL_LOOP(index, num * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = max(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = exp(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c=0; c<D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
#define DONTCARE (-1)
__global__ void SpatialCrossEntropyLossKernel(
const int N,
const int D,
const int W,
const int H,
const float* Pdata,
const int* label_data,
const float* weights,
float* loss_data,
float* weight_data) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
CUDA_KERNEL_ASSERT(label >= 0 && label < D);
float weight = (weights == NULL ? 1.0 : weights[index]);
loss_data[index] = -log(max(
Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight;
weight_data[index] = weight;
} else {
loss_data[index] = 0;
weight_data[index] = 0;
}
}
}
__global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D,
const int W, const int H, const int* label_data, const float* weights,
float* dX_data, float* weights_) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
int data_idx = i * (H * W * D) + label * (H * W) + y * W + x;
dX_data[data_idx] -= 1.0;
if (weights != NULL) {
float weight = weights[index];
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] *= weight;
}
weights_[index] = weight;
} else {
weights_[index] = 1.0;
}
} else {
      // Ignore-label, so set all gradients for these positions
      // to zero
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] = 0.0;
}
weights_[index] = 0.0;
}
}
}
__global__ void SoftmaxNormalizeLogsKernel(
const int nthreads,
const int D,
const float* logits,
const float* rowmax,
const float* scales,
float* out_log) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out_log[index] = logits[index] - rowmax[n] - logf(max(scales[n], FLT_MIN));
}
}
__global__ void SoftmaxNormalizeKernel(
const int nthreads,
const int D,
const float* probs,
const float* scales,
float* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out[index] = probs[index] / scales[n];
}
}
void Softmax(
const int N,
const int D,
const float* logits,
const float* sum_multiplier,
float* scales,
float* rowmax,
float* probs,
bool log_softmax,
CUDAContext* context) {
const int size = N * D;
math::RowwiseMax<float, CUDAContext>(N, D, logits, rowmax, context);
// Put the intermediate result X - max(X) into Y
context->Copy<float, CUDAContext, CUDAContext>(size, logits, probs);
// Subtract the scale
math::Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
N,
D,
1,
-1,
rowmax,
sum_multiplier,
1,
probs,
context);
// Exponentiation
math::Exp<float, CUDAContext>(size, probs, probs, context);
// Sum exponentiated values
math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier,
0, scales, context);
// Normalize
if (!log_softmax) {
hipLaunchKernelGGL(( SoftmaxNormalizeKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, D, probs, scales, probs);
} else {
hipLaunchKernelGGL(( SoftmaxNormalizeLogsKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, D, logits, rowmax, scales, probs);
}
}
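// Reference sketch added for clarity (not part of the original file): a
// single-threaded host version of what Softmax() above computes on the GPU via
// RowwiseMax/Gemm/Exp/Gemv and the normalize kernels. For each of the N rows of
// D logits it produces probs[i] = exp(x[i] - rowmax) / sum_j exp(x[j] - rowmax),
// or, when log_softmax is true, x[i] - rowmax - log(sum_j exp(x[j] - rowmax)).
// The name SoftmaxReference is illustrative only and assumes the usual <cmath>
// functions are available through the existing includes.
inline void SoftmaxReference(
    const int N,
    const int D,
    const float* logits,
    float* out,
    bool log_softmax) {
  for (int n = 0; n < N; ++n) {
    const float* x = logits + n * D;
    float* y = out + n * D;
    float rowmax = x[0];
    for (int d = 1; d < D; ++d) {
      rowmax = x[d] > rowmax ? x[d] : rowmax;
    }
    float scale = 0.f;
    for (int d = 0; d < D; ++d) {
      scale += expf(x[d] - rowmax);
    }
    for (int d = 0; d < D; ++d) {
      y[d] = log_softmax ? x[d] - rowmax - logf(scale)
                         : expf(x[d] - rowmax) / scale;
    }
  }
}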
} // namespace
template<>
bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
DCHECK_GE(T.ndim(), 2);
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
DCHECK_EQ(T.size(), N);
} else {
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), 1);
}
}
avg_loss->Resize(vector<TIndex>());
if (losses_.size() != N) {
losses_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
losses_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->mutable_data<float>(),
!label_prob_mode_, // logarithmic output
&context_);
// Compute label xent loss per example
if (!label_prob_mode_) {
hipLaunchKernelGGL(( LabelCrossEntropyKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P->data<float>(),
T.data<int>(),
weights,
losses_.mutable_data<float>());
// Since we had logarithmic output, we need to exponentiate
// them again.
math::Exp<float, CUDAContext>(
N * D, P->data<float>(), P->mutable_data<float>(), &context_);
} else {
hipLaunchKernelGGL(( ProbCrossEntropyKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P->data<float>(),
T.data<float>(),
weights,
losses_.mutable_data<float>());
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
hipMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream());
}
// Sum of all losses
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Average of input batch size
if (total_weight > 0) {
math::Scale<float, CUDAContext>(
1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SpatialSoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
DCHECK_EQ(X.ndim(), 4);
DCHECK_EQ(T.ndim(), 3);
DCHECK_EQ(T.dim32(0), N);
int H = X.dim32(2);
int W = X.dim32(3);
if (losses_.size() != N * W * H) {
losses_.Resize(N * W * H);
}
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Xdata = X.data<float>();
float* Pdata = P->mutable_data<float>();
// Softmax for each x,y location
hipLaunchKernelGGL(( SpatialSoftmaxKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, W, H, Xdata, Pdata);
// Cross entropy
avg_loss->Resize(vector<TIndex>());
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_);
const int* label_data = T.data<int>();
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
hipLaunchKernelGGL(( SpatialCrossEntropyLossKernel),
dim3(CAFFE_GET_BLOCKS(N * W * H)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
W,
H,
P->data<float>(),
label_data,
weights,
losses_.mutable_data<float>(),
weights_.mutable_data<float>());
// Somewhat awkward scalar passing from device to host
float h_total_weight;
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
hipMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream());
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, CUDAContext>(
1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
DCHECK_GE(T.ndim(), 2);
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
DCHECK_EQ(T.size(), N);
} else {
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), 1);
}
}
// Subtract 1 from labeled positions
if (!label_prob_mode_) {
if (weights == nullptr) {
// Copy softmax probabilities into dX
if (!only_loss_) {
context_.Copy<float, CUDAContext, CUDAContext>(
P.size(), P.data<float>(), dX->mutable_data<float>());
}
hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, D, P.data<float>(), T.data<int>(), dX->mutable_data<float>());
} else {
// Weighted version gets the Pdata values internally
hipLaunchKernelGGL(( LabelCrossEntropyGradientKernelWeighted),
dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P.data<float>(),
T.data<int>(),
dX->mutable_data<float>(),
weights);
}
} else {
hipLaunchKernelGGL(( ProbCrossEntropyGradientKernel),
dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P.data<float>(),
T.data<float>(),
dX->mutable_data<float>(),
weights);
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
hipMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream());
}
// Scale by d_avg_loss / N
if (total_weight > 0) {
math::Scale<float, CUDAContext>(
dX->size(),
scale_ / total_weight,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
math::Scale<float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
return true;
}
template <>
bool SpatialSoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis = X.canonical_axis_index(1);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
// Spatial mode, compute softmax for each x, y location
DCHECK_EQ(X.ndim(), 4);
DCHECK_EQ(T.ndim(), 3);
int H = X.dim32(2);
int W = X.dim32(3);
dX->ResizeLike(X);
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Pdata = P.data<float>();
float* dX_data = dX->mutable_data<float>();
const int* label_data = T.data<int>();
const float* d_avg_loss_data = d_avg_loss.data<float>();
  // Copy softmax probabilities into dX. Every neuron except the one
  // corresponding to the correct label then has a gradient equal to its
  // softmax probability; the labeled position is adjusted by the kernel below.
context_.Copy<float, CUDAContext, CUDAContext>(P.size(), Pdata, dX_data);
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
hipLaunchKernelGGL(( SpatialSoftmaxLossGradientKernel),
dim3(CAFFE_GET_BLOCKS(N * W * H)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>());
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
// Somewhat awkward scalar passing from device to host
float h_total_weight;
hipMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream());
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, CUDAContext>(
dX->size(),
scale_ / h_total_weight,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
math::Scale<float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
return true;
}
// Implementation for the CUDA context.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* P = Output(0);
const auto canonical_axis = X.canonical_axis_index(axis_);
const int N = X.size_to_dim(canonical_axis);
const int D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
if (scale_.size() != N) {
scale_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
scale_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->mutable_data<float>(),
false,
&context_);
return true;
}
#define SOFTMAX_NUM_THREADS 128
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
namespace {
__global__ void softmax_gradient_kernel(
const int dim,
const float* Y,
const float* dY,
float* dX) {
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
// A two-level reduction to compute the inner products.
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i)
tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
}
} // namespace
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
const auto canonical_axis = Y.canonical_axis_index(axis_);
const int N = Y.size_to_dim(canonical_axis);
const int D = Y.size_from_dim(canonical_axis);
dX->ResizeLike(Y);
hipLaunchKernelGGL(( softmax_gradient_kernel),
dim3(N),
dim3(SOFTMAX_NUM_THREADS),
0,
context_.cuda_stream(),
D, Y.data<float>(), dY.data<float>(), dX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(SoftmaxWithLoss,
SoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient,
SoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLoss,
SpatialSoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLossGradient,
SpatialSoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
| a6003c0227610c866d6a0bc3360a3217b68b9593.cu | #include <cfloat>
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "softmax_op.h"
#include "softmax_with_loss_op.h"
#include "spatial_softmax_with_loss_op.h"
namespace caffe2 {
namespace {
__global__ void LabelCrossEntropyKernel(
const int N,
const int D,
const float* logPdata,
const int* labeldata,
const float* weights,
float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
float weight = weights ? weights[i] : 1.0;
Ydata[i] = -logPdata[i * D + labeldata[i]] * weight;
}
}
__global__ void LabelCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = Pdata[idx] - 1.;
}
}
__global__ void LabelCrossEntropyGradientKernelWeighted(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata,
const float* weights) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
int row = i / D;
int d = i % D;
float val = Pdata[i] - 1.0 * (d == labeldata[row]);
float weight = weights[row];
dXdata[i] = val * weight;
}
}
__global__ void ProbCrossEntropyKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
const float* weights,
float* Ydata) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float weight = weights ? weights[i] : 1.0;
float sum = 0.0;
float total_prob = 0.0;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
int idx = i * D + j;
CUDA_KERNEL_ASSERT(labeldata[idx] >= 0);
total_prob += labeldata[idx];
sum += -logf(max(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight;
}
float tot = BlockReduce(temp_storage).Sum(sum);
__syncthreads();
float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob);
if (threadIdx.x == 0) {
Ydata[i] = tot;
// Sanity check
CUDA_KERNEL_ASSERT(abs(1.0 - total_prob_sum) < 1e-5f);
}
__syncthreads();
}
}
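// Note added for clarity: the kernel above computes, per example i,
// Ydata[i] = -weight_i * sum_j labeldata[i][j] * log(max(Pdata[i][j], FLT_MIN)),
// i.e. the weighted cross entropy against a soft label distribution, and
// asserts that each row of labeldata sums to (approximately) one.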
__global__ void ProbCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
float* dXdata,
const float* weights) {
if (weights == NULL) {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = Pdata[idx] - labeldata[idx];
}
} else {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weights[idx / D];
}
}
}
__global__ void SpatialSoftmaxKernel(
const int num,
const int D,
const int W,
const int H,
const float* Xdata,
float* Pdata) {
CUDA_1D_KERNEL_LOOP(index, num * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = max(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = exp(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c=0; c<D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
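// Note added for clarity: the kernel above performs a softmax over the D
// channels independently for every (image, y, x) position, using the NCHW
// layout index i * (D * H * W) + c * (H * W) + y * W + x, so the channel
// stride is H * W.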
#define DONTCARE (-1)
__global__ void SpatialCrossEntropyLossKernel(
const int N,
const int D,
const int W,
const int H,
const float* Pdata,
const int* label_data,
const float* weights,
float* loss_data,
float* weight_data) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
CUDA_KERNEL_ASSERT(label >= 0 && label < D);
float weight = (weights == NULL ? 1.0 : weights[index]);
loss_data[index] = -log(max(
Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight;
weight_data[index] = weight;
} else {
loss_data[index] = 0;
weight_data[index] = 0;
}
}
}
__global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D,
const int W, const int H, const int* label_data, const float* weights,
float* dX_data, float* weights_) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
int data_idx = i * (H * W * D) + label * (H * W) + y * W + x;
dX_data[data_idx] -= 1.0;
if (weights != NULL) {
float weight = weights[index];
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] *= weight;
}
weights_[index] = weight;
} else {
weights_[index] = 1.0;
}
} else {
      // Ignore-label, so set all gradients for these positions
      // to zero
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] = 0.0;
}
weights_[index] = 0.0;
}
}
}
__global__ void SoftmaxNormalizeLogsKernel(
const int nthreads,
const int D,
const float* logits,
const float* rowmax,
const float* scales,
float* out_log) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out_log[index] = logits[index] - rowmax[n] - logf(max(scales[n], FLT_MIN));
}
}
__global__ void SoftmaxNormalizeKernel(
const int nthreads,
const int D,
const float* probs,
const float* scales,
float* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out[index] = probs[index] / scales[n];
}
}
void Softmax(
const int N,
const int D,
const float* logits,
const float* sum_multiplier,
float* scales,
float* rowmax,
float* probs,
bool log_softmax,
CUDAContext* context) {
const int size = N * D;
math::RowwiseMax<float, CUDAContext>(N, D, logits, rowmax, context);
// Put the intermediate result X - max(X) into Y
context->Copy<float, CUDAContext, CUDAContext>(size, logits, probs);
// Subtract the scale
math::Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
N,
D,
1,
-1,
rowmax,
sum_multiplier,
1,
probs,
context);
// Exponentiation
math::Exp<float, CUDAContext>(size, probs, probs, context);
// Sum exponentiated values
math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier,
0, scales, context);
// Normalize
if (!log_softmax) {
SoftmaxNormalizeKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, D, probs, scales, probs);
} else {
SoftmaxNormalizeLogsKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, D, logits, rowmax, scales, probs);
}
}
} // namespace
template<>
bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
DCHECK_GE(T.ndim(), 2);
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
DCHECK_EQ(T.size(), N);
} else {
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), 1);
}
}
avg_loss->Resize(vector<TIndex>());
if (losses_.size() != N) {
losses_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
losses_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->mutable_data<float>(),
!label_prob_mode_, // logarithmic output
&context_);
// Compute label xent loss per example
if (!label_prob_mode_) {
LabelCrossEntropyKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P->data<float>(),
T.data<int>(),
weights,
losses_.mutable_data<float>());
// Since we had logarithmic output, we need to exponentiate
// them again.
math::Exp<float, CUDAContext>(
N * D, P->data<float>(), P->mutable_data<float>(), &context_);
} else {
ProbCrossEntropyKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P->data<float>(),
T.data<float>(),
weights,
losses_.mutable_data<float>());
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
cudaMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream());
}
// Sum of all losses
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Average of input batch size
if (total_weight > 0) {
math::Scale<float, CUDAContext>(
1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SpatialSoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
DCHECK_EQ(X.ndim(), 4);
DCHECK_EQ(T.ndim(), 3);
DCHECK_EQ(T.dim32(0), N);
int H = X.dim32(2);
int W = X.dim32(3);
if (losses_.size() != N * W * H) {
losses_.Resize(N * W * H);
}
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Xdata = X.data<float>();
float* Pdata = P->mutable_data<float>();
// Softmax for each x,y location
SpatialSoftmaxKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, W, H, Xdata, Pdata);
// Cross entropy
avg_loss->Resize(vector<TIndex>());
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_);
const int* label_data = T.data<int>();
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
SpatialCrossEntropyLossKernel<<<
CAFFE_GET_BLOCKS(N * W * H),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
W,
H,
P->data<float>(),
label_data,
weights,
losses_.mutable_data<float>(),
weights_.mutable_data<float>());
// Somewhat awkward scalar passing from device to host
float h_total_weight;
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
cudaMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream());
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, CUDAContext>(
1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
DCHECK_GE(T.ndim(), 2);
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
DCHECK_EQ(T.size(), N);
} else {
DCHECK_EQ(T.size_to_dim(canonical_axis), N);
DCHECK_EQ(T.size_from_dim(canonical_axis), 1);
}
}
// Subtract 1 from labeled positions
if (!label_prob_mode_) {
if (weights == nullptr) {
// Copy softmax probabilities into dX
if (!only_loss_) {
context_.Copy<float, CUDAContext, CUDAContext>(
P.size(), P.data<float>(), dX->mutable_data<float>());
}
LabelCrossEntropyGradientKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, D, P.data<float>(), T.data<int>(), dX->mutable_data<float>());
} else {
// Weighted version gets the Pdata values internally
LabelCrossEntropyGradientKernelWeighted<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<int>(),
dX->mutable_data<float>(),
weights);
}
} else {
ProbCrossEntropyGradientKernel<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<float>(),
dX->mutable_data<float>(),
weights);
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
cudaMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream());
}
// Scale by d_avg_loss / N
if (total_weight > 0) {
math::Scale<float, CUDAContext>(
dX->size(),
scale_ / total_weight,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
math::Scale<float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
return true;
}
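// Summary sketch added for clarity (not part of the original file): for hard
// labels the gradient op above computes, per example i and class j,
//   dX[i][j] = (scale / total_weight) * d_avg_loss * (P[i][j] - (j == T[i])),
// optionally multiplied by the per-example weight (with total_weight then being
// the sum of weights); for soft labels the indicator is replaced by the target
// distribution T[i][j]. A minimal single-threaded version of the unweighted
// hard-label case follows; the name SoftmaxWithLossGradientReference is
// illustrative only.
inline void SoftmaxWithLossGradientReference(
    const int N,
    const int D,
    const float* P,
    const int* T,
    const float d_avg_loss,
    const float scale,
    float* dX) {
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < D; ++j) {
      const float g = P[i * D + j] - (j == T[i] ? 1.f : 0.f);
      // total_weight == N when no per-example weights are given
      dX[i * D + j] = (scale / N) * d_avg_loss * g;
    }
  }
}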
template <>
bool SpatialSoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis = X.canonical_axis_index(1);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
// Spatial mode, compute softmax for each x, y location
DCHECK_EQ(X.ndim(), 4);
DCHECK_EQ(T.ndim(), 3);
int H = X.dim32(2);
int W = X.dim32(3);
dX->ResizeLike(X);
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Pdata = P.data<float>();
float* dX_data = dX->mutable_data<float>();
const int* label_data = T.data<int>();
const float* d_avg_loss_data = d_avg_loss.data<float>();
  // Copy softmax probabilities into dX. Every neuron except the one
  // corresponding to the correct label then has a gradient equal to its
  // softmax probability; the labeled position is adjusted by the kernel below.
context_.Copy<float, CUDAContext, CUDAContext>(P.size(), Pdata, dX_data);
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
SpatialSoftmaxLossGradientKernel<<<
CAFFE_GET_BLOCKS(N * W * H),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>());
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
// Somewhat awkward scalar passing from device to host
float h_total_weight;
cudaMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream());
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, CUDAContext>(
dX->size(),
scale_ / h_total_weight,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
math::Scale<float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
return true;
}
// Implementation for the CUDA context.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* P = Output(0);
const auto canonical_axis = X.canonical_axis_index(axis_);
const int N = X.size_to_dim(canonical_axis);
const int D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
if (scale_.size() != N) {
scale_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
scale_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->mutable_data<float>(),
false,
&context_);
return true;
}
#define SOFTMAX_NUM_THREADS 128
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
namespace {
__global__ void softmax_gradient_kernel(
const int dim,
const float* Y,
const float* dY,
float* dX) {
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
// A two-level reduction to compute the inner products.
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i)
tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
}
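// Reference sketch added for clarity (not part of the original file): with
// Y = softmax(X) the Jacobian is dY_i/dX_j = Y_i * (delta_ij - Y_j), so the
// kernel above computes, per row, dX_i = Y_i * (dY_i - sum_j dY_j * Y_j).
// A minimal single-threaded version; the name softmax_gradient_reference is
// illustrative only.
inline void softmax_gradient_reference(
    const int N,
    const int D,
    const float* Y,
    const float* dY,
    float* dX) {
  for (int n = 0; n < N; ++n) {
    const float* y = Y + n * D;
    const float* dy = dY + n * D;
    float* dx = dX + n * D;
    float dot = 0.f;
    for (int i = 0; i < D; ++i) {
      dot += dy[i] * y[i];
    }
    for (int i = 0; i < D; ++i) {
      dx[i] = y[i] * (dy[i] - dot);
    }
  }
}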
} // namespace
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
const auto canonical_axis = Y.canonical_axis_index(axis_);
const int N = Y.size_to_dim(canonical_axis);
const int D = Y.size_from_dim(canonical_axis);
dX->ResizeLike(Y);
softmax_gradient_kernel<<<
N,
SOFTMAX_NUM_THREADS,
0,
context_.cuda_stream()>>>(
D, Y.data<float>(), dY.data<float>(), dX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(SoftmaxWithLoss,
SoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient,
SoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLoss,
SpatialSoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLossGradient,
SpatialSoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
|
6ac560123b598cda30eb699205a8fe078a840647.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32, true>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_seqaligned_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_seqaligned_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32, true>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_seqaligned_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| 6ac560123b598cda30eb699205a8fe078a840647.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32, true>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_seqaligned_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_seqaligned_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32, true>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_seqaligned_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k32_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k32_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
48803654c89cb5f015262efe9d3bb94f7515529a.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// This algorithm is a variation of BUF divided into two stages.
// Initially, CCL is performed on separate blocks using shared memory.
// Then, labels on block borders are merged.
// This algorithm performs worse than the original BUF on every dataset, at least when using an Nvidia Quadro 2200K.
#define BLOCK_ROWS 32
#define BLOCK_COLS 32
using namespace cv;
// The algorithm itself has good performance, but memory allocation is a problem.
// I will try to reduce it.
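// Note added for clarity: in both kernels below each thread owns a 2x2 block of
// pixels (row and col are multiplied by 2), so one BLOCK_ROWS x BLOCK_COLS
// thread block covers a (2*BLOCK_ROWS) x (2*BLOCK_COLS) pixel tile. LocalMerge
// solves CCL inside each tile with a union-find table held in shared memory,
// and GlobalMerge then only merges labels across tile borders. The host-side
// launch configuration is not shown in this excerpt.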
namespace {
// Only use it with unsigned numeric types
template <typename T>
__device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) {
return (bitmap >> pos) & 1;
}
//__device__ __forceinline__ void SetBit(unsigned char &bitmap, unsigned char pos) {
// bitmap |= (1 << pos);
//}
// Returns the root index of the UFTree
__device__ unsigned Find(const int *s_buf, unsigned n) {
// Warning: do not call Find on a background pixel
while (s_buf[n] != n) {
n = s_buf[n];
}
return n;
}
__device__ unsigned FindAndCompress(int *s_buf, unsigned n) {
unsigned id = n;
while (s_buf[n] != n) {
n = s_buf[n];
s_buf[id] = n;
}
return n;
}
// Merges the UFTrees of a and b, linking one root to the other
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a);
done = (old == b);
b = old;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b);
done = (old == a);
a = old;
}
else {
done = true;
}
} while (!done);
}
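// First stage: each thread labels a 2x2 block of pixels. Union-find runs on a
// per-CUDA-block forest held in shared memory, and the local roots are then
// converted to global label indices before being written to the label image.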
__global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
unsigned img_index = row * img.step + col;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
__shared__ int buf[BLOCK_ROWS * BLOCK_COLS];
unsigned buf_index = threadIdx.y * BLOCK_COLS + threadIdx.x;
if (row < labels.rows && col < labels.cols) {
buf[buf_index] = buf_index;
}
__syncthreads();
if (row < labels.rows && col < labels.cols) {
// 0|1 2|3
// --+---+--
// 4|A B|
// 5|C D|
// --+---+
unsigned char P = 0;
if ((threadIdx.x > 0 || threadIdx.y > 0)) {
if (img[img_index]) {
P |= 0x37; // 00110111
}
}
if ((threadIdx.y > 0 || threadIdx.x < BLOCK_COLS - 1) && (col + 1 < img.cols)) {
if (img[img_index + 1]) {
P |= 0x0E; // 00001110
}
}
if ((threadIdx.x > 0) && (row + 1 < img.rows)) {
if (img[img_index + img.step]) {
P |= 0x30; // 00110000
}
}
if (threadIdx.x == 0) {
P &= 0xCE; // 11001110
}
if (col + 1 >= img.cols) {
P &= 0xF3; // 11110011
}
else if ((threadIdx.x + 1 == BLOCK_COLS) || (col + 2 >= img.cols)) {
P &= 0xF7; // 11110111
}
if (threadIdx.y == 0) {
P &= 0xF0; // 11110000
}
if (row + 1 >= img.rows) {
P &= 0xDF; // 11011111
}
// P is now ready to be used to find neighbour blocks (or it should be)
// P value avoids range errors
if (P > 0) {
if (HasBit(P, 0) && img[img_index - img.step - 1]) {
Union(buf, buf_index, buf_index - BLOCK_COLS - 1);
}
if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) {
Union(buf, buf_index, buf_index - BLOCK_COLS);
}
if (HasBit(P, 3) && img[img_index + 2 - img.step]) {
Union(buf, buf_index, buf_index + 1 - BLOCK_COLS);
}
if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 5) && img[img_index + img.step - 1])) {
Union(buf, buf_index, buf_index - 1);
}
}
}
__syncthreads();
// Local compression
if (row < labels.rows && col < labels.cols) {
unsigned f = FindAndCompress(buf, buf_index);
unsigned f_row = f / BLOCK_COLS;
unsigned f_col = f % BLOCK_COLS;
unsigned global_f = 2 * (blockIdx.y * BLOCK_ROWS + f_row) * (labels.step / labels.elem_size) + 2 * (blockIdx.x * BLOCK_COLS + f_col);
labels.data[labels_index] = global_f;
}
}
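// Second stage: threads lying on the border of a CUDA block merge their labels
// with the neighbouring blocks, using the global label image as the union-find array.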
__global__ void GlobalMerge(cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
unsigned img_index = row * img.step + col;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned char P = 0;
if (((threadIdx.x == 0 && col > 0) || (threadIdx.y == 0 && row > 0))) {
if (img[img_index]) {
P |= 0x37; // 00110111
}
}
if (((threadIdx.y == 0 && row > 0) || (threadIdx.x == BLOCK_COLS - 1 && col + 2 < img.cols)) && (col + 1 < img.cols)) {
if (img[img_index + 1]) {
P |= 0x0E; // 00001110
}
}
if ((threadIdx.x == 0 && col > 0) && (row + 1 < img.rows)) {
if (img[img_index + img.step]) {
P |= 0x30; // 00110000
}
}
if (col == 0) {
P &= 0xCE; // 11001110
}
if (col + 1 >= img.cols) {
P &= 0xF3; // 11110011
}
else if (col + 2 >= img.cols) {
P &= 0xF7; // 11110111
}
if (row == 0) {
P &= 0xF0; // 11110000
}
if (row + 1 >= img.rows) {
P &= 0xDF; // 11011111
}
// P is now ready to be used to find neighbour blocks (or it should be)
// P value avoids range errors
if (P > 0) {
if (HasBit(P, 0) && img[img_index - img.step - 1]) {
Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) - 2);
}
if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) {
Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size));
}
if (HasBit(P, 3) && img[img_index + 2 - img.step]) {
Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2);
}
if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 5) && img[img_index + img.step - 1])) {
Union(labels.data, labels_index, labels_index - 2);
}
}
}
}
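// Flattens the union-find trees so that every block label points directly to its root.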
__global__ void Compression(cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
//labels[labels_index] = Find(labels.data, labels_index);
FindAndCompress(labels.data, labels_index);
}
}
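// Writes the final labels: every pixel of the 2x2 block receives the block's
// root label + 1 if it is foreground, 0 if it is background.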
__global__ void FinalLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
unsigned img_index = row * (img.step / img.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
int label = labels[labels_index] + 1;
if (img[img_index])
labels[labels_index] = label;
else {
labels[labels_index] = 0;
}
if (col + 1 < labels.cols) {
if (img[img_index + 1])
labels[labels_index + 1] = label;
else {
labels[labels_index + 1] = 0;
}
if (row + 1 < labels.rows) {
if (img[img_index + img.step + 1])
labels[labels_index + (labels.step / labels.elem_size) + 1] = label;
else {
labels[labels_index + (labels.step / labels.elem_size) + 1] = 0;
}
}
}
if (row + 1 < labels.rows) {
if (img[img_index + img.step])
labels[labels_index + (labels.step / labels.elem_size)] = label;
else {
labels[labels_index + (labels.step / labels.elem_size)] = 0;
}
}
}
}
}
class BUF_2S : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
BUF_2S() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//cuda::GpuMat d_expanded_connections;
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1);
//ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections);
//Mat1b expanded_connections;
//d_expanded_connections.download(expanded_connections);
//d_expanded_connections.release();
//Mat1i local_labels;
//cuda::GpuMat d_local_merge;
//d_img_labels_.copyTo(d_local_merge);
//FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_local_merge);
//d_local_merge.download(local_labels);
GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//Mat1i block_info_final;
//d_img_labels_.download(block_info_final);
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//d_img_labels_.download(img_labels_);
hipDeviceSynchronize();
}
private:
void Alloc() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
}
void Dealloc() {
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void AllScans() {
grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//cuda::GpuMat d_expanded_connections;
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1);
//ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections);
//Mat1b expanded_connections;
//d_expanded_connections.download(expanded_connections);
//d_expanded_connections.release();
//Mat1i init_labels;
//d_block_labels_.download(init_labels);
GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//Mat1i block_info_final;
//d_img_labels_.download(block_info_final);
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
// d_img_labels_.download(img_labels_);
hipDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
perf_.start();
Alloc();
perf_.stop();
double alloc_timing = perf_.last();
perf_.start();
AllScans();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
perf_.start();
Dealloc();
perf_.stop();
double dealloc_timing = perf_.last();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(BUF_2S);
| 48803654c89cb5f015262efe9d3bb94f7515529a.cu | #include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// This algorithm is a variation of BUF divided into two stages.
// Initially, CCL is performed on separate blocks using shared memory.
// Then, labels on block borders are merged.
// This algorithm performs worse than the original BUF on every dataset, at least when using an Nvidia Quadro 2200K.
#define BLOCK_ROWS 32
#define BLOCK_COLS 32
using namespace cv;
// The algorithm itself has good performance, but memory allocation is a problem.
// I will try to reduce it.
namespace {
// Only use it with unsigned numeric types
template <typename T>
__device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) {
return (bitmap >> pos) & 1;
}
//__device__ __forceinline__ void SetBit(unsigned char &bitmap, unsigned char pos) {
// bitmap |= (1 << pos);
//}
// Returns the root index of the UFTree
__device__ unsigned Find(const int *s_buf, unsigned n) {
// Warning: do not call Find on a background pixel
while (s_buf[n] != n) {
n = s_buf[n];
}
return n;
}
__device__ unsigned FindAndCompress(int *s_buf, unsigned n) {
unsigned id = n;
while (s_buf[n] != n) {
n = s_buf[n];
s_buf[id] = n;
}
return n;
}
// Merges the UFTrees of a and b, linking one root to the other
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a);
done = (old == b);
b = old;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b);
done = (old == a);
a = old;
}
else {
done = true;
}
} while (!done);
}
__global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
unsigned img_index = row * img.step + col;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
__shared__ int buf[BLOCK_ROWS * BLOCK_COLS];
unsigned buf_index = threadIdx.y * BLOCK_COLS + threadIdx.x;
if (row < labels.rows && col < labels.cols) {
buf[buf_index] = buf_index;
}
__syncthreads();
if (row < labels.rows && col < labels.cols) {
// 0|1 2|3
// --+---+--
// 4|A B|
// 5|C D|
// --+---+
unsigned char P = 0;
if ((threadIdx.x > 0 || threadIdx.y > 0)) {
if (img[img_index]) {
P |= 0x37; // 00110111
}
}
if ((threadIdx.y > 0 || threadIdx.x < BLOCK_COLS - 1) && (col + 1 < img.cols)) {
if (img[img_index + 1]) {
P |= 0x0E; // 00001110
}
}
if ((threadIdx.x > 0) && (row + 1 < img.rows)) {
if (img[img_index + img.step]) {
P |= 0x30; // 00110000
}
}
if (threadIdx.x == 0) {
P &= 0xCE; // 11001110
}
if (col + 1 >= img.cols) {
P &= 0xF3; // 11110011
}
else if ((threadIdx.x + 1 == BLOCK_COLS) || (col + 2 >= img.cols)) {
P &= 0xF7; // 11110111
}
if (threadIdx.y == 0) {
P &= 0xF0; // 11110000
}
if (row + 1 >= img.rows) {
P &= 0xDF; // 11011111
}
// P is now ready to be used to find neighbour blocks (or it should be)
// P value avoids range errors
if (P > 0) {
if (HasBit(P, 0) && img[img_index - img.step - 1]) {
Union(buf, buf_index, buf_index - BLOCK_COLS - 1);
}
if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) {
Union(buf, buf_index, buf_index - BLOCK_COLS);
}
if (HasBit(P, 3) && img[img_index + 2 - img.step]) {
Union(buf, buf_index, buf_index + 1 - BLOCK_COLS);
}
if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 5) && img[img_index + img.step - 1])) {
Union(buf, buf_index, buf_index - 1);
}
}
}
__syncthreads();
// Local compression
if (row < labels.rows && col < labels.cols) {
unsigned f = FindAndCompress(buf, buf_index);
unsigned f_row = f / BLOCK_COLS;
unsigned f_col = f % BLOCK_COLS;
unsigned global_f = 2 * (blockIdx.y * BLOCK_ROWS + f_row) * (labels.step / labels.elem_size) + 2 * (blockIdx.x * BLOCK_COLS + f_col);
labels.data[labels_index] = global_f;
}
}
__global__ void GlobalMerge(cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
unsigned img_index = row * img.step + col;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned char P = 0;
if (((threadIdx.x == 0 && col > 0) || (threadIdx.y == 0 && row > 0))) {
if (img[img_index]) {
P |= 0x37; // 00110111
}
}
if (((threadIdx.y == 0 && row > 0) || (threadIdx.x == BLOCK_COLS - 1 && col + 2 < img.cols)) && (col + 1 < img.cols)) {
if (img[img_index + 1]) {
P |= 0x0E; // 00001110
}
}
if ((threadIdx.x == 0 && col > 0) && (row + 1 < img.rows)) {
if (img[img_index + img.step]) {
P |= 0x30; // 00110000
}
}
if (col == 0) {
P &= 0xCE; // 11001110
}
if (col + 1 >= img.cols) {
P &= 0xF3; // 11110011
}
else if (col + 2 >= img.cols) {
P &= 0xF7; // 11110111
}
if (row == 0) {
P &= 0xF0; // 11110000
}
if (row + 1 >= img.rows) {
P &= 0xDF; // 11011111
}
// P is now ready to be used to find neighbour blocks (or it should be)
// P value avoids range errors
if (P > 0) {
if (HasBit(P, 0) && img[img_index - img.step - 1]) {
Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) - 2);
}
if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) {
Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size));
}
if (HasBit(P, 3) && img[img_index + 2 - img.step]) {
Union(labels.data, labels_index, labels_index - 2 * (labels.step / labels.elem_size) + 2);
}
if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 5) && img[img_index + img.step - 1])) {
Union(labels.data, labels_index, labels_index - 2);
}
}
}
}
__global__ void Compression(cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
//labels[labels_index] = Find(labels.data, labels_index);
FindAndCompress(labels.data, labels_index);
}
}
__global__ void FinalLabeling(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned row = (blockIdx.y * BLOCK_ROWS + threadIdx.y) * 2;
unsigned col = (blockIdx.x * BLOCK_COLS + threadIdx.x) * 2;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
unsigned img_index = row * (img.step / img.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
int label = labels[labels_index] + 1;
if (img[img_index])
labels[labels_index] = label;
else {
labels[labels_index] = 0;
}
if (col + 1 < labels.cols) {
if (img[img_index + 1])
labels[labels_index + 1] = label;
else {
labels[labels_index + 1] = 0;
}
if (row + 1 < labels.rows) {
if (img[img_index + img.step + 1])
labels[labels_index + (labels.step / labels.elem_size) + 1] = label;
else {
labels[labels_index + (labels.step / labels.elem_size) + 1] = 0;
}
}
}
if (row + 1 < labels.rows) {
if (img[img_index + img.step])
labels[labels_index + (labels.step / labels.elem_size)] = label;
else {
labels[labels_index + (labels.step / labels.elem_size)] = 0;
}
}
}
}
}
class BUF_2S : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
BUF_2S() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//cuda::GpuMat d_expanded_connections;
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1);
//ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections);
//Mat1b expanded_connections;
//d_expanded_connections.download(expanded_connections);
//d_expanded_connections.release();
//Mat1i local_labels;
//cuda::GpuMat d_local_merge;
//d_img_labels_.copyTo(d_local_merge);
//FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_local_merge);
//d_local_merge.download(local_labels);
GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//Mat1i block_info_final;
//d_img_labels_.download(block_info_final);
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//d_img_labels_.download(img_labels_);
cudaDeviceSynchronize();
}
private:
void Alloc() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
}
void Dealloc() {
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void AllScans() {
grid_size_ = dim3((((d_img_.cols + 1) / 2) + BLOCK_COLS - 1) / BLOCK_COLS, (((d_img_.rows + 1) / 2) + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//cuda::GpuMat d_expanded_connections;
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1);
//ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections);
//Mat1b expanded_connections;
//d_expanded_connections.download(expanded_connections);
//d_expanded_connections.release();
//Mat1i init_labels;
//d_block_labels_.download(init_labels);
GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//Mat1i block_info_final;
//d_img_labels_.download(block_info_final);
Compression << <grid_size_, block_size_ >> > (d_img_labels_);
FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
// d_img_labels_.download(img_labels_);
cudaDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
perf_.start();
Alloc();
perf_.stop();
double alloc_timing = perf_.last();
perf_.start();
AllScans();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
perf_.start();
Dealloc();
perf_.stop();
double dealloc_timing = perf_.last();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(BUF_2S);
|
3d62dcc55395065d5b96c8c823fb332ba8306984.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************************
dopoffs.c
Takes the delay-correction polynomial for a Doppler dataset and figures out the COM
Doppler corrections (in units of Doppler bins) for each frame.
Modified 2015 June 3 by CM:
Implement smearing for the "fit" and "write" actions
Modified 2006 June 21 by CM:
Changed dopres to dop_per_bin
Modified 2003 April 26 by CM:
Removed delay computation
*****************************************************************************************/
extern "C" {
#include "../shape/head.h"
}
__global__ void dopoffs_krnl(struct dat_t *ddat, int s, int nframes) {
/* nframes-threaded kernel */
int f = blockIdx.x * blockDim.x + threadIdx.x;
int k, n;
double dop, arg, x;
if (f < nframes) {
for (k=0; k<ddat->set[s].desc.doppler.nviews; k++) {
x = 1.0;
dop = 0.0;
arg = ddat->set[s].desc.doppler.frame[f].view[k].t -
ddat->set[s].desc.doppler.delcor.t0;
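/* Accumulate d(delay)/dt: the derivative of the delay-correction
   polynomial, evaluated at arg = t - t0 */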
for (n=1; n<=ddat->set[s].desc.doppler.delcor.n; n++) {
dop += n*ddat->set[s].desc.doppler.delcor.a[n].val*x;
x *= arg;
}
/* dop has units of usec/day and there are 86400 sec/day */
ddat->set[s].desc.doppler.frame[f].view[k].dopoff =
-dop*ddat->set[s].desc.doppler.Ftx
/ (ddat->set[s].desc.doppler.dop_per_bin*86400.0);
}
}
}
__host__ void dopoffs_gpu(struct dat_t *ddat, int s, int nframes)
{
dim3 BLK,THD;
/* Launch nframes-threaded kernel */
THD.x = nframes;
hipLaunchKernelGGL(( dopoffs_krnl), dim3(1),dim3(THD), 0, 0, ddat, s, nframes);
checkErrorAfterKernelLaunch("dopoffs_krnl");
}
| 3d62dcc55395065d5b96c8c823fb332ba8306984.cu | /*****************************************************************************************
dopoffs.c
Takes the delay-correction polynomial for a Doppler dataset and figures out the COM
Doppler corrections (in units of Doppler bins) for each frame.
Modified 2015 June 3 by CM:
Implement smearing for the "fit" and "write" actions
Modified 2006 June 21 by CM:
Changed dopres to dop_per_bin
Modified 2003 April 26 by CM:
Removed delay computation
*****************************************************************************************/
extern "C" {
#include "../shape/head.h"
}
__global__ void dopoffs_krnl(struct dat_t *ddat, int s, int nframes) {
/* nframes-threaded kernel */
int f = blockIdx.x * blockDim.x + threadIdx.x;
int k, n;
double dop, arg, x;
if (f < nframes) {
for (k=0; k<ddat->set[s].desc.doppler.nviews; k++) {
x = 1.0;
dop = 0.0;
arg = ddat->set[s].desc.doppler.frame[f].view[k].t -
ddat->set[s].desc.doppler.delcor.t0;
for (n=1; n<=ddat->set[s].desc.doppler.delcor.n; n++) {
dop += n*ddat->set[s].desc.doppler.delcor.a[n].val*x;
x *= arg;
}
/* dop has units of usec/day and there are 86400 sec/day */
ddat->set[s].desc.doppler.frame[f].view[k].dopoff =
-dop*ddat->set[s].desc.doppler.Ftx
/ (ddat->set[s].desc.doppler.dop_per_bin*86400.0);
}
}
}
__host__ void dopoffs_gpu(struct dat_t *ddat, int s, int nframes)
{
dim3 BLK,THD;
/* Launch nframes-threaded kernel */
THD.x = nframes;
dopoffs_krnl<<<1,THD>>>(ddat, s, nframes);
checkErrorAfterKernelLaunch("dopoffs_krnl");
}
|
1dea2c9e9b60ac602b6296d46073881c4a9f7ce8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef SOFTMAX_LAYER_CUH_
#define SOFTMAX_LAYER_CUH_
#include <assert.h>
#include <math.h>
#include "basics/layer.hpp"
#include "basics/tensor.cu"
#include "basics/session.hpp"
// TODO: implement CUDA kernel for backward()
#define BLOCKDIM 32
namespace SoftmaxGPUKernels {
template <class Dtype>
__global__ void ForwardGPUKernel(Tensor<Dtype>* bottom, Tensor<Dtype>* top) {
const int batch_idx = threadIdx.x;
const int batch_size = int(bottom->GetDims()[0]);
const int nchannels = int(bottom->GetDims()[3]);
Dtype max_value = 0;
for (int j = 0; j < nchannels; ++j) {
if (bottom->at(batch_idx,0,0,j) > max_value) {
max_value = bottom->at(batch_idx,0,0,j);
}
}
Dtype denominator = 0;
for (int j = 0; j < nchannels; ++j) {
top->at(batch_idx,0,0,j) = (Dtype) exp(bottom->at(batch_idx,0,0,j)-max_value);
denominator += top->at(batch_idx,0,0,j);
}
assert(denominator != 0);
for (int j = 0; j < nchannels; ++j) {
top->at(batch_idx,0,0,j) = top->at(batch_idx,0,0,j) / denominator;
}
}
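// Device-side wrapper: checks tensor shapes and launches ForwardGPUKernel with
// one thread per batch element (device-side kernel launch / dynamic parallelism).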
template <class Dtype>
__global__ void ForwardGPU(Tensor<Dtype>* bottom, Tensor<Dtype>* top) {
assert(bottom->GetDims()[1] == 1); // The dimension of the 2nd channel should be 1
assert(bottom->GetDims()[2] == 1); // The dimension of the 3rd channel should be 1
assert(bottom->GetDims()[0] == top->GetDims()[0]); // bottom channel should be equal to top channel
assert(bottom->GetDims()[1] == top->GetDims()[1]);
assert(bottom->GetDims()[2] == top->GetDims()[2]);
assert(bottom->GetDims()[3] == top->GetDims()[3]);
hipLaunchKernelGGL(( SoftmaxGPUKernels::ForwardGPUKernel<Dtype>) , dim3(1),dim3(bottom->GetDims()[0]), 0, 0, bottom, top);
}
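// Backward pass: applies the softmax Jacobian diag(y) - y*y^T to the incoming
// gradient, one thread per batch element.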
template <class Dtype>
__global__ void BackwardGPU(Tensor<Dtype>* top, Tensor<Dtype>* top_diff,
Tensor<Dtype>* bottom, Tensor<Dtype>* bottom_diff) {
int batch_idx = threadIdx.x;
int nchannels = top->GetDims()[3];
for (int i = 0; i < nchannels; ++i) {
bottom_diff->at(batch_idx,0,0,i) = 0;
for (int j = 0; j < nchannels; ++j) {
if (i==j) {
bottom_diff->at(batch_idx,0,0,i) +=
top->at(batch_idx,0,0,i) * (1-top->at(batch_idx,0,0,j)) * top_diff->at(batch_idx,0,0,j);
} else {
bottom_diff->at(batch_idx,0,0,i) -=
top->at(batch_idx,0,0,i) * top->at(batch_idx,0,0,j) * top_diff->at(batch_idx,0,0,j);
}
}
}
}
}
template <class Dtype>
class Softmax: public Layer<Dtype> {
public:
Softmax() {}
~Softmax() {}
void Forward(const std::vector<Tensor<Dtype>*>&, const std::vector<Tensor<Dtype>*>&);
void Backward(const std::vector<Tensor<Dtype>*>&, const std::vector<Tensor<Dtype>*>&,
const std::vector<Tensor<Dtype>*>&, const std::vector<Tensor<Dtype>*>&);
void GetTopsDims(const std::vector<size_t*> &, const std::vector<size_t*> &);
private:
};
template <class Dtype>
void Softmax<Dtype>::Forward(const std::vector<Tensor<Dtype>*> &bottoms, const std::vector<Tensor<Dtype>*> &tops) {
assert(bottoms.size() == 1); // Need only one bottom tensor
assert(tops.size() == 1); // Need only one bottom tensor
if (Session::GetSession()->gpu) {
hipLaunchKernelGGL(( SoftmaxGPUKernels::ForwardGPU<Dtype>), dim3(1), dim3(1), 0, 0, bottoms[0], tops[0]);
} else {
assert(bottoms[0]->GetDims()[1] == 1); // The dimension of the 2nd channel should be 1
assert(bottoms[0]->GetDims()[2] == 1); // The dimension of the 3rd channel should be 1
assert(bottoms[0]->GetDims()[0] == tops[0]->GetDims()[0]); // bottom channel should be equal to tops channel
assert(bottoms[0]->GetDims()[1] == tops[0]->GetDims()[1]);
assert(bottoms[0]->GetDims()[2] == tops[0]->GetDims()[2]);
assert(bottoms[0]->GetDims()[3] == tops[0]->GetDims()[3]);
const size_t batch_size = bottoms[0]->GetDims()[0];
const size_t nchannels = bottoms[0]->GetDims()[3];
Dtype denominator;
Dtype max_value;
for (int i = 0; i < batch_size; ++i) {
max_value = 0;
for (int j = 0; j < nchannels; ++j) {
if (bottoms[0]->at(i,0,0,j) > max_value) {
max_value = bottoms[0]->at(i,0,0,j);
}
}
denominator = 0;
for (int j = 0; j < nchannels; ++j) {
tops[0]->at(i,0,0,j) = (Dtype) exp(bottoms[0]->at(i,0,0,j)-max_value);
denominator += tops[0]->at(i,0,0,j);
}
for (int j = 0; j < nchannels; ++j) {
tops[0]->at(i,0,0,j) = tops[0]->at(i,0,0,j) / denominator;
}
}
}
}
template <class Dtype>
void Softmax<Dtype>::Backward(const std::vector<Tensor<Dtype>*> &tops,
const std::vector<Tensor<Dtype>*> &tops_diff,
const std::vector<Tensor<Dtype>*> &bottoms,
const std::vector<Tensor<Dtype>*> &bottoms_diff) {
assert(tops.size() == 1);
assert(tops_diff.size() == 1);
assert(bottoms.size() == 1);
assert(bottoms_diff.size() == 1);
Tensor<Dtype>* top = tops[0];
Tensor<Dtype>* top_diff = tops_diff[0];
Tensor<Dtype>* bottom = bottoms[0];
Tensor<Dtype>* bottom_diff = bottoms_diff[0];
Session* S = Session::GetSession();
int batch_size = S->batch_size;
if (S->gpu) {
hipLaunchKernelGGL(( SoftmaxGPUKernels::BackwardGPU<Dtype>), dim3(1),dim3(batch_size), 0, 0, top,top_diff,bottom,bottom_diff);
} else {
for (int b = 0; b < batch_size; ++b) {
int nchannels = top->GetDims()[3];
for (int i = 0; i < nchannels; ++i) {
bottom_diff->at(b,0,0,i) = 0;
for (int j = 0; j < nchannels; ++j) {
if (i==j) {
bottom_diff->at(b,0,0,i) += top->at(b,0,0,i) * (1-top->at(b,0,0,j)) * top_diff->at(b,0,0,j);
} else {
bottom_diff->at(b,0,0,i) -= top->at(b,0,0,i) * top->at(b,0,0,j) * top_diff->at(b,0,0,j);
}
}
}
}
}
}
template <class Dtype>
void Softmax<Dtype>::GetTopsDims(const std::vector<size_t*> &bottoms_dims, const std::vector<size_t*> &tops_dims) {
assert(bottoms_dims.size() == 1);
assert(tops_dims.size() == 1);
tops_dims[0][0] = bottoms_dims[0][0];
tops_dims[0][1] = bottoms_dims[0][1];
tops_dims[0][2] = bottoms_dims[0][2];
tops_dims[0][3] = bottoms_dims[0][3];
}
#endif // SOFTMAX_LAYER_CUH_
| 1dea2c9e9b60ac602b6296d46073881c4a9f7ce8.cu |
#ifndef SOFTMAX_LAYER_CUH_
#define SOFTMAX_LAYER_CUH_
#include <assert.h>
#include <math.h>
#include "basics/layer.hpp"
#include "basics/tensor.cu"
#include "basics/session.hpp"
// TODO: implement CUDA kernel for backward()
#define BLOCKDIM 32
namespace SoftmaxGPUKernels {
template <class Dtype>
__global__ void ForwardGPUKernel(Tensor<Dtype>* bottom, Tensor<Dtype>* top) {
const int batch_idx = threadIdx.x;
const int batch_size = int(bottom->GetDims()[0]);
const int nchannels = int(bottom->GetDims()[3]);
Dtype max_value = 0;
for (int j = 0; j < nchannels; ++j) {
if (bottom->at(batch_idx,0,0,j) > max_value) {
max_value = bottom->at(batch_idx,0,0,j);
}
}
Dtype denominator = 0;
for (int j = 0; j < nchannels; ++j) {
top->at(batch_idx,0,0,j) = (Dtype) exp(bottom->at(batch_idx,0,0,j)-max_value);
denominator += top->at(batch_idx,0,0,j);
}
assert(denominator != 0);
for (int j = 0; j < nchannels; ++j) {
top->at(batch_idx,0,0,j) = top->at(batch_idx,0,0,j) / denominator;
}
}
template <class Dtype>
__global__ void ForwardGPU(Tensor<Dtype>* bottom, Tensor<Dtype>* top) {
assert(bottom->GetDims()[1] == 1); // The dimension of the 2nd channel should be 1
assert(bottom->GetDims()[2] == 1); // The dimension of the 3rd channel should be 1
assert(bottom->GetDims()[0] == top->GetDims()[0]); // bottom channel should be equal to top channel
assert(bottom->GetDims()[1] == top->GetDims()[1]);
assert(bottom->GetDims()[2] == top->GetDims()[2]);
assert(bottom->GetDims()[3] == top->GetDims()[3]);
SoftmaxGPUKernels::ForwardGPUKernel<Dtype> <<<1,bottom->GetDims()[0]>>>(bottom, top);
}
template <class Dtype>
__global__ void BackwardGPU(Tensor<Dtype>* top, Tensor<Dtype>* top_diff,
Tensor<Dtype>* bottom, Tensor<Dtype>* bottom_diff) {
int batch_idx = threadIdx.x;
int nchannels = top->GetDims()[3];
for (int i = 0; i < nchannels; ++i) {
bottom_diff->at(batch_idx,0,0,i) = 0;
for (int j = 0; j < nchannels; ++j) {
if (i==j) {
bottom_diff->at(batch_idx,0,0,i) +=
top->at(batch_idx,0,0,i) * (1-top->at(batch_idx,0,0,j)) * top_diff->at(batch_idx,0,0,j);
} else {
bottom_diff->at(batch_idx,0,0,i) -=
top->at(batch_idx,0,0,i) * top->at(batch_idx,0,0,j) * top_diff->at(batch_idx,0,0,j);
}
}
}
}
}
template <class Dtype>
class Softmax: public Layer<Dtype> {
public:
Softmax() {}
~Softmax() {}
void Forward(const std::vector<Tensor<Dtype>*>&, const std::vector<Tensor<Dtype>*>&);
void Backward(const std::vector<Tensor<Dtype>*>&, const std::vector<Tensor<Dtype>*>&,
const std::vector<Tensor<Dtype>*>&, const std::vector<Tensor<Dtype>*>&);
void GetTopsDims(const std::vector<size_t*> &, const std::vector<size_t*> &);
private:
};
template <class Dtype>
void Softmax<Dtype>::Forward(const std::vector<Tensor<Dtype>*> &bottoms, const std::vector<Tensor<Dtype>*> &tops) {
assert(bottoms.size() == 1); // Need only one bottom tensor
assert(tops.size() == 1); // Need only one bottom tensor
if (Session::GetSession()->gpu) {
SoftmaxGPUKernels::ForwardGPU<Dtype><<<1, 1>>>(bottoms[0], tops[0]);
} else {
assert(bottoms[0]->GetDims()[1] == 1); // The dimension of the 2nd channel should be 1
assert(bottoms[0]->GetDims()[2] == 1); // The dimension of the 3rd channel should be 1
assert(bottoms[0]->GetDims()[0] == tops[0]->GetDims()[0]); // bottom channel should be equal to tops channel
assert(bottoms[0]->GetDims()[1] == tops[0]->GetDims()[1]);
assert(bottoms[0]->GetDims()[2] == tops[0]->GetDims()[2]);
assert(bottoms[0]->GetDims()[3] == tops[0]->GetDims()[3]);
const size_t batch_size = bottoms[0]->GetDims()[0];
const size_t nchannels = bottoms[0]->GetDims()[3];
Dtype denominator;
Dtype max_value;
for (int i = 0; i < batch_size; ++i) {
max_value = 0;
for (int j = 0; j < nchannels; ++j) {
if (bottoms[0]->at(i,0,0,j) > max_value) {
max_value = bottoms[0]->at(i,0,0,j);
}
}
denominator = 0;
for (int j = 0; j < nchannels; ++j) {
tops[0]->at(i,0,0,j) = (Dtype) exp(bottoms[0]->at(i,0,0,j)-max_value);
denominator += tops[0]->at(i,0,0,j);
}
for (int j = 0; j < nchannels; ++j) {
tops[0]->at(i,0,0,j) = tops[0]->at(i,0,0,j) / denominator;
}
}
}
}
template <class Dtype>
void Softmax<Dtype>::Backward(const std::vector<Tensor<Dtype>*> &tops,
const std::vector<Tensor<Dtype>*> &tops_diff,
const std::vector<Tensor<Dtype>*> &bottoms,
const std::vector<Tensor<Dtype>*> &bottoms_diff) {
assert(tops.size() == 1);
assert(tops_diff.size() == 1);
assert(bottoms.size() == 1);
assert(bottoms_diff.size() == 1);
Tensor<Dtype>* top = tops[0];
Tensor<Dtype>* top_diff = tops_diff[0];
Tensor<Dtype>* bottom = bottoms[0];
Tensor<Dtype>* bottom_diff = bottoms_diff[0];
Session* S = Session::GetSession();
int batch_size = S->batch_size;
if (S->gpu) {
SoftmaxGPUKernels::BackwardGPU<Dtype><<<1,batch_size>>>(top,top_diff,bottom,bottom_diff);
} else {
for (int b = 0; b < batch_size; ++b) {
int nchannels = top->GetDims()[3];
for (int i = 0; i < nchannels; ++i) {
bottom_diff->at(b,0,0,i) = 0;
for (int j = 0; j < nchannels; ++j) {
if (i==j) {
bottom_diff->at(b,0,0,i) += top->at(b,0,0,i) * (1-top->at(b,0,0,j)) * top_diff->at(b,0,0,j);
} else {
bottom_diff->at(b,0,0,i) -= top->at(b,0,0,i) * top->at(b,0,0,j) * top_diff->at(b,0,0,j);
}
}
}
}
}
}
template <class Dtype>
void Softmax<Dtype>::GetTopsDims(const std::vector<size_t*> &bottoms_dims, const std::vector<size_t*> &tops_dims) {
assert(bottoms_dims.size() == 1);
assert(tops_dims.size() == 1);
tops_dims[0][0] = bottoms_dims[0][0];
tops_dims[0][1] = bottoms_dims[0][1];
tops_dims[0][2] = bottoms_dims[0][2];
tops_dims[0][3] = bottoms_dims[0][3];
}
#endif // SOFTMAX_LAYER_CUH_
|
4990bee16e2de805e77f018ebbf23baa01eadad5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Accelerating large graph algorithms on the GPU using CUDA
// http://dl.acm.org/citation.cfm?id=1782200
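// Frontier expansion: every vertex in the current frontier leaves the frontier,
// sets unvisited neighbours' distances to its own distance + 1, and marks them as updated.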
__global__ void kernel_cuda_frontier(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int *result,
bool *updated,
bool *frontier,
bool *still_running,
bool *visited)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
for (int v = 0; v < num_vertices; v += num_threads)
{
int vertex = v + tid;
if (vertex < num_vertices && frontier[vertex])
{
frontier[vertex] = false;
for (int n = 0; n < v_adj_length[vertex]; n++)
{
int neighbor = v_adj_list[v_adj_begin[vertex] + n];
if (!visited[neighbor])
{
result[neighbor] = result[vertex] + 1;
updated[neighbor] = true;
}
}
}
}
}
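// Flag update: vertices marked as updated form the next frontier, are marked
// visited, and signal that another BFS iteration is needed.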
__global__ void kernel_cuda_frontier_update_flags(
int num_vertices,
bool *still_running,
bool *updated,
bool *frontier,
bool *visited)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
for (int v = 0; v < num_vertices; v += num_threads)
{
int vertex = v + tid;
if (vertex < num_vertices && updated[vertex])
{
frontier[vertex] = true;
updated[vertex] = false;
visited[vertex] = true;
*still_running = true;
}
}
}
int bfs_cuda_frontier(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int num_edges,
int start_vertex,
int *result)
{
int *k_v_adj_list;
int *k_v_adj_begin;
int *k_v_adj_length;
int *k_result;
bool *k_updated;
bool *k_still_running;
bool *k_frontier;
bool *k_visited;
int kernel_runs = 0;
bool *updated = new bool[num_vertices];
fill_n(updated, num_vertices, false);
bool *visited = new bool[num_vertices];
fill_n(visited, num_vertices, false);
visited[start_vertex] = true;
bool *frontier = new bool[num_vertices];
fill_n(frontier, num_vertices, false);
frontier[start_vertex] = true;
fill_n(result, num_vertices, MAX_DIST);
result[start_vertex] = 0;
bool *still_running = new bool[1];
bool false_value = false;
hipMalloc(&k_v_adj_list, sizeof(int) * num_edges);
hipMalloc(&k_v_adj_begin, sizeof(int) * num_vertices);
hipMalloc(&k_v_adj_length, sizeof(int) * num_vertices);
hipMalloc(&k_result, sizeof(int) * num_vertices);
hipMalloc(&k_updated, sizeof(bool) * num_vertices);
hipMalloc(&k_frontier, sizeof(bool) * num_vertices);
hipMalloc(&k_still_running, sizeof(bool) * 1);
hipMalloc(&k_visited, sizeof(bool) * num_vertices);
hipMemcpy(k_v_adj_list, v_adj_list, sizeof(int) * num_edges, hipMemcpyHostToDevice);
hipMemcpy(k_v_adj_begin, v_adj_begin, sizeof(int) * num_vertices, hipMemcpyHostToDevice);
hipMemcpy(k_v_adj_length, v_adj_length, sizeof(int) * num_vertices, hipMemcpyHostToDevice);
hipMemcpy(k_result, result, sizeof(int) * num_vertices, hipMemcpyHostToDevice);
hipMemcpy(k_updated, updated, sizeof(bool) * num_vertices, hipMemcpyHostToDevice);
hipMemcpy(k_visited, visited, sizeof(bool) * num_vertices, hipMemcpyHostToDevice);
hipMemcpy(k_frontier, frontier, sizeof(bool) * num_vertices, hipMemcpyHostToDevice);
// --- START MEASURE TIME ---
struct timeval t1, t2;
gettimeofday(&t1, NULL);
*still_running = false;
do
{
hipMemcpy(k_still_running, &false_value, sizeof(bool) * 1, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_cuda_frontier), dim3(BLOCKS), dim3(THREADS), 0, 0,
k_v_adj_list,
k_v_adj_begin,
k_v_adj_length,
num_vertices,
k_result,
k_updated,
k_frontier,
k_still_running,
k_visited);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kernel_cuda_frontier_update_flags), dim3(BLOCKS), dim3(THREADS), 0, 0,
num_vertices,
k_still_running,
k_updated,
k_frontier,
k_visited);
hipDeviceSynchronize();
kernel_runs++;
hipMemcpy(still_running, k_still_running, sizeof(bool) * 1, hipMemcpyDeviceToHost);
} while (*still_running);
hipDeviceSynchronize();
gettimeofday(&t2, NULL);
long long time = get_elapsed_time(&t1, &t2);
if (report_time)
{
printf("%s,%i,%i,%i,%i,%lld\n", __FILE__, num_vertices, num_edges, BLOCKS, THREADS, time);
}
// --- END MEASURE TIME ---
hipMemcpy(result, k_result, sizeof(int) * num_vertices, hipMemcpyDeviceToHost);
hipFree(k_v_adj_list);
hipFree(k_v_adj_begin);
hipFree(k_v_adj_length);
hipFree(k_result);
hipFree(k_still_running);
hipFree(k_updated);
hipFree(k_frontier);
// printf("%i kernel runs\n", kernel_runs);
return time;
}
| 4990bee16e2de805e77f018ebbf23baa01eadad5.cu | // Accelerating large graph algorithms on the GPU using CUDA
// http://dl.acm.org/citation.cfm?id=1782200
__global__ void kernel_cuda_frontier(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int *result,
bool *updated,
bool *frontier,
bool *still_running,
bool *visited)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
for (int v = 0; v < num_vertices; v += num_threads)
{
int vertex = v + tid;
if (vertex < num_vertices && frontier[vertex])
{
frontier[vertex] = false;
for (int n = 0; n < v_adj_length[vertex]; n++)
{
int neighbor = v_adj_list[v_adj_begin[vertex] + n];
if (!visited[neighbor])
{
result[neighbor] = result[vertex] + 1;
updated[neighbor] = true;
}
}
}
}
}
__global__ void kernel_cuda_frontier_update_flags(
int num_vertices,
bool *still_running,
bool *updated,
bool *frontier,
bool *visited)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
for (int v = 0; v < num_vertices; v += num_threads)
{
int vertex = v + tid;
if (vertex < num_vertices && updated[vertex])
{
frontier[vertex] = true;
updated[vertex] = false;
visited[vertex] = true;
*still_running = true;
}
}
}
int bfs_cuda_frontier(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int num_edges,
int start_vertex,
int *result)
{
int *k_v_adj_list;
int *k_v_adj_begin;
int *k_v_adj_length;
int *k_result;
bool *k_updated;
bool *k_still_running;
bool *k_frontier;
bool *k_visited;
int kernel_runs = 0;
bool *updated = new bool[num_vertices];
fill_n(updated, num_vertices, false);
bool *visited = new bool[num_vertices];
fill_n(visited, num_vertices, false);
visited[start_vertex] = true;
bool *frontier = new bool[num_vertices];
fill_n(frontier, num_vertices, false);
frontier[start_vertex] = true;
fill_n(result, num_vertices, MAX_DIST);
result[start_vertex] = 0;
bool *still_running = new bool[1];
bool false_value = false;
cudaMalloc(&k_v_adj_list, sizeof(int) * num_edges);
cudaMalloc(&k_v_adj_begin, sizeof(int) * num_vertices);
cudaMalloc(&k_v_adj_length, sizeof(int) * num_vertices);
cudaMalloc(&k_result, sizeof(int) * num_vertices);
cudaMalloc(&k_updated, sizeof(bool) * num_vertices);
cudaMalloc(&k_frontier, sizeof(bool) * num_vertices);
cudaMalloc(&k_still_running, sizeof(bool) * 1);
cudaMalloc(&k_visited, sizeof(bool) * num_vertices);
cudaMemcpy(k_v_adj_list, v_adj_list, sizeof(int) * num_edges, cudaMemcpyHostToDevice);
cudaMemcpy(k_v_adj_begin, v_adj_begin, sizeof(int) * num_vertices, cudaMemcpyHostToDevice);
cudaMemcpy(k_v_adj_length, v_adj_length, sizeof(int) * num_vertices, cudaMemcpyHostToDevice);
cudaMemcpy(k_result, result, sizeof(int) * num_vertices, cudaMemcpyHostToDevice);
cudaMemcpy(k_updated, updated, sizeof(bool) * num_vertices, cudaMemcpyHostToDevice);
cudaMemcpy(k_visited, visited, sizeof(bool) * num_vertices, cudaMemcpyHostToDevice);
cudaMemcpy(k_frontier, frontier, sizeof(bool) * num_vertices, cudaMemcpyHostToDevice);
// --- START MEASURE TIME ---
struct timeval t1, t2;
gettimeofday(&t1, NULL);
*still_running = false;
do
{
cudaMemcpy(k_still_running, &false_value, sizeof(bool) * 1, cudaMemcpyHostToDevice);
kernel_cuda_frontier<<<BLOCKS, THREADS>>>(
k_v_adj_list,
k_v_adj_begin,
k_v_adj_length,
num_vertices,
k_result,
k_updated,
k_frontier,
k_still_running,
k_visited);
cudaThreadSynchronize();
kernel_cuda_frontier_update_flags<<<BLOCKS, THREADS>>>(
num_vertices,
k_still_running,
k_updated,
k_frontier,
k_visited);
cudaThreadSynchronize();
kernel_runs++;
cudaMemcpy(still_running, k_still_running, sizeof(bool) * 1, cudaMemcpyDeviceToHost);
} while (*still_running);
cudaThreadSynchronize();
gettimeofday(&t2, NULL);
long long time = get_elapsed_time(&t1, &t2);
if (report_time)
{
printf("%s,%i,%i,%i,%i,%lld\n", __FILE__, num_vertices, num_edges, BLOCKS, THREADS, time);
}
// --- END MEASURE TIME ---
cudaMemcpy(result, k_result, sizeof(int) * num_vertices, cudaMemcpyDeviceToHost);
cudaFree(k_v_adj_list);
cudaFree(k_v_adj_begin);
cudaFree(k_v_adj_length);
cudaFree(k_result);
cudaFree(k_still_running);
cudaFree(k_updated);
cudaFree(k_frontier);
// printf("%i kernel runs\n", kernel_runs);
return time;
}
|
63d9f1ac996f394481e09ee61680e82d692bb888.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <hiprand/hiprand.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#define NUM_THREADS 512
#define BLOCK 512
#define IDX(b,l,t,m,i,cum,L) (i+2*(m+t*(2*l+1)+cum[l]+b*cum[L+1]))
#define PLUSMINUS(k) ((k%2==1) ? -1 : 1)
#define LOGFACT(n,mem) ((n < 2) ? 0. : mem[n])
int rounded_division(int number1, int number2) {
if (number1 % number2 == 0) {
return number1 / number2;
}
return number1 / number2 + 1;
}
dim3 cuda_gridsize(int n){
int k = (n - 1) / BLOCK + 1;
int x = k;
int y = 1;
if (x > 65535){
x = ceil(sqrt(k));
y = (n - 1) / (x * BLOCK) + 1;
}
dim3 d(x, y, 1);
return d;
}
namespace {
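// Evaluates the Clebsch-Gordan coefficient <l1 m1; l2 m2 | l m> with the standard
// factorial-sum formula; LOGFACT reads the precomputed log-factorial table 'mem'
// to keep the intermediate terms within floating-point range.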
__device__ __forceinline__ float _naiveCG(
int l1, int l2, int l, int m1, int m2, int m,
const double* mem){
int m3=-m;
int t1=l2-m1-l;
int t2=l1+m2-l;
int t3=l1+l2-l;
int t4=l1-m1;
int t5=l2+m2;
int tmin=max(0,max(t1,t2));
int tmax=min(t3,min(t4,t5));
double wigner=0;
double logA=(log((double)2*l+1)+LOGFACT(l+l1-l2,mem)+LOGFACT(l-l1+l2,mem)+LOGFACT(l1+l2-l,mem)-LOGFACT(l1+l2+l+1,mem))/2;
logA+=(LOGFACT(l-m3,mem)+LOGFACT(l+m3,mem)+LOGFACT(l1-m1,mem)+LOGFACT(l1+m1,mem)+LOGFACT(l2-m2,mem)+LOGFACT(l2+m2,mem))/2;
for(int t=tmin; t<=tmax; t++){
double logB = LOGFACT(t,mem)+LOGFACT(t3-t,mem)+LOGFACT(t4-t,mem)+LOGFACT(t5-t,mem)+LOGFACT(-t1+t,mem)+LOGFACT(-t2+t,mem);
wigner += PLUSMINUS(t)*exp(logA-logB);
}
return (float) PLUSMINUS(l1-l2-m3)*PLUSMINUS(l1-l2+m)*wigner;
}
__device__ __forceinline__ float naiveCG_cal_m(
int l1, int l2, int l, int m1, int m2,
const double* mem){
return _naiveCG(l1, l2, l, m1, m2, m1+m2, mem);
}
__device__ float naiveCG_cal_m1(
int l1, int l2, int l, int m, int m2,
const double* mem){
return _naiveCG(l1, l2, l, m - m2, m2, m, mem);
}
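// Fills a flat table of CG coefficients: one thread per (l1, l2, l, m2) combination
// writes the coefficients for all m, using the same 'start' offset layout that the
// backward kernel uses when reading the table.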
__global__ void cudaprecomputeCG_job(
float* __restrict__ CG,
const double* __restrict__ logfact,
int Lmax,
int Batch_size) {
const int global_threadId = blockIdx.x * blockDim.x + threadIdx.x;
const int L1 = (Lmax + 1);
const int L2 = L1*L1, L3=L1*L1*L1;
if (global_threadId < L3*(2*Lmax+1)){
int m2 = global_threadId % (2*Lmax+1);
int l_remainder = global_threadId / (2*Lmax+1);
int l1 = l_remainder / L2;
int l2 = (l_remainder / L1) % L1;
int l = l_remainder % L1;
if (l2 <= l1 && l1-l2 <= l && l <= l1+l2 && m2 < 2*l2+1){
int start = 0;
for (int templ1=0; templ1 <= l1; templ1++){
for (int templ2=0; (templ2<l2 && templ1==l1) || (templ2<=templ1 && templ1<l1);templ2++){
int low = templ1-templ2, high=(templ2+templ1 > Lmax) ? Lmax : templ2+templ1;
for (int templ=low; templ<=high ; templ++){
start += (2*templ2+1)*(2*templ+1);
}
}
}
for (int templ = l1-l2; templ<l; templ++){
start += (2*l2+1)*(templ*2+1);
}
//offset m2
start += m2*(2*l+1);
for (int m = 0; m < 2*l+1;m++){
int m1 = (m-l) - (m2-l2);
if (-l1 <= m1 && m1 <= l1){
CG[start + m] = naiveCG_cal_m1(l1,l2,l,m-l,m2-l2,logfact);
//CG[start + m] = 100*l1 + 10*l2 + l + 0.1*(m1+l1) + 0.01*m2 + 0.001*m;
}
}
}
}
}
//==================================================================================================================
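// Forward pass: one thread per (batch, l1, l2, l) combination; it loops over all
// (m1, m2) and channel pairs (t1, t2), multiplying the two input fragments and
// accumulating the CG-weighted product into the output fragment of order l.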
__global__ void cudaCG_forward_kernel(
const float* tensor,
float* out_tensor,
const int* taus,
const int* cum_tauIn_m,
const int* cum_tauMiddle_m,
const double* logfact,
int Lmax,
int Batch_size) {
int global_threadId = blockIdx.x * blockDim.x + threadIdx.x;
int L1 = (Lmax+1);
int Entry_size = L1 * (Lmax+2) * (Lmax +1) / 2;
if (global_threadId < Batch_size * Entry_size){
int b = global_threadId / Entry_size;
int l = global_threadId % L1;
int remainder_for_l = (global_threadId % Entry_size) / L1;
int l1 = 0, l2 = remainder_for_l * 2;
while (l1*(l1+1) <= l2){l1++;}
l1 -= 1;
l2 = (l2 - l1*(l1+1))/2;
if (l2 <= l1 && l1 - l2 <= l && l <= l1 + l2){
int t_offset = 0;
for (int templ1 = 0; templ1<l1; templ1++){
for (int templ2 = 0; templ2<=templ1; templ2++){
if (l <= templ2 + templ1 && l >= templ1- templ2){
t_offset += taus[templ1]*taus[templ2];
}
}
}
for (int templ2 = 0; templ2<=l2; templ2++){
if (l <= templ2 + l1 && l >= l1- templ2){
t_offset += taus[l1]*taus[templ2];
}
}
t_offset -= taus[l1]*taus[l2];
for (int m1 = -l1; m1 <= l1; m1++){
for (int m2 = -l2; m2 <= l2; m2++){
int m = m1 + m2;
if (-l <= m && m <= l){
float CGcoef = naiveCG_cal_m(l1,l2,l,m1,m2,logfact);
for (int t1 = 0; t1 < taus[l1]; t1++){
for (int t2 = 0; t2 < taus[l2]; t2++){
int t = t1 * taus[l2] + t2 + t_offset;
float real1 = tensor[IDX(b,l1,t1,m1+l1,0,cum_tauIn_m,Lmax)];
float imag1 = tensor[IDX(b,l1,t1,m1+l1,1,cum_tauIn_m,Lmax)];
float real2 = tensor[IDX(b,l2,t2,m2+l2,0,cum_tauIn_m,Lmax)];
float imag2 = tensor[IDX(b,l2,t2,m2+l2,1,cum_tauIn_m,Lmax)];
out_tensor[IDX(b,l,t,m+l,0,cum_tauMiddle_m,Lmax)] += (real1 * real2 - imag1 * imag2) * CGcoef;
out_tensor[IDX(b,l,t,m+l,1,cum_tauMiddle_m,Lmax)] += (real1 * imag2 + real2 * imag1) * CGcoef;
//out_tensor[IDX(b,l,t,m+l,0,cum_tauMiddle_m,Lmax)] = t + 0.01 * t_offset;
//out_tensor[IDX(b,l,t,m+l,1,cum_tauMiddle_m,Lmax)] = m+l+0.1 * l1 + 0.01 * l2 + 0.001*l;
//return;
}
}
}
}
}
}
}
}
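// Backward pass: one thread per input entry (b, l1, t1, m1). The gradient is
// accumulated by pairing the entry with every partner fragment, first as the
// first operand (all l2 <= l1) and then, from l2 = l1 upward, as the second operand.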
__global__ void cudaCG_backward_kernel(
const float* tensor,
float* g_in,
const float* g_out,
const int* taus,
const int* cum_taus,
const int* cum_new_taus,
const float* CG,
int Lmax,
int Batch_size) {
int global_threadId = blockIdx.x * blockDim.x + threadIdx.x;
if (global_threadId < Batch_size * cum_taus[Lmax+1]){
int b = global_threadId / cum_taus[Lmax + 1];
int ltm1 = global_threadId % cum_taus[Lmax + 1];
int l1 = 0;
while (cum_taus[l1]<=ltm1) {
l1++;
}
l1 -= 1;
int tm1 = ltm1 - cum_taus[l1];
int t1 = tm1 / (2*l1+1);
int m1 = tm1 % (2*l1+1);
//m1 -= l1;
int l2 = 0, m2 = 0, t2 = 0;
float real1=0, imag1=0;
for (l2 = 0; l2 <= l1; l2++){
for (int l = l1 - l2; l <= Lmax && l <= l1 + l2; l++){
int CG_offset=0, t_offset=0;
for (int templ1=0; templ1 <= l1; templ1++){
for (int templ2=0; (templ2<l2 && templ1==l1) || (templ2<=templ1 && templ1<l1);templ2++){
int low = templ1-templ2, high=(templ2+templ1 > Lmax) ? Lmax : templ2+templ1;
for (int templ=low; templ<=high ; templ++){
CG_offset += (2*templ2+1)*(2*templ+1);
}
if (l <= templ1 + templ2 && l >= templ1 - templ2){
t_offset += taus[templ1]*taus[templ2];
}
}
}
for (int templ = l1-l2; templ<l; templ++){
CG_offset += (2*l2+1)*(templ*2+1);
}
for (m2 = 0; m2 < 2*l2+1; m2++){
for (int m = 0; m < 2*l+1; m++){
if (m1-l1 + m2-l2 == m-l){
float CGcoef = CG[CG_offset+(2*l+1)*m2+m];
for (t2 = 0; t2 < taus[l2]; t2++){
int t = taus[l2] * t1 + t2 + t_offset;
float real = g_out[IDX(b,l,t,m,0,cum_new_taus,Lmax)];
float imag = g_out[IDX(b,l,t,m,1,cum_new_taus,Lmax)];
float real2 = tensor[IDX(b,l2,t2,m2,0,cum_taus,Lmax)];
float imag2 = tensor[IDX(b,l2,t2,m2,1,cum_taus,Lmax)];
real1 += (real * real2 + imag * imag2) * CGcoef;
imag1 += (real2 * imag - real * imag2) * CGcoef;
}
}
}
}
}
}
//Now switching to treat l1 as a "l2"
l2 = l1;
t2 = t1;
m2 = m1;
for (l1 = l2; l1 <= Lmax; l1++){
for (int l = l1 - l2; l <= Lmax && l <= l1 + l2; l++){
int CG_offset=0, t_offset=0;
for (int templ1=0; templ1 <= l1; templ1++){
for (int templ2=0; (templ2<l2 && templ1==l1) || (templ2<=templ1 && templ1<l1);templ2++){
int low = templ1-templ2, high=(templ2+templ1 > Lmax) ? Lmax : templ2+templ1;
for (int templ=low; templ<=high ; templ++){
CG_offset += (2*templ2+1)*(2*templ+1);
}
if (l <= templ1 + templ2 && l >= templ1 - templ2){
t_offset += taus[templ1]*taus[templ2];
}
}
}
for (int templ = l1-l2; templ<l; templ++){
CG_offset += (2*l2+1)*(templ*2+1);
}
for (m1 = 0; m1 < 2*l1+1; m1++){
for (int m = 0; m < 2*l+1; m++){
if (m1-l1 + m2-l2 == m-l){
float CGcoef = CG[CG_offset+(2*l+1)*m2+m];
for (t1 = 0; t1 < taus[l1]; t1++){
int t = taus[l2] * t1 + t2 + t_offset;
float real = g_out[IDX(b,l,t,m,0,cum_new_taus,Lmax)];
float imag = g_out[IDX(b,l,t,m,1,cum_new_taus,Lmax)];
//This time we need to access l1 t1 and m1
float real2 = tensor[IDX(b,l1,t1,m1,0,cum_taus,Lmax)];
float imag2 = tensor[IDX(b,l1,t1,m1,1,cum_taus,Lmax)];
real1 += (real * real2 + imag * imag2) * CGcoef;
imag1 += (real2 * imag - real * imag2) * CGcoef;
}
}
}
}
}
}
g_in[global_threadId*2] = real1;
g_in[global_threadId*2+1] = imag1;
}
}
} // namespace
void print_arr(int* v, int l){
printf("vector: (");
for (int i = 0; i < l; i++){
printf("%d, ", v[i]);
}
printf(")\n");
return;
}
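// cum_tau[l] is the offset of fragment l within one batch element, counted in
// (t, m) entries as consumed by the IDX macro; cum_tau[L+1] is the per-batch total.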
int* _get_cum_tau(int* taus, int L){
int* cum_tau = (int*) malloc((L+2)*sizeof(int));
cum_tau[0] = 0;
for (int l = 0; l <= L; l++){
cum_tau[l+1] = cum_tau[l] + (2 * l + 1) * taus[l];
}
return cum_tau;
}
void CG_cuda_forward(
torch::Tensor input,
torch::Tensor output,
int L,
int B,
torch::Tensor taus_tensor){
//auto output = torch::zeros_like(old_cell);
float* F = input.data<float>();
float* out = output.data<float>();
int* taus = taus_tensor.data<int>();
//printf("len(taus) = %d\n", taus_tensor.size(0));
int* new_taus = (int*) calloc(L+1, sizeof(int));
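// Count output channels: new_taus[l] accumulates taus[l1]*taus[l2] over all
// admissible pairs with l2 <= l1 and |l1 - l2| <= l <= l1 + l2.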
for (int l1 = 0; l1 <= L; l1++){
for (int l2 = 0; l2 <= l1; l2++){
for (int l = l1-l2; l <=L && l <= l1 + l2; l++){
new_taus[l] += taus[l1] * taus[l2];
}
}
}
int* cum_tauIn_m = _get_cum_tau(taus, L);
int* cum_tauMiddle_m = _get_cum_tau(new_taus, L);
int size = B * (L+1) * (L+2) * (L+1) /2;
int LOGFACT_SIZE=5*L+20;
double* logfact = (double*) calloc(LOGFACT_SIZE, sizeof(double));
for (int i = 2; i < LOGFACT_SIZE; i++){
logfact[i] = logfact[i-1] + log((double) i);
}
double* cuda_logfact;
hipMalloc((void**) &cuda_logfact, LOGFACT_SIZE*sizeof(double));
hipMemcpy(cuda_logfact, logfact, LOGFACT_SIZE*sizeof(double), hipMemcpyHostToDevice);
int *cuda_tauIn, *cuda_cum_tauIn_m, *cuda_cum_tauMiddle_m;
hipMalloc((void**) &cuda_tauIn, (L+1)*sizeof(int));
hipMemcpy(cuda_tauIn, taus, (L+1)*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**) &cuda_cum_tauIn_m, (L+2)*sizeof(int));
hipMemcpy(cuda_cum_tauIn_m, cum_tauIn_m, (L+2)*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**) &cuda_cum_tauMiddle_m, (L+2)*sizeof(int));
hipMemcpy(cuda_cum_tauMiddle_m, cum_tauMiddle_m, (L+2)*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cudaCG_forward_kernel), dim3(cuda_gridsize(size)), dim3(NUM_THREADS), 0, 0,
F, out,
cuda_tauIn, cuda_cum_tauIn_m, cuda_cum_tauMiddle_m,
cuda_logfact, L, B);
}
void CG_cuda_backward(
torch::Tensor input,
torch::Tensor grad_in,
torch::Tensor grad_out,
torch::Tensor CG_tensor,
int L,
int B,
torch::Tensor taus_tensor){
float* F = input.data<float>();
float* g_in = grad_in.data<float>();
float* g_out = grad_out.data<float>();
int* taus = taus_tensor.data<int>();
int* new_taus = (int*) calloc(L+1, sizeof(int));
for (int l1 = 0; l1 <= L; l1++){
for (int l2 = 0; l2 <= l1; l2++){
for (int l = l1-l2; l <=L && l <= l1 + l2; l++){
new_taus[l] += taus[l1] * taus[l2];
}
}
}
int* cum_tauIn_m = _get_cum_tau(taus, L);
int* cum_tauMiddle_m = _get_cum_tau(new_taus, L);
int *cuda_tauIn, *cuda_cum_tauIn_m, *cuda_cum_tauMiddle_m;
hipMalloc((void**) &cuda_tauIn, (L+1)*sizeof(int));
hipMalloc((void**) &cuda_cum_tauIn_m, (L+2)*sizeof(int));
hipMalloc((void**) &cuda_cum_tauMiddle_m, (L+2)*sizeof(int));
hipMemcpy(cuda_tauIn, taus, (L+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cuda_cum_tauIn_m, cum_tauIn_m, (L+2)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cuda_cum_tauMiddle_m, cum_tauMiddle_m, (L+2)*sizeof(int), hipMemcpyHostToDevice);
//Compute taus such that taus[l] is the starting position of the tensor
int size = B * cum_tauIn_m[L+1];
//printf("Need %d threads\n", size);
//Prep for CG
int LOGFACT_SIZE=5*L+20;
double* logfact = (double*) calloc(LOGFACT_SIZE, sizeof(double));
for (int i = 2; i < LOGFACT_SIZE; i++){
logfact[i] = logfact[i-1] + log((double) i);
}
double* cuda_logfact;
hipMalloc((void**) &cuda_logfact, LOGFACT_SIZE*sizeof(double));
hipMemcpy(cuda_logfact, logfact, LOGFACT_SIZE*sizeof(double), hipMemcpyHostToDevice);
float* CG = CG_tensor.data<float>();
int size0 = (L+1)*(L+1)*(L+1)*(2*L+1);
hipLaunchKernelGGL(( cudaprecomputeCG_job), dim3(cuda_gridsize(size0)), dim3(NUM_THREADS), 0, 0,
CG,
cuda_logfact, L, B);
hipDeviceSynchronize();
hipLaunchKernelGGL(( cudaCG_backward_kernel), dim3(cuda_gridsize(size)), dim3(NUM_THREADS), 0, 0,
F, g_in, g_out,
cuda_tauIn, cuda_cum_tauIn_m, cuda_cum_tauMiddle_m,
CG, L, B);
hipDeviceSynchronize();
hipFree(cuda_logfact);
hipFree(cuda_tauIn);
hipFree(cuda_cum_tauIn_m);
hipFree(cuda_cum_tauMiddle_m);
free(logfact);
free(cum_tauIn_m);
free(cum_tauMiddle_m);
}
| 63d9f1ac996f394481e09ee61680e82d692bb888.cu | #include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <curand.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#define NUM_THREADS 512
#define BLOCK 512
#define IDX(b,l,t,m,i,cum,L) (i+2*(m+t*(2*l+1)+cum[l]+b*cum[L+1]))
#define PLUSMINUS(k) ((k%2==1) ? -1 : 1)
#define LOGFACT(n,mem) ((n < 2) ? 0. : mem[n])
int rounded_division(int number1, int number2) {
if (number1 % number2 == 0) {
return number1 / number2;
}
return number1 / number2 + 1;
}
dim3 cuda_gridsize(int n){
int k = (n - 1) / BLOCK + 1;
int x = k;
int y = 1;
if (x > 65535){
x = ceil(sqrt(k));
y = (n - 1) / (x * BLOCK) + 1;
}
dim3 d(x, y, 1);
return d;
}
namespace {
__device__ __forceinline__ float _naiveCG(
int l1, int l2, int l, int m1, int m2, int m,
const double* mem){
int m3=-m;
int t1=l2-m1-l;
int t2=l1+m2-l;
int t3=l1+l2-l;
int t4=l1-m1;
int t5=l2+m2;
int tmin=max(0,max(t1,t2));
int tmax=min(t3,min(t4,t5));
double wigner=0;
double logA=(log((double)2*l+1)+LOGFACT(l+l1-l2,mem)+LOGFACT(l-l1+l2,mem)+LOGFACT(l1+l2-l,mem)-LOGFACT(l1+l2+l+1,mem))/2;
logA+=(LOGFACT(l-m3,mem)+LOGFACT(l+m3,mem)+LOGFACT(l1-m1,mem)+LOGFACT(l1+m1,mem)+LOGFACT(l2-m2,mem)+LOGFACT(l2+m2,mem))/2;
for(int t=tmin; t<=tmax; t++){
double logB = LOGFACT(t,mem)+LOGFACT(t3-t,mem)+LOGFACT(t4-t,mem)+LOGFACT(t5-t,mem)+LOGFACT(-t1+t,mem)+LOGFACT(-t2+t,mem);
wigner += PLUSMINUS(t)*exp(logA-logB);
}
return (float) PLUSMINUS(l1-l2-m3)*PLUSMINUS(l1-l2+m)*wigner;
}
__device__ __forceinline__ float naiveCG_cal_m(
int l1, int l2, int l, int m1, int m2,
const double* mem){
return _naiveCG(l1, l2, l, m1, m2, m1+m2, mem);
}
__device__ float naiveCG_cal_m1(
int l1, int l2, int l, int m, int m2,
const double* mem){
return _naiveCG(l1, l2, l, m - m2, m2, m, mem);
}
__global__ void cudaprecomputeCG_job(
float* __restrict__ CG,
const double* __restrict__ logfact,
int Lmax,
int Batch_size) {
const int global_threadId = blockIdx.x * blockDim.x + threadIdx.x;
const int L1 = (Lmax + 1);
const int L2 = L1*L1, L3=L1*L1*L1;
if (global_threadId < L3*(2*Lmax+1)){
int m2 = global_threadId % (2*Lmax+1);
int l_remainder = global_threadId / (2*Lmax+1);
int l1 = l_remainder / L2;
int l2 = (l_remainder / L1) % L1;
int l = l_remainder % L1;
if (l2 <= l1 && l1-l2 <= l && l <= l1+l2 && m2 < 2*l2+1){
int start = 0;
for (int templ1=0; templ1 <= l1; templ1++){
for (int templ2=0; (templ2<l2 && templ1==l1) || (templ2<=templ1 && templ1<l1);templ2++){
int low = templ1-templ2, high=(templ2+templ1 > Lmax) ? Lmax : templ2+templ1;
for (int templ=low; templ<=high ; templ++){
start += (2*templ2+1)*(2*templ+1);
}
}
}
for (int templ = l1-l2; templ<l; templ++){
start += (2*l2+1)*(templ*2+1);
}
//offset m2
start += m2*(2*l+1);
for (int m = 0; m < 2*l+1;m++){
int m1 = (m-l) - (m2-l2);
if (-l1 <= m1 && m1 <= l1){
CG[start + m] = naiveCG_cal_m1(l1,l2,l,m-l,m2-l2,logfact);
//CG[start + m] = 100*l1 + 10*l2 + l + 0.1*(m1+l1) + 0.01*m2 + 0.001*m;
}
}
}
}
}
//==================================================================================================================
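// Forward kernel: one thread per (batch, l1, l2, l) combination couples the two input
// fragments through the CG coefficients and accumulates into output fragment l.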
__global__ void cudaCG_forward_kernel(
const float* tensor,
float* out_tensor,
const int* taus,
const int* cum_tauIn_m,
const int* cum_tauMiddle_m,
const double* logfact,
int Lmax,
int Batch_size) {
int global_threadId = blockIdx.x * blockDim.x + threadIdx.x;
int L1 = (Lmax+1);
int Entry_size = L1 * (Lmax+2) * (Lmax +1) / 2;
if (global_threadId < Batch_size * Entry_size){
int b = global_threadId / Entry_size;
int l = global_threadId % L1;
int remainder_for_l = (global_threadId % Entry_size) / L1;
int l1 = 0, l2 = remainder_for_l * 2;
while (l1*(l1+1) <= l2){l1++;}
l1 -= 1;
l2 = (l2 - l1*(l1+1))/2;
if (l2 <= l1 && l1 - l2 <= l && l <= l1 + l2){
int t_offset = 0;
for (int templ1 = 0; templ1<l1; templ1++){
for (int templ2 = 0; templ2<=templ1; templ2++){
if (l <= templ2 + templ1 && l >= templ1- templ2){
t_offset += taus[templ1]*taus[templ2];
}
}
}
for (int templ2 = 0; templ2<=l2; templ2++){
if (l <= templ2 + l1 && l >= l1- templ2){
t_offset += taus[l1]*taus[templ2];
}
}
t_offset -= taus[l1]*taus[l2];
for (int m1 = -l1; m1 <= l1; m1++){
for (int m2 = -l2; m2 <= l2; m2++){
int m = m1 + m2;
if (-l <= m && m <= l){
float CGcoef = naiveCG_cal_m(l1,l2,l,m1,m2,logfact);
for (int t1 = 0; t1 < taus[l1]; t1++){
for (int t2 = 0; t2 < taus[l2]; t2++){
int t = t1 * taus[l2] + t2 + t_offset;
float real1 = tensor[IDX(b,l1,t1,m1+l1,0,cum_tauIn_m,Lmax)];
float imag1 = tensor[IDX(b,l1,t1,m1+l1,1,cum_tauIn_m,Lmax)];
float real2 = tensor[IDX(b,l2,t2,m2+l2,0,cum_tauIn_m,Lmax)];
float imag2 = tensor[IDX(b,l2,t2,m2+l2,1,cum_tauIn_m,Lmax)];
out_tensor[IDX(b,l,t,m+l,0,cum_tauMiddle_m,Lmax)] += (real1 * real2 - imag1 * imag2) * CGcoef;
out_tensor[IDX(b,l,t,m+l,1,cum_tauMiddle_m,Lmax)] += (real1 * imag2 + real2 * imag1) * CGcoef;
//out_tensor[IDX(b,l,t,m+l,0,cum_tauMiddle_m,Lmax)] = t + 0.01 * t_offset;
//out_tensor[IDX(b,l,t,m+l,1,cum_tauMiddle_m,Lmax)] = m+l+0.1 * l1 + 0.01 * l2 + 0.001*l;
//return;
}
}
}
}
}
}
}
}
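// Backward kernel: one thread per input entry (b, l1, t1, m1) accumulates its gradient by
// pairing it with every compatible partner fragment, treating the entry first as the left
// factor of the coupling and then (after the switch below) as the right factor.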
__global__ void cudaCG_backward_kernel(
const float* tensor,
float* g_in,
const float* g_out,
const int* taus,
const int* cum_taus,
const int* cum_new_taus,
const float* CG,
int Lmax,
int Batch_size) {
int global_threadId = blockIdx.x * blockDim.x + threadIdx.x;
if (global_threadId < Batch_size * cum_taus[Lmax+1]){
int b = global_threadId / cum_taus[Lmax + 1];
int ltm1 = global_threadId % cum_taus[Lmax + 1];
int l1 = 0;
while (cum_taus[l1]<=ltm1) {
l1++;
}
l1 -= 1;
int tm1 = ltm1 - cum_taus[l1];
int t1 = tm1 / (2*l1+1);
int m1 = tm1 % (2*l1+1);
//m1 -= l1;
int l2 = 0, m2 = 0, t2 = 0;
float real1=0, imag1=0;
for (l2 = 0; l2 <= l1; l2++){
for (int l = l1 - l2; l <= Lmax && l <= l1 + l2; l++){
int CG_offset=0, t_offset=0;
for (int templ1=0; templ1 <= l1; templ1++){
for (int templ2=0; (templ2<l2 && templ1==l1) || (templ2<=templ1 && templ1<l1);templ2++){
int low = templ1-templ2, high=(templ2+templ1 > Lmax) ? Lmax : templ2+templ1;
for (int templ=low; templ<=high ; templ++){
CG_offset += (2*templ2+1)*(2*templ+1);
}
if (l <= templ1 + templ2 && l >= templ1 - templ2){
t_offset += taus[templ1]*taus[templ2];
}
}
}
for (int templ = l1-l2; templ<l; templ++){
CG_offset += (2*l2+1)*(templ*2+1);
}
for (m2 = 0; m2 < 2*l2+1; m2++){
for (int m = 0; m < 2*l+1; m++){
if (m1-l1 + m2-l2 == m-l){
float CGcoef = CG[CG_offset+(2*l+1)*m2+m];
for (t2 = 0; t2 < taus[l2]; t2++){
int t = taus[l2] * t1 + t2 + t_offset;
float real = g_out[IDX(b,l,t,m,0,cum_new_taus,Lmax)];
float imag = g_out[IDX(b,l,t,m,1,cum_new_taus,Lmax)];
float real2 = tensor[IDX(b,l2,t2,m2,0,cum_taus,Lmax)];
float imag2 = tensor[IDX(b,l2,t2,m2,1,cum_taus,Lmax)];
real1 += (real * real2 + imag * imag2) * CGcoef;
imag1 += (real2 * imag - real * imag2) * CGcoef;
}
}
}
}
}
}
//Now switching to treat l1 as a "l2"
l2 = l1;
t2 = t1;
m2 = m1;
for (l1 = l2; l1 <= Lmax; l1++){
for (int l = l1 - l2; l <= Lmax && l <= l1 + l2; l++){
int CG_offset=0, t_offset=0;
for (int templ1=0; templ1 <= l1; templ1++){
for (int templ2=0; (templ2<l2 && templ1==l1) || (templ2<=templ1 && templ1<l1);templ2++){
int low = templ1-templ2, high=(templ2+templ1 > Lmax) ? Lmax : templ2+templ1;
for (int templ=low; templ<=high ; templ++){
CG_offset += (2*templ2+1)*(2*templ+1);
}
if (l <= templ1 + templ2 && l >= templ1 - templ2){
t_offset += taus[templ1]*taus[templ2];
}
}
}
for (int templ = l1-l2; templ<l; templ++){
CG_offset += (2*l2+1)*(templ*2+1);
}
for (m1 = 0; m1 < 2*l1+1; m1++){
for (int m = 0; m < 2*l+1; m++){
if (m1-l1 + m2-l2 == m-l){
float CGcoef = CG[CG_offset+(2*l+1)*m2+m];
for (t1 = 0; t1 < taus[l1]; t1++){
int t = taus[l2] * t1 + t2 + t_offset;
float real = g_out[IDX(b,l,t,m,0,cum_new_taus,Lmax)];
float imag = g_out[IDX(b,l,t,m,1,cum_new_taus,Lmax)];
//This time we need to access l1 t1 and m1
float real2 = tensor[IDX(b,l1,t1,m1,0,cum_taus,Lmax)];
float imag2 = tensor[IDX(b,l1,t1,m1,1,cum_taus,Lmax)];
real1 += (real * real2 + imag * imag2) * CGcoef;
imag1 += (real2 * imag - real * imag2) * CGcoef;
}
}
}
}
}
}
g_in[global_threadId*2] = real1;
g_in[global_threadId*2+1] = imag1;
}
}
} // namespace
void print_arr(int* v, int l){
printf("vector: (");
for (int i = 0; i < l; i++){
printf("%d, ", v[i]);
}
printf(")\n");
return;
}
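// cum_tau[l] is the per-batch offset (in complex entries) of fragment l in the flattened
// tensor, so cum_tau[L+1] is the total per-batch length used by the IDX() macro.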
int* _get_cum_tau(int* taus, int L){
int* cum_tau = (int*) malloc((L+2)*sizeof(int));
cum_tau[0] = 0;
for (int l = 0; l <= L; l++){
cum_tau[l+1] = cum_tau[l] + (2 * l + 1) * taus[l];
}
return cum_tau;
}
void CG_cuda_forward(
torch::Tensor input,
torch::Tensor output,
int L,
int B,
torch::Tensor taus_tensor){
//auto output = torch::zeros_like(old_cell);
float* F = input.data<float>();
float* out = output.data<float>();
int* taus = taus_tensor.data<int>();
//printf("len(taus) = %d\n", taus_tensor.size(0));
int* new_taus = (int*) calloc(L+1, sizeof(int));
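// Output multiplicities: every admissible pair (l1, l2) with |l1 - l2| <= l <= l1 + l2
// contributes taus[l1] * taus[l2] channels to output fragment l.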
for (int l1 = 0; l1 <= L; l1++){
for (int l2 = 0; l2 <= l1; l2++){
for (int l = l1-l2; l <=L && l <= l1 + l2; l++){
new_taus[l] += taus[l1] * taus[l2];
}
}
}
int* cum_tauIn_m = _get_cum_tau(taus, L);
int* cum_tauMiddle_m = _get_cum_tau(new_taus, L);
int size = B * (L+1) * (L+2) * (L+1) /2;
int LOGFACT_SIZE=5*L+20;
double* logfact = (double*) calloc(LOGFACT_SIZE, sizeof(double));
for (int i = 2; i < LOGFACT_SIZE; i++){
logfact[i] = logfact[i-1] + log((double) i);
}
double* cuda_logfact;
cudaMalloc((void**) &cuda_logfact, LOGFACT_SIZE*sizeof(double));
cudaMemcpy(cuda_logfact, logfact, LOGFACT_SIZE*sizeof(double), cudaMemcpyHostToDevice);
int *cuda_tauIn, *cuda_cum_tauIn_m, *cuda_cum_tauMiddle_m;
cudaMalloc((void**) &cuda_tauIn, (L+1)*sizeof(int));
cudaMemcpy(cuda_tauIn, taus, (L+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**) &cuda_cum_tauIn_m, (L+2)*sizeof(int));
cudaMemcpy(cuda_cum_tauIn_m, cum_tauIn_m, (L+2)*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**) &cuda_cum_tauMiddle_m, (L+2)*sizeof(int));
cudaMemcpy(cuda_cum_tauMiddle_m, cum_tauMiddle_m, (L+2)*sizeof(int), cudaMemcpyHostToDevice);
cudaCG_forward_kernel<<<cuda_gridsize(size), NUM_THREADS, 0>>>(
F, out,
cuda_tauIn, cuda_cum_tauIn_m, cuda_cum_tauMiddle_m,
cuda_logfact, L, B);
}
void CG_cuda_backward(
torch::Tensor input,
torch::Tensor grad_in,
torch::Tensor grad_out,
torch::Tensor CG_tensor,
int L,
int B,
torch::Tensor taus_tensor){
float* F = input.data<float>();
float* g_in = grad_in.data<float>();
float* g_out = grad_out.data<float>();
int* taus = taus_tensor.data<int>();
int* new_taus = (int*) calloc(L+1, sizeof(int));
for (int l1 = 0; l1 <= L; l1++){
for (int l2 = 0; l2 <= l1; l2++){
for (int l = l1-l2; l <=L && l <= l1 + l2; l++){
new_taus[l] += taus[l1] * taus[l2];
}
}
}
int* cum_tauIn_m = _get_cum_tau(taus, L);
int* cum_tauMiddle_m = _get_cum_tau(new_taus, L);
int *cuda_tauIn, *cuda_cum_tauIn_m, *cuda_cum_tauMiddle_m;
cudaMalloc((void**) &cuda_tauIn, (L+1)*sizeof(int));
cudaMalloc((void**) &cuda_cum_tauIn_m, (L+2)*sizeof(int));
cudaMalloc((void**) &cuda_cum_tauMiddle_m, (L+2)*sizeof(int));
cudaMemcpy(cuda_tauIn, taus, (L+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_cum_tauIn_m, cum_tauIn_m, (L+2)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_cum_tauMiddle_m, cum_tauMiddle_m, (L+2)*sizeof(int), cudaMemcpyHostToDevice);
//Compute taus such that taus[l] is the starting position of the tensor
int size = B * cum_tauIn_m[L+1];
//printf("Need %d threads\n", size);
//Prep for CG
int LOGFACT_SIZE=5*L+20;
double* logfact = (double*) calloc(LOGFACT_SIZE, sizeof(double));
for (int i = 2; i < LOGFACT_SIZE; i++){
logfact[i] = logfact[i-1] + log((double) i);
}
double* cuda_logfact;
cudaMalloc((void**) &cuda_logfact, LOGFACT_SIZE*sizeof(double));
cudaMemcpy(cuda_logfact, logfact, LOGFACT_SIZE*sizeof(double), cudaMemcpyHostToDevice);
float* CG = CG_tensor.data<float>();
int size0 = (L+1)*(L+1)*(L+1)*(2*L+1);
cudaprecomputeCG_job<<<cuda_gridsize(size0), NUM_THREADS, 0>>>(
CG,
cuda_logfact, L, B);
cudaDeviceSynchronize();
cudaCG_backward_kernel<<<cuda_gridsize(size), NUM_THREADS, 0>>>(
F, g_in, g_out,
cuda_tauIn, cuda_cum_tauIn_m, cuda_cum_tauMiddle_m,
CG, L, B);
cudaDeviceSynchronize();
cudaFree(cuda_logfact);
cudaFree(cuda_tauIn);
cudaFree(cuda_cum_tauIn_m);
cudaFree(cuda_cum_tauMiddle_m);
free(logfact);
free(cum_tauIn_m);
free(cum_tauMiddle_m);
}
|
d19b47629f970c425ca928142613080179455d4e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_euclidean_norm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *vec = NULL;
hipMalloc(&vec, XSIZE*YSIZE);
int numElements = 1;
double *answer = NULL;
hipMalloc(&answer, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
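// Warm up with one synchronized launch plus ten extra iterations, then time 1000 launches
// and print the elapsed microseconds together with the block and matrix configuration.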
hipFree(0);
hipLaunchKernelGGL((kernel_euclidean_norm), dim3(gridBlock), dim3(threadBlock), 0, 0, vec, numElements, answer);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kernel_euclidean_norm), dim3(gridBlock), dim3(threadBlock), 0, 0, vec, numElements, answer);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kernel_euclidean_norm), dim3(gridBlock), dim3(threadBlock), 0, 0, vec, numElements, answer);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d19b47629f970c425ca928142613080179455d4e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_euclidean_norm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *vec = NULL;
cudaMalloc(&vec, XSIZE*YSIZE);
int numElements = 1;
double *answer = NULL;
cudaMalloc(&answer, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_euclidean_norm<<<gridBlock,threadBlock>>>(vec,numElements,answer);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_euclidean_norm<<<gridBlock,threadBlock>>>(vec,numElements,answer);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_euclidean_norm<<<gridBlock,threadBlock>>>(vec,numElements,answer);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
468b0da8152bd5290c1206388ce0a7d17ebebcea.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <algorithm>
#include <cassert>
#include "paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.h"
#include "paddle/fluid/operators/detection/anchor_generator_op.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
#define PrepareParamsOnDevice() \
constexpr int data_size = 4; \
hipMalloc(&anchor_sizes_device_, anchor_sizes_.size() * data_size); \
hipMalloc(&aspect_ratios_device_, aspect_ratios_.size() * data_size); \
hipMalloc(&stride_device_, stride_.size() * data_size); \
hipMalloc(&variances_device_, variances_.size() * data_size); \
hipMemcpy(anchor_sizes_device_, anchor_sizes_.data(), \
anchor_sizes_.size() * data_size, hipMemcpyHostToDevice); \
hipMemcpy(aspect_ratios_device_, aspect_ratios_.data(), \
aspect_ratios_.size() * data_size, hipMemcpyHostToDevice); \
hipMemcpy(stride_device_, stride_.data(), stride_.size() * data_size, \
hipMemcpyHostToDevice); \
hipMemcpy(variances_device_, variances_.data(), \
variances_.size() * data_size, hipMemcpyHostToDevice);
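// The macro above copies the anchor configuration vectors to device memory once per plugin
// instance; the destructor releases these buffers with hipFree.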
AnchorGeneratorPlugin::AnchorGeneratorPlugin(
const nvinfer1::DataType data_type, const std::vector<float>& anchor_sizes,
const std::vector<float>& aspect_ratios, const std::vector<float>& stride,
const std::vector<float>& variances, const float offset, const int height,
const int width, const int num_anchors, const int box_num)
: data_type_(data_type),
anchor_sizes_(anchor_sizes),
aspect_ratios_(aspect_ratios),
stride_(stride),
variances_(variances),
offset_(offset),
height_(height),
width_(width),
num_anchors_(num_anchors),
box_num_(box_num) {
// anchors must be float32, which is the generator proposals' input
PADDLE_ENFORCE_EQ(data_type_, nvinfer1::DataType::kFLOAT,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts float32."));
PADDLE_ENFORCE_GE(height_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts height "
"greater than 0, but receive height = %d.",
height_));
PADDLE_ENFORCE_GE(width_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts width "
"greater than 0, but receive width = %d.",
width_));
PADDLE_ENFORCE_GE(
num_anchors_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts number of anchors greater "
"than 0, but receive number of anchors = %d.",
num_anchors_));
PADDLE_ENFORCE_GE(box_num_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts box_num "
"greater than 0, but receive box_num = %d.",
box_num_));
PrepareParamsOnDevice();
}
AnchorGeneratorPlugin::~AnchorGeneratorPlugin() {
auto release_device_ptr = [](void* ptr) {
if (ptr) {
hipFree(ptr);
ptr = nullptr;
}
};
release_device_ptr(anchor_sizes_device_);
release_device_ptr(aspect_ratios_device_);
release_device_ptr(stride_device_);
release_device_ptr(variances_device_);
}
AnchorGeneratorPlugin::AnchorGeneratorPlugin(const void* data, size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &anchor_sizes_);
DeserializeValue(&data, &length, &aspect_ratios_);
DeserializeValue(&data, &length, &stride_);
DeserializeValue(&data, &length, &variances_);
DeserializeValue(&data, &length, &offset_);
DeserializeValue(&data, &length, &height_);
DeserializeValue(&data, &length, &width_);
DeserializeValue(&data, &length, &num_anchors_);
DeserializeValue(&data, &length, &box_num_);
PrepareParamsOnDevice();
}
const char* AnchorGeneratorPlugin::getPluginType() const {
return "anchor_generator_plugin";
}
const char* AnchorGeneratorPlugin::getPluginVersion() const { return "1"; }
int AnchorGeneratorPlugin::getNbOutputs() const { return 2; }
nvinfer1::Dims AnchorGeneratorPlugin::getOutputDimensions(
int index, const nvinfer1::Dims* inputs, int nb_input_dims) {
nvinfer1::Dims dims{};
dims.nbDims = 4;
dims.d[0] = height_;
dims.d[1] = width_;
dims.d[2] = num_anchors_;
dims.d[3] = 4;
return dims;
}
bool AnchorGeneratorPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::TensorFormat format) const {
// static shape plugin can't support different type between input/out
// it may cause additional overhead in half mode
return (type == data_type_ && format == nvinfer1::TensorFormat::kLINEAR);
}
size_t AnchorGeneratorPlugin::getWorkspaceSize(int max_batch_size) const {
return 0;
}
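// enqueue_impl launches the same GenAnchors and SetVariance kernels as the native Paddle
// anchor_generator op, writing anchors to outputs[0] and variances to outputs[1], and
// returns nonzero if the runtime reported an error.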
template <typename T>
int AnchorGeneratorPlugin::enqueue_impl(int batch_size,
const void* const* inputs,
void** outputs, void* workspace,
hipStream_t stream) {
const int block = 512;
const int gen_anchor_grid = (box_num_ + block - 1) / block;
T* anchors = static_cast<T*>(outputs[0]);
T* vars = static_cast<T*>(outputs[1]);
const T* anchor_sizes_device = static_cast<const T*>(anchor_sizes_device_);
const T* aspect_ratios_device = static_cast<const T*>(aspect_ratios_device_);
const T* stride_device = static_cast<const T*>(stride_device_);
const T* variances_device = static_cast<const T*>(variances_device_);
hipLaunchKernelGGL(( paddle::operators::GenAnchors<T>), dim3(gen_anchor_grid), dim3(block), 0, stream,
anchors, aspect_ratios_device, aspect_ratios_.size(), anchor_sizes_device,
anchor_sizes_.size(), stride_device, stride_.size(), height_, width_,
offset_);
const int var_grid = (box_num_ * 4 + block - 1) / block;
hipLaunchKernelGGL(( paddle::operators::SetVariance<T>), dim3(var_grid), dim3(block), 0, stream,
vars, variances_device, variances_.size(), box_num_ * 4);
return hipGetLastError() != hipSuccess;
}
int AnchorGeneratorPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs, void* workspace,
#else
void* const* outputs, void* workspace,
#endif
hipStream_t stream) {
return enqueue_impl<float>(batch_size, inputs, outputs, workspace, stream);
}
int AnchorGeneratorPlugin::initialize() { return 0; }
void AnchorGeneratorPlugin::terminate() {}
size_t AnchorGeneratorPlugin::getSerializationSize() const {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(anchor_sizes_);
serialize_size += SerializedSize(aspect_ratios_);
serialize_size += SerializedSize(stride_);
serialize_size += SerializedSize(variances_);
serialize_size += SerializedSize(offset_);
serialize_size += SerializedSize(height_);
serialize_size += SerializedSize(width_);
serialize_size += SerializedSize(num_anchors_);
serialize_size += SerializedSize(box_num_);
return serialize_size;
}
void AnchorGeneratorPlugin::serialize(void* buffer) const {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, anchor_sizes_);
SerializeValue(&buffer, aspect_ratios_);
SerializeValue(&buffer, stride_);
SerializeValue(&buffer, variances_);
SerializeValue(&buffer, offset_);
SerializeValue(&buffer, height_);
SerializeValue(&buffer, width_);
SerializeValue(&buffer, num_anchors_);
SerializeValue(&buffer, box_num_);
}
void AnchorGeneratorPlugin::destroy() {}
void AnchorGeneratorPlugin::setPluginNamespace(const char* lib_namespace) {
namespace_ = std::string(lib_namespace);
}
const char* AnchorGeneratorPlugin::getPluginNamespace() const {
return namespace_.c_str();
}
nvinfer1::DataType AnchorGeneratorPlugin::getOutputDataType(
int index, const nvinfer1::DataType* input_type, int nb_inputs) const {
return input_type[0];
}
bool AnchorGeneratorPlugin::isOutputBroadcastAcrossBatch(
int output_index, const bool* input_is_broadcast, int nb_inputs) const {
return true;
}
bool AnchorGeneratorPlugin::canBroadcastInputAcrossBatch(
int input_index) const {
return false;
}
void AnchorGeneratorPlugin::configurePlugin(
const nvinfer1::Dims* input_dims, int nb_inputs,
const nvinfer1::Dims* output_dims, int nb_outputs,
const nvinfer1::DataType* input_types,
const nvinfer1::DataType* output_types, const bool* input_is_broadcast,
const bool* output_is_broadcast, nvinfer1::PluginFormat float_format,
int max_batch_size) {}
nvinfer1::IPluginV2Ext* AnchorGeneratorPlugin::clone() const {
auto plugin = new AnchorGeneratorPlugin(
data_type_, anchor_sizes_, aspect_ratios_, stride_, variances_, offset_,
height_, width_, num_anchors_, box_num_);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
void AnchorGeneratorPluginCreator::setPluginNamespace(
const char* lib_namespace) {
namespace_ = std::string(lib_namespace);
}
const char* AnchorGeneratorPluginCreator::getPluginNamespace() const {
return namespace_.c_str();
}
const char* AnchorGeneratorPluginCreator::getPluginName() const {
return "anchor_generator_plugin";
}
const char* AnchorGeneratorPluginCreator::getPluginVersion() const {
return "1";
}
const nvinfer1::PluginFieldCollection*
AnchorGeneratorPluginCreator::getFieldNames() {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) {
const nvinfer1::PluginField* fields = fc->fields;
int type_id = -1;
std::vector<float> anchor_sizes, aspect_ratios, stride, variances;
float offset = .5;
int height = -1, width = -1;
int num_anchors = -1;
int box_num = -1;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string field_name(fc->fields[i].name);
const auto length = fc->fields[i].length;
if (field_name.compare("type_id") == 0) {
type_id = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("anchor_sizes")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
anchor_sizes.insert(anchor_sizes.end(), data, data + length);
} else if (field_name.compare("aspect_ratios")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
aspect_ratios.insert(aspect_ratios.end(), data, data + length);
} else if (field_name.compare("stride")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
stride.insert(stride.end(), data, data + length);
} else if (field_name.compare("variances")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
variances.insert(variances.end(), data, data + length);
} else if (field_name.compare("offset")) {
offset = *static_cast<const float*>(fc->fields[i].data);
} else if (field_name.compare("height")) {
height = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("width")) {
width = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("num_anchors")) {
num_anchors = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("box_num")) {
box_num = *static_cast<const int*>(fc->fields[i].data);
} else {
assert(false && "unknown plugin field name.");
}
}
return new AnchorGeneratorPlugin(nvinfer1::DataType::kFLOAT, anchor_sizes,
aspect_ratios, stride, variances, offset,
height, width, num_anchors, box_num);
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginCreator::deserializePlugin(
const char* name, const void* serial_data, size_t serial_length) {
auto plugin = new AnchorGeneratorPlugin(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
#if IS_TRT_VERSION_GE(6000)
AnchorGeneratorPluginDynamic::AnchorGeneratorPluginDynamic(
const nvinfer1::DataType data_type, const std::vector<float>& anchor_sizes,
const std::vector<float>& aspect_ratios, const std::vector<float>& stride,
const std::vector<float>& variances, const float offset,
const int num_anchors)
: data_type_(data_type),
anchor_sizes_(anchor_sizes),
aspect_ratios_(aspect_ratios),
stride_(stride),
variances_(variances),
offset_(offset),
num_anchors_(num_anchors) {
// data_type_ is used to determine the output data type
// data_type_ can only be float32
// height, width, num_anchors are calculated at configurePlugin
PADDLE_ENFORCE_EQ(data_type_, nvinfer1::DataType::kFLOAT,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts float32."));
PADDLE_ENFORCE_GE(
num_anchors_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts number of anchors greater "
"than 0, but receive number of anchors = %d.",
num_anchors_));
PrepareParamsOnDevice();
}
AnchorGeneratorPluginDynamic::~AnchorGeneratorPluginDynamic() {
auto release_device_ptr = [](void* ptr) {
if (ptr) {
hipFree(ptr);
ptr = nullptr;
}
};
release_device_ptr(anchor_sizes_device_);
release_device_ptr(aspect_ratios_device_);
release_device_ptr(stride_device_);
release_device_ptr(variances_device_);
}
AnchorGeneratorPluginDynamic::AnchorGeneratorPluginDynamic(void const* data,
size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &anchor_sizes_);
DeserializeValue(&data, &length, &aspect_ratios_);
DeserializeValue(&data, &length, &stride_);
DeserializeValue(&data, &length, &variances_);
DeserializeValue(&data, &length, &offset_);
DeserializeValue(&data, &length, &num_anchors_);
PrepareParamsOnDevice();
}
nvinfer1::IPluginV2DynamicExt* AnchorGeneratorPluginDynamic::clone() const {
auto plugin = new AnchorGeneratorPluginDynamic(
data_type_, anchor_sizes_, aspect_ratios_, stride_, variances_, offset_,
num_anchors_);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
nvinfer1::DimsExprs AnchorGeneratorPluginDynamic::getOutputDimensions(
int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs,
nvinfer1::IExprBuilder& exprBuilder) {
nvinfer1::DimsExprs ret{};
ret.nbDims = 4;
ret.d[0] = inputs[0].d[2]; // feature height
ret.d[1] = inputs[0].d[3]; // feature width
ret.d[2] = exprBuilder.constant(num_anchors_);
ret.d[3] = exprBuilder.constant(4);
return ret;
}
bool AnchorGeneratorPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs,
int nbOutputs) {
// input can be any, doesn't matter
// anchor generator doesn't read input raw data, only need the shape info
auto type = inOut[pos].type;
auto format = inOut[pos].format;
#if IS_TRT_VERSION_GE(7234)
if (pos == 0) return true;
#else
if (pos == 0) return format == nvinfer1::TensorFormat::kLINEAR;
#endif
return (type == nvinfer1::DataType::kFLOAT &&
format == nvinfer1::TensorFormat::kLINEAR);
}
void AnchorGeneratorPluginDynamic::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) {}
size_t AnchorGeneratorPluginDynamic::getWorkspaceSize(
const nvinfer1::PluginTensorDesc* inputs, int nbInputs,
const nvinfer1::PluginTensorDesc* outputs, int nbOutputs) const {
return 0;
}
template <typename T>
int AnchorGeneratorPluginDynamic::enqueue_impl(
const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs,
void* const* outputs, void* workspace, hipStream_t stream) {
const int height = inputDesc[0].dims.d[2];
const int width = inputDesc[0].dims.d[3];
const int box_num = height * width * num_anchors_;
const int block = 512;
const int gen_anchor_grid = (box_num + block - 1) / block;
T* anchors = static_cast<T*>(outputs[0]);
T* vars = static_cast<T*>(outputs[1]);
const T* anchor_sizes_device = static_cast<const T*>(anchor_sizes_device_);
const T* aspect_ratios_device = static_cast<const T*>(aspect_ratios_device_);
const T* stride_device = static_cast<const T*>(stride_device_);
const T* variances_device = static_cast<const T*>(variances_device_);
hipLaunchKernelGGL(( paddle::operators::GenAnchors<T>), dim3(gen_anchor_grid), dim3(block), 0, stream,
anchors, aspect_ratios_device, aspect_ratios_.size(), anchor_sizes_device,
anchor_sizes_.size(), stride_device, stride_.size(), height, width,
offset_);
const int var_grid = (box_num * 4 + block - 1) / block;
hipLaunchKernelGGL(( paddle::operators::SetVariance<T>), dim3(var_grid), dim3(block), 0, stream,
vars, variances_device, variances_.size(), box_num * 4);
return hipGetLastError() != hipSuccess;
}
int AnchorGeneratorPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs,
void* const* outputs, void* workspace, hipStream_t stream) {
assert(outputDesc[0].type == nvinfer1::DataType::kFLOAT);
assert(outputDesc[1].type == nvinfer1::DataType::kFLOAT);
return enqueue_impl<float>(inputDesc, outputDesc, inputs, outputs, workspace,
stream);
}
nvinfer1::DataType AnchorGeneratorPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType* inputTypes, int nbInputs) const {
return inputTypes[0];
}
const char* AnchorGeneratorPluginDynamic::getPluginType() const {
return "anchor_generator_plugin_dynamic";
}
int AnchorGeneratorPluginDynamic::getNbOutputs() const { return 2; }
int AnchorGeneratorPluginDynamic::initialize() { return 0; }
void AnchorGeneratorPluginDynamic::terminate() {}
size_t AnchorGeneratorPluginDynamic::getSerializationSize() const {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(anchor_sizes_);
serialize_size += SerializedSize(aspect_ratios_);
serialize_size += SerializedSize(stride_);
serialize_size += SerializedSize(variances_);
serialize_size += SerializedSize(offset_);
serialize_size += SerializedSize(num_anchors_);
return serialize_size;
}
void AnchorGeneratorPluginDynamic::serialize(void* buffer) const {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, anchor_sizes_);
SerializeValue(&buffer, aspect_ratios_);
SerializeValue(&buffer, stride_);
SerializeValue(&buffer, variances_);
SerializeValue(&buffer, offset_);
SerializeValue(&buffer, num_anchors_);
}
void AnchorGeneratorPluginDynamic::destroy() {}
void AnchorGeneratorPluginDynamicCreator::setPluginNamespace(
const char* lib_namespace) {
namespace_ = std::string(lib_namespace);
}
const char* AnchorGeneratorPluginDynamicCreator::getPluginNamespace() const {
return namespace_.c_str();
}
const char* AnchorGeneratorPluginDynamicCreator::getPluginName() const {
return "anchor_generator_plugin_dynamic";
}
const char* AnchorGeneratorPluginDynamicCreator::getPluginVersion() const {
return "1";
}
const nvinfer1::PluginFieldCollection*
AnchorGeneratorPluginDynamicCreator::getFieldNames() {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginDynamicCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) {
const nvinfer1::PluginField* fields = fc->fields;
int type_id = -1;
std::vector<float> anchor_sizes, aspect_ratios, stride, variances;
float offset = .5;
int num_anchors = -1;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string field_name(fc->fields[i].name);
const auto length = fc->fields[i].length;
if (field_name.compare("type_id") == 0) {
type_id = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("anchor_sizes")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
anchor_sizes.insert(anchor_sizes.end(), data, data + length);
} else if (field_name.compare("aspect_ratios")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
aspect_ratios.insert(aspect_ratios.end(), data, data + length);
} else if (field_name.compare("stride")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
stride.insert(stride.end(), data, data + length);
} else if (field_name.compare("variances")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
variances.insert(variances.end(), data, data + length);
} else if (field_name.compare("offset")) {
offset = *static_cast<const float*>(fc->fields[i].data);
} else if (field_name.compare("num_anchors")) {
num_anchors = *static_cast<const int*>(fc->fields[i].data);
} else {
assert(false && "unknown plugin field name.");
}
}
return new AnchorGeneratorPluginDynamic(nvinfer1::DataType::kFLOAT,
anchor_sizes, aspect_ratios, stride,
variances, offset, num_anchors);
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginDynamicCreator::deserializePlugin(
const char* name, const void* serial_data, size_t serial_length) {
auto plugin = new AnchorGeneratorPluginDynamic(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 468b0da8152bd5290c1206388ce0a7d17ebebcea.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <cassert>
#include "paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.h"
#include "paddle/fluid/operators/detection/anchor_generator_op.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
#define PrepareParamsOnDevice() \
constexpr int data_size = 4; \
cudaMalloc(&anchor_sizes_device_, anchor_sizes_.size() * data_size); \
cudaMalloc(&aspect_ratios_device_, aspect_ratios_.size() * data_size); \
cudaMalloc(&stride_device_, stride_.size() * data_size); \
cudaMalloc(&variances_device_, variances_.size() * data_size); \
cudaMemcpy(anchor_sizes_device_, anchor_sizes_.data(), \
anchor_sizes_.size() * data_size, cudaMemcpyHostToDevice); \
cudaMemcpy(aspect_ratios_device_, aspect_ratios_.data(), \
aspect_ratios_.size() * data_size, cudaMemcpyHostToDevice); \
cudaMemcpy(stride_device_, stride_.data(), stride_.size() * data_size, \
cudaMemcpyHostToDevice); \
cudaMemcpy(variances_device_, variances_.data(), \
variances_.size() * data_size, cudaMemcpyHostToDevice);
AnchorGeneratorPlugin::AnchorGeneratorPlugin(
const nvinfer1::DataType data_type, const std::vector<float>& anchor_sizes,
const std::vector<float>& aspect_ratios, const std::vector<float>& stride,
const std::vector<float>& variances, const float offset, const int height,
const int width, const int num_anchors, const int box_num)
: data_type_(data_type),
anchor_sizes_(anchor_sizes),
aspect_ratios_(aspect_ratios),
stride_(stride),
variances_(variances),
offset_(offset),
height_(height),
width_(width),
num_anchors_(num_anchors),
box_num_(box_num) {
// anchors must be float32, which is the generator proposals' input
PADDLE_ENFORCE_EQ(data_type_, nvinfer1::DataType::kFLOAT,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts float32."));
PADDLE_ENFORCE_GE(height_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts height "
"greater than 0, but receive height = %d.",
height_));
PADDLE_ENFORCE_GE(width_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts width "
"greater than 0, but receive width = %d.",
width_));
PADDLE_ENFORCE_GE(
num_anchors_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts number of anchors greater "
"than 0, but receive number of anchors = %d.",
num_anchors_));
PADDLE_ENFORCE_GE(box_num_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts box_num "
"greater than 0, but receive box_num = %d.",
box_num_));
PrepareParamsOnDevice();
}
AnchorGeneratorPlugin::~AnchorGeneratorPlugin() {
auto release_device_ptr = [](void* ptr) {
if (ptr) {
cudaFree(ptr);
ptr = nullptr;
}
};
release_device_ptr(anchor_sizes_device_);
release_device_ptr(aspect_ratios_device_);
release_device_ptr(stride_device_);
release_device_ptr(variances_device_);
}
AnchorGeneratorPlugin::AnchorGeneratorPlugin(const void* data, size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &anchor_sizes_);
DeserializeValue(&data, &length, &aspect_ratios_);
DeserializeValue(&data, &length, &stride_);
DeserializeValue(&data, &length, &variances_);
DeserializeValue(&data, &length, &offset_);
DeserializeValue(&data, &length, &height_);
DeserializeValue(&data, &length, &width_);
DeserializeValue(&data, &length, &num_anchors_);
DeserializeValue(&data, &length, &box_num_);
PrepareParamsOnDevice();
}
const char* AnchorGeneratorPlugin::getPluginType() const {
return "anchor_generator_plugin";
}
const char* AnchorGeneratorPlugin::getPluginVersion() const { return "1"; }
int AnchorGeneratorPlugin::getNbOutputs() const { return 2; }
nvinfer1::Dims AnchorGeneratorPlugin::getOutputDimensions(
int index, const nvinfer1::Dims* inputs, int nb_input_dims) {
nvinfer1::Dims dims{};
dims.nbDims = 4;
dims.d[0] = height_;
dims.d[1] = width_;
dims.d[2] = num_anchors_;
dims.d[3] = 4;
return dims;
}
bool AnchorGeneratorPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::TensorFormat format) const {
// static shape plugin can't support different type between input/out
// it may cause additional overhead in half mode
return (type == data_type_ && format == nvinfer1::TensorFormat::kLINEAR);
}
size_t AnchorGeneratorPlugin::getWorkspaceSize(int max_batch_size) const {
return 0;
}
template <typename T>
int AnchorGeneratorPlugin::enqueue_impl(int batch_size,
const void* const* inputs,
void** outputs, void* workspace,
cudaStream_t stream) {
const int block = 512;
const int gen_anchor_grid = (box_num_ + block - 1) / block;
T* anchors = static_cast<T*>(outputs[0]);
T* vars = static_cast<T*>(outputs[1]);
const T* anchor_sizes_device = static_cast<const T*>(anchor_sizes_device_);
const T* aspect_ratios_device = static_cast<const T*>(aspect_ratios_device_);
const T* stride_device = static_cast<const T*>(stride_device_);
const T* variances_device = static_cast<const T*>(variances_device_);
paddle::operators::GenAnchors<T><<<gen_anchor_grid, block, 0, stream>>>(
anchors, aspect_ratios_device, aspect_ratios_.size(), anchor_sizes_device,
anchor_sizes_.size(), stride_device, stride_.size(), height_, width_,
offset_);
const int var_grid = (box_num_ * 4 + block - 1) / block;
paddle::operators::SetVariance<T><<<var_grid, block, 0, stream>>>(
vars, variances_device, variances_.size(), box_num_ * 4);
return cudaGetLastError() != cudaSuccess;
}
int AnchorGeneratorPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
void** outputs, void* workspace,
#else
void* const* outputs, void* workspace,
#endif
cudaStream_t stream) {
return enqueue_impl<float>(batch_size, inputs, outputs, workspace, stream);
}
int AnchorGeneratorPlugin::initialize() { return 0; }
void AnchorGeneratorPlugin::terminate() {}
size_t AnchorGeneratorPlugin::getSerializationSize() const {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(anchor_sizes_);
serialize_size += SerializedSize(aspect_ratios_);
serialize_size += SerializedSize(stride_);
serialize_size += SerializedSize(variances_);
serialize_size += SerializedSize(offset_);
serialize_size += SerializedSize(height_);
serialize_size += SerializedSize(width_);
serialize_size += SerializedSize(num_anchors_);
serialize_size += SerializedSize(box_num_);
return serialize_size;
}
void AnchorGeneratorPlugin::serialize(void* buffer) const {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, anchor_sizes_);
SerializeValue(&buffer, aspect_ratios_);
SerializeValue(&buffer, stride_);
SerializeValue(&buffer, variances_);
SerializeValue(&buffer, offset_);
SerializeValue(&buffer, height_);
SerializeValue(&buffer, width_);
SerializeValue(&buffer, num_anchors_);
SerializeValue(&buffer, box_num_);
}
void AnchorGeneratorPlugin::destroy() {}
void AnchorGeneratorPlugin::setPluginNamespace(const char* lib_namespace) {
namespace_ = std::string(lib_namespace);
}
const char* AnchorGeneratorPlugin::getPluginNamespace() const {
return namespace_.c_str();
}
nvinfer1::DataType AnchorGeneratorPlugin::getOutputDataType(
int index, const nvinfer1::DataType* input_type, int nb_inputs) const {
return input_type[0];
}
bool AnchorGeneratorPlugin::isOutputBroadcastAcrossBatch(
int output_index, const bool* input_is_broadcast, int nb_inputs) const {
return true;
}
bool AnchorGeneratorPlugin::canBroadcastInputAcrossBatch(
int input_index) const {
return false;
}
void AnchorGeneratorPlugin::configurePlugin(
const nvinfer1::Dims* input_dims, int nb_inputs,
const nvinfer1::Dims* output_dims, int nb_outputs,
const nvinfer1::DataType* input_types,
const nvinfer1::DataType* output_types, const bool* input_is_broadcast,
const bool* output_is_broadcast, nvinfer1::PluginFormat float_format,
int max_batch_size) {}
nvinfer1::IPluginV2Ext* AnchorGeneratorPlugin::clone() const {
auto plugin = new AnchorGeneratorPlugin(
data_type_, anchor_sizes_, aspect_ratios_, stride_, variances_, offset_,
height_, width_, num_anchors_, box_num_);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
void AnchorGeneratorPluginCreator::setPluginNamespace(
const char* lib_namespace) {
namespace_ = std::string(lib_namespace);
}
const char* AnchorGeneratorPluginCreator::getPluginNamespace() const {
return namespace_.c_str();
}
const char* AnchorGeneratorPluginCreator::getPluginName() const {
return "anchor_generator_plugin";
}
const char* AnchorGeneratorPluginCreator::getPluginVersion() const {
return "1";
}
const nvinfer1::PluginFieldCollection*
AnchorGeneratorPluginCreator::getFieldNames() {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) {
const nvinfer1::PluginField* fields = fc->fields;
int type_id = -1;
std::vector<float> anchor_sizes, aspect_ratios, stride, variances;
float offset = .5;
int height = -1, width = -1;
int num_anchors = -1;
int box_num = -1;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string field_name(fc->fields[i].name);
const auto length = fc->fields[i].length;
if (field_name.compare("type_id") == 0) {
type_id = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("anchor_sizes")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
anchor_sizes.insert(anchor_sizes.end(), data, data + length);
} else if (field_name.compare("aspect_ratios")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
aspect_ratios.insert(aspect_ratios.end(), data, data + length);
} else if (field_name.compare("stride")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
stride.insert(stride.end(), data, data + length);
} else if (field_name.compare("variances")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
variances.insert(variances.end(), data, data + length);
} else if (field_name.compare("offset")) {
offset = *static_cast<const float*>(fc->fields[i].data);
} else if (field_name.compare("height")) {
height = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("width")) {
width = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("num_anchors")) {
num_anchors = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("box_num")) {
box_num = *static_cast<const int*>(fc->fields[i].data);
} else {
assert(false && "unknown plugin field name.");
}
}
return new AnchorGeneratorPlugin(nvinfer1::DataType::kFLOAT, anchor_sizes,
aspect_ratios, stride, variances, offset,
height, width, num_anchors, box_num);
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginCreator::deserializePlugin(
const char* name, const void* serial_data, size_t serial_length) {
auto plugin = new AnchorGeneratorPlugin(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
#if IS_TRT_VERSION_GE(6000)
AnchorGeneratorPluginDynamic::AnchorGeneratorPluginDynamic(
const nvinfer1::DataType data_type, const std::vector<float>& anchor_sizes,
const std::vector<float>& aspect_ratios, const std::vector<float>& stride,
const std::vector<float>& variances, const float offset,
const int num_anchors)
: data_type_(data_type),
anchor_sizes_(anchor_sizes),
aspect_ratios_(aspect_ratios),
stride_(stride),
variances_(variances),
offset_(offset),
num_anchors_(num_anchors) {
// data_type_ is used to determine the output data type
// data_type_ can only be float32
// height, width, num_anchors are calculated at configurePlugin
PADDLE_ENFORCE_EQ(data_type_, nvinfer1::DataType::kFLOAT,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts float32."));
PADDLE_ENFORCE_GE(
num_anchors_, 0,
platform::errors::InvalidArgument(
"TRT anchor generator plugin only accepts number of anchors greater "
"than 0, but receive number of anchors = %d.",
num_anchors_));
PrepareParamsOnDevice();
}
AnchorGeneratorPluginDynamic::~AnchorGeneratorPluginDynamic() {
auto release_device_ptr = [](void* ptr) {
if (ptr) {
cudaFree(ptr);
ptr = nullptr;
}
};
release_device_ptr(anchor_sizes_device_);
release_device_ptr(aspect_ratios_device_);
release_device_ptr(stride_device_);
release_device_ptr(variances_device_);
}
AnchorGeneratorPluginDynamic::AnchorGeneratorPluginDynamic(void const* data,
size_t length) {
DeserializeValue(&data, &length, &data_type_);
DeserializeValue(&data, &length, &anchor_sizes_);
DeserializeValue(&data, &length, &aspect_ratios_);
DeserializeValue(&data, &length, &stride_);
DeserializeValue(&data, &length, &variances_);
DeserializeValue(&data, &length, &offset_);
DeserializeValue(&data, &length, &num_anchors_);
PrepareParamsOnDevice();
}
nvinfer1::IPluginV2DynamicExt* AnchorGeneratorPluginDynamic::clone() const {
auto plugin = new AnchorGeneratorPluginDynamic(
data_type_, anchor_sizes_, aspect_ratios_, stride_, variances_, offset_,
num_anchors_);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
nvinfer1::DimsExprs AnchorGeneratorPluginDynamic::getOutputDimensions(
int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs,
nvinfer1::IExprBuilder& exprBuilder) {
nvinfer1::DimsExprs ret{};
ret.nbDims = 4;
ret.d[0] = inputs[0].d[2]; // feature height
ret.d[1] = inputs[0].d[3]; // feature width
ret.d[2] = exprBuilder.constant(num_anchors_);
ret.d[3] = exprBuilder.constant(4);
return ret;
}
bool AnchorGeneratorPluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc* inOut, int nbInputs,
int nbOutputs) {
// input can be any, doesn't matter
// anchor generator doesn't read input raw data, only need the shape info
auto type = inOut[pos].type;
auto format = inOut[pos].format;
#if IS_TRT_VERSION_GE(7234)
if (pos == 0) return true;
#else
if (pos == 0) return format == nvinfer1::TensorFormat::kLINEAR;
#endif
return (type == nvinfer1::DataType::kFLOAT &&
format == nvinfer1::TensorFormat::kLINEAR);
}
void AnchorGeneratorPluginDynamic::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc* in, int nbInputs,
const nvinfer1::DynamicPluginTensorDesc* out, int nbOutputs) {}
size_t AnchorGeneratorPluginDynamic::getWorkspaceSize(
const nvinfer1::PluginTensorDesc* inputs, int nbInputs,
const nvinfer1::PluginTensorDesc* outputs, int nbOutputs) const {
return 0;
}
template <typename T>
int AnchorGeneratorPluginDynamic::enqueue_impl(
const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs,
void* const* outputs, void* workspace, cudaStream_t stream) {
const int height = inputDesc[0].dims.d[2];
const int width = inputDesc[0].dims.d[3];
const int box_num = height * width * num_anchors_;
const int block = 512;
const int gen_anchor_grid = (box_num + block - 1) / block;
T* anchors = static_cast<T*>(outputs[0]);
T* vars = static_cast<T*>(outputs[1]);
const T* anchor_sizes_device = static_cast<const T*>(anchor_sizes_device_);
const T* aspect_ratios_device = static_cast<const T*>(aspect_ratios_device_);
const T* stride_device = static_cast<const T*>(stride_device_);
const T* variances_device = static_cast<const T*>(variances_device_);
paddle::operators::GenAnchors<T><<<gen_anchor_grid, block, 0, stream>>>(
anchors, aspect_ratios_device, aspect_ratios_.size(), anchor_sizes_device,
anchor_sizes_.size(), stride_device, stride_.size(), height, width,
offset_);
const int var_grid = (box_num * 4 + block - 1) / block;
paddle::operators::SetVariance<T><<<var_grid, block, 0, stream>>>(
vars, variances_device, variances_.size(), box_num * 4);
return cudaGetLastError() != cudaSuccess;
}
int AnchorGeneratorPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc* inputDesc,
const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs,
void* const* outputs, void* workspace, cudaStream_t stream) {
assert(outputDesc[0].type == nvinfer1::DataType::kFLOAT);
assert(outputDesc[1].type == nvinfer1::DataType::kFLOAT);
return enqueue_impl<float>(inputDesc, outputDesc, inputs, outputs, workspace,
stream);
}
nvinfer1::DataType AnchorGeneratorPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType* inputTypes, int nbInputs) const {
return inputTypes[0];
}
const char* AnchorGeneratorPluginDynamic::getPluginType() const {
return "anchor_generator_plugin_dynamic";
}
int AnchorGeneratorPluginDynamic::getNbOutputs() const { return 2; }
int AnchorGeneratorPluginDynamic::initialize() { return 0; }
void AnchorGeneratorPluginDynamic::terminate() {}
size_t AnchorGeneratorPluginDynamic::getSerializationSize() const {
size_t serialize_size = 0;
serialize_size += SerializedSize(data_type_);
serialize_size += SerializedSize(anchor_sizes_);
serialize_size += SerializedSize(aspect_ratios_);
serialize_size += SerializedSize(stride_);
serialize_size += SerializedSize(variances_);
serialize_size += SerializedSize(offset_);
serialize_size += SerializedSize(num_anchors_);
return serialize_size;
}
void AnchorGeneratorPluginDynamic::serialize(void* buffer) const {
SerializeValue(&buffer, data_type_);
SerializeValue(&buffer, anchor_sizes_);
SerializeValue(&buffer, aspect_ratios_);
SerializeValue(&buffer, stride_);
SerializeValue(&buffer, variances_);
SerializeValue(&buffer, offset_);
SerializeValue(&buffer, num_anchors_);
}
void AnchorGeneratorPluginDynamic::destroy() {}
void AnchorGeneratorPluginDynamicCreator::setPluginNamespace(
const char* lib_namespace) {
namespace_ = std::string(lib_namespace);
}
const char* AnchorGeneratorPluginDynamicCreator::getPluginNamespace() const {
return namespace_.c_str();
}
const char* AnchorGeneratorPluginDynamicCreator::getPluginName() const {
return "anchor_generator_plugin_dynamic";
}
const char* AnchorGeneratorPluginDynamicCreator::getPluginVersion() const {
return "1";
}
const nvinfer1::PluginFieldCollection*
AnchorGeneratorPluginDynamicCreator::getFieldNames() {
return &field_collection_;
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginDynamicCreator::createPlugin(
const char* name, const nvinfer1::PluginFieldCollection* fc) {
const nvinfer1::PluginField* fields = fc->fields;
int type_id = -1;
std::vector<float> anchor_sizes, aspect_ratios, stride, variances;
float offset = .5;
int num_anchors = -1;
for (int i = 0; i < fc->nbFields; ++i) {
const std::string field_name(fc->fields[i].name);
const auto length = fc->fields[i].length;
if (field_name.compare("type_id") == 0) {
type_id = *static_cast<const int*>(fc->fields[i].data);
} else if (field_name.compare("anchor_sizes")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
anchor_sizes.insert(anchor_sizes.end(), data, data + length);
} else if (field_name.compare("aspect_ratios")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
aspect_ratios.insert(aspect_ratios.end(), data, data + length);
} else if (field_name.compare("stride")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
stride.insert(stride.end(), data, data + length);
} else if (field_name.compare("variances")) {
const auto* data = static_cast<const float*>(fc->fields[i].data);
variances.insert(variances.end(), data, data + length);
} else if (field_name.compare("offset")) {
offset = *static_cast<const float*>(fc->fields[i].data);
} else if (field_name.compare("num_anchors")) {
num_anchors = *static_cast<const int*>(fc->fields[i].data);
} else {
assert(false && "unknown plugin field name.");
}
}
return new AnchorGeneratorPluginDynamic(nvinfer1::DataType::kFLOAT,
anchor_sizes, aspect_ratios, stride,
variances, offset, num_anchors);
}
nvinfer1::IPluginV2Ext* AnchorGeneratorPluginDynamicCreator::deserializePlugin(
const char* name, const void* serial_data, size_t serial_length) {
auto plugin = new AnchorGeneratorPluginDynamic(serial_data, serial_length);
plugin->setPluginNamespace(namespace_.c_str());
return plugin;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
5e96c1a1108acb9d4ba291a570297e2bb6898939.hip | // !!! This is a file automatically generated by hipify!!!
/* The MathWorks Inc. 2019*/
/* ResNet50 demo main.cu file with OpenCV interfaces to read and display data. */
#include "resnet50_wrapper.h"
#include "main_resnet50.h"
#include "resnet50_wrapper_terminate.h"
#include "resnet50_wrapper_initialize.h"
#include "opencv2/opencv.hpp"
#include <stdio.h>
#include <stdlib.h>
#define IMG_WIDTH 224
#define IMG_HEIGHT 224
#define IMG_CH 3
#define VID_DEV_ID -1
using namespace cv;
using namespace std;
static void main_resnet50_wrapper();
/*
* Convert BGR data to RGB data, without this conversion the predictions
* will be bad
*/
static void argInit_224x224x3_real32_T(real32_T *input, Mat & im)
{
for(int j=0;j<224*224;j++)
{
//BGR to RGB
input[2*224*224+j]=(float)(im.data[j*3+0]);
input[1*224*224+j]=(float)(im.data[j*3+1]);
input[0*224*224+j]=(float)(im.data[j*3+2]);
}
}
int cmpfunc(const void * a, const void * b, void * r)
{
float x = ((float*)r)[*(int*)b] - ((float*)r)[*(int*)a] ;
return ( x > 0 ? ceil(x) : floor(x) );
}
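/* top() sorts the 1000 class indices by descending score (qsort_r with cmpfunc above) and returns the indices of the five best scores */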
void top( float* r, int* top5 )
{
int t[1000];
for(int i=0; i<1000; i++)
t[i]=i;
qsort_r(t, 1000, sizeof(int), cmpfunc, r);
top5[0]=t[0];
top5[1]=t[1];
top5[2]=t[2];
top5[3]=t[3];
top5[4]=t[4];
return;
}
/* Write the prediction scores on the output video frame */
void writeData(float *output, char synsetWords[1000][100], Mat & frame, float fps)
{
int top5[5];
top(output, top5);
copyMakeBorder(frame, frame, 0, 0, 400, 0, BORDER_CONSTANT, CV_RGB(0,0,0));
char strbuf[50];
sprintf (strbuf, "%.2f FPS", fps);
putText(frame, strbuf, Point(30,30), FONT_HERSHEY_DUPLEX , 1.0, CV_RGB(220,220,220), 1);
sprintf(strbuf, "%4.1f%% %s", output[top5[0]]*100, synsetWords[top5[0]]);
putText(frame, strbuf, Point(30,80), FONT_HERSHEY_DUPLEX , 1.0, CV_RGB(220,220,220), 1);
sprintf(strbuf, "%4.1f%% %s", output[top5[1]]*100, synsetWords[top5[1]]);
putText(frame, strbuf, Point(30,130), FONT_HERSHEY_DUPLEX , 1.0, CV_RGB(220,220,220), 1);
sprintf(strbuf, "%4.1f%% %s", output[top5[2]]*100, synsetWords[top5[2]]);
putText(frame, strbuf, Point(30,180), FONT_HERSHEY_DUPLEX , 1.0, CV_RGB(220,220,220), 1);
sprintf(strbuf, "%4.1f%% %s", output[top5[3]]*100, synsetWords[top5[3]]);
putText(frame, strbuf, Point(30,230), FONT_HERSHEY_DUPLEX , 1.0, CV_RGB(220,220,220), 1);
sprintf(strbuf, "%4.1f%% %s", output[top5[4]]*100, synsetWords[top5[4]]);
putText(frame, strbuf, Point(30,280), FONT_HERSHEY_DUPLEX , 1.0, CV_RGB(220,220,220), 1);
imshow("resnet Demo", frame);
}
/* Read the class labels from the .txt file */
int prepareSynset(char synsets[1000][100])
{
FILE* fp1 = fopen("synsetWords_resnet50.txt", "r");
if (fp1 == 0) return -1;
for(int i=0; i<1000; i++)
{
fgets(synsets[i], 100, fp1);
strtok(synsets[i], "\n");
}
fclose(fp1);
return 0;
}
static void main_resnet50_wrapper(void)
{
real32_T out[1000];
static real32_T b[150528];
char synsetWords[1000][100];
if (prepareSynset(synsetWords) == -1)
{
printf("ERROR: Unable to find synsetWords_resnet50.txt\n");
exit(0);
}
Mat oFrame, cFrame;
/* Initialize function 'resnet50_wrapper' input arguments. */
/* Initialize function input argument 'in'. */
/* Call the entry-point 'resnet50_wrapper'. */
/* Create a Video capture object */
VideoCapture cap(VID_DEV_ID);
if(!cap.isOpened())
{
cout << "can't open camera" << endl;
exit(0);
}
namedWindow("resnet Demo",WINDOW_NORMAL );
resizeWindow("resnet Demo", 1000,1000);
float fps=0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
while(1)
{
cap >> oFrame;
resize(oFrame,cFrame,Size(IMG_WIDTH,IMG_HEIGHT));
/* convert from BGR to RGB*/
argInit_224x224x3_real32_T(b,cFrame);
hipEventRecord(start);
    /* call the resnet predict function */
resnet50_wrapper(b, out);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = -1.0;
hipEventElapsedTime(&milliseconds, start, stop);
fps = fps*.9+1000.0/milliseconds*.1;
/* Write the prediction on the output frame */
writeData(out, synsetWords, oFrame, fps);
if(waitKey(1)%256 == 27 ) break; // stop when ESC key is pressed
}
}
int32_T main(int32_T argc, const char * const argv[])
{
(void)argc;
(void)argv;
  /* Call the application initialize function */
resnet50_wrapper_initialize();
/* Call the resnet predict function */
main_resnet50_wrapper();
/* Call the application terminate function */
resnet50_wrapper_terminate();
return 0;
}
| 5e96c1a1108acb9d4ba291a570297e2bb6898939.cu | /* The MathWorks Inc. 2019*/
/* ResNet50 demo main.cu file with OpenCV interfaces to read and display data. */
#include "resnet50_wrapper.h"
#include "main_resnet50.h"
#include "resnet50_wrapper_terminate.h"
#include "resnet50_wrapper_initialize.h"
#include "opencv2/opencv.hpp"
#include <stdio.h>
#include <stdlib.h>
#define IMG_WIDTH 224
#define IMG_HEIGHT 224
#define IMG_CH 3
#define VID_DEV_ID -1
using namespace cv;
using namespace std;
static void main_resnet50_wrapper();
/*
* Convert BGR data to RGB data, without this conversion the predictions
* will be bad
*/
static void argInit_224x224x3_real32_T(real32_T *input, Mat & im)
{
for(int j=0;j<224*224;j++)
{
//BGR to RGB
input[2*224*224+j]=(float)(im.data[j*3+0]);
input[1*224*224+j]=(float)(im.data[j*3+1]);
input[0*224*224+j]=(float)(im.data[j*3+2]);
}
}
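/* qsort_r comparator: 'r' is the score array; orders class indices by descending score (used by top() to select the five highest-scoring classes). */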
int cmpfunc(const void * a, const void * b, void * r)
{
float x = ((float*)r)[*(int*)b] - ((float*)r)[*(int*)a] ;
return ( x > 0 ? ceil(x) : floor(x) );
}
void top( float* r, int* top5 )
{
int t[1000];
for(int i=0; i<1000; i++)
t[i]=i;
qsort_r(t, 1000, sizeof(int), cmpfunc, r);
top5[0]=t[0];
top5[1]=t[1];
top5[2]=t[2];
top5[3]=t[3];
top5[4]=t[4];
return;
}
/* Write the prediction scores on the output video frame */
void writeData(float *output, char synsetWords[1000][100], Mat & frame, float fps)
{
int top5[5];
top(output, top5);
copyMakeBorder(frame, frame, 0, 0, 400, 0, BORDER_CONSTANT, CV_RGB(0,0,0));
char strbuf[50];
sprintf (strbuf, "%.2f FPS", fps);
putText(frame, strbuf, Point(30,30), FONT_HERSHEY_DUPLEX , 1.0, CV_RGB(220,220,220), 1);
sprintf(strbuf, "%4.1f%% %s", output[top5[0]]*100, synsetWords[top5[0]]);
putText(frame, strbuf, Point(30,80), FONT_HERSHEY_DUPLEX , 1.0, CV_RGB(220,220,220), 1);
sprintf(strbuf, "%4.1f%% %s", output[top5[1]]*100, synsetWords[top5[1]]);
putText(frame, strbuf, Point(30,130), FONT_HERSHEY_DUPLEX , 1.0, CV_RGB(220,220,220), 1);
sprintf(strbuf, "%4.1f%% %s", output[top5[2]]*100, synsetWords[top5[2]]);
putText(frame, strbuf, Point(30,180), FONT_HERSHEY_DUPLEX , 1.0, CV_RGB(220,220,220), 1);
sprintf(strbuf, "%4.1f%% %s", output[top5[3]]*100, synsetWords[top5[3]]);
putText(frame, strbuf, Point(30,230), FONT_HERSHEY_DUPLEX , 1.0, CV_RGB(220,220,220), 1);
sprintf(strbuf, "%4.1f%% %s", output[top5[4]]*100, synsetWords[top5[4]]);
putText(frame, strbuf, Point(30,280), FONT_HERSHEY_DUPLEX , 1.0, CV_RGB(220,220,220), 1);
imshow("resnet Demo", frame);
}
/* Read the class labels from the .txt file */
int prepareSynset(char synsets[1000][100])
{
FILE* fp1 = fopen("synsetWords_resnet50.txt", "r");
if (fp1 == 0) return -1;
for(int i=0; i<1000; i++)
{
fgets(synsets[i], 100, fp1);
strtok(synsets[i], "\n");
}
fclose(fp1);
return 0;
}
static void main_resnet50_wrapper(void)
{
real32_T out[1000];
static real32_T b[150528];
char synsetWords[1000][100];
if (prepareSynset(synsetWords) == -1)
{
printf("ERROR: Unable to find synsetWords_resnet50.txt\n");
exit(0);
}
Mat oFrame, cFrame;
/* Initialize function 'resnet50_wrapper' input arguments. */
/* Initialize function input argument 'in'. */
/* Call the entry-point 'resnet50_wrapper'. */
/* Create a Video capture object */
VideoCapture cap(VID_DEV_ID);
if(!cap.isOpened())
{
cout << "can't open camera" << endl;
exit(0);
}
namedWindow("resnet Demo",WINDOW_NORMAL );
resizeWindow("resnet Demo", 1000,1000);
float fps=0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
while(1)
{
cap >> oFrame;
resize(oFrame,cFrame,Size(IMG_WIDTH,IMG_HEIGHT));
/* convert from BGR to RGB*/
argInit_224x224x3_real32_T(b,cFrame);
cudaEventRecord(start);
    /* call the resnet predict function */
resnet50_wrapper(b, out);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = -1.0;
cudaEventElapsedTime(&milliseconds, start, stop);
fps = fps*.9+1000.0/milliseconds*.1;
/* Write the prediction on the output frame */
writeData(out, synsetWords, oFrame, fps);
if(waitKey(1)%256 == 27 ) break; // stop when ESC key is pressed
}
}
int32_T main(int32_T argc, const char * const argv[])
{
(void)argc;
(void)argv;
  /* Call the application initialize function */
resnet50_wrapper_initialize();
/* Call the resnet predict function */
main_resnet50_wrapper();
/* Call the application terminate function */
resnet50_wrapper_terminate();
return 0;
}
|
f9009ba0f388ea7454fd9a980f8ae137e472aff9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/hip_cooperative_groups.h>
#include <iostream>
#include "constants.h"
__global__ void sim_kernel(double *pos, double *vel, double *acc, double *mas){
cooperative_groups::grid_group g = cooperative_groups::this_grid();
int particle_id = blockIdx.x *blockDim.x + threadIdx.x;
for (int t = 0; t < CUDA_TIME_LENGTH; t++) {
// Update pos[t+1]
pos[((t + 1) % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] =
pos[(t % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] +
vel[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] * dt +
0.5 * acc[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] * dt *
dt;
pos[((t + 1) % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] =
pos[(t % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] +
vel[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] * dt +
0.5 * acc[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] * dt *
dt;
g.sync();
// Update acc[t+1]
double acc_x = 0, acc_y = 0;
for (int i = 0; i < N_PARTICLE; i++) {
if (i == particle_id) continue;
double dx =
pos[((t + 1) % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] -
pos[((t + 1) % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + i * DIMENSION + 0];
double dy =
pos[((t + 1) % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] -
pos[((t + 1) % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + i * DIMENSION + 1];
double r = sqrt(dx * dx + dy * dy) + POS_EPS;
acc_x += -G * mas[i] * dx / (r * r * r);
acc_y += -G * mas[i] * dy / (r * r * r);
}
acc[((t + 1) % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] = acc_x;
acc[((t + 1) % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] = acc_y;
g.sync();
// Update vel[t+1]
vel[((t + 1) % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] =
vel[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] +
0.5 *
(acc[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] +
acc[((t + 1) % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION +
0]) *
dt;
vel[((t + 1) % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] =
vel[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] +
0.5 *
(acc[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] +
acc[((t + 1) % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION +
1]) *
dt;
}
}
void call_cuda_sim(double *pos_host, double *vel_host, double *acc_host, double *mas_host) {
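    // Copy the initial state to the GPU, run the simulation in chunks of
    // CUDA_TIME_LENGTH steps with a cooperative kernel launch, and copy the
    // positions of each finished chunk back to the host.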
double *pos, *mas, *acc, *vel;
size_t pos_size = sizeof(double) * CUDA_TIME_LENGTH * N_PARTICLE * DIMENSION;
size_t mas_size = sizeof(double) * N_PARTICLE;
size_t vel_size = sizeof(double) * N_PARTICLE * 2 * DIMENSION;
size_t acc_size = sizeof(double) * N_PARTICLE * 2 * DIMENSION;
hipError_t err;
err = hipMalloc(&pos, pos_size);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = hipMalloc(&mas, mas_size);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = hipMalloc(&vel, vel_size);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = hipMalloc(&acc, acc_size);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = hipMemcpy(pos, pos_host, pos_size, hipMemcpyHostToDevice);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = hipMemcpy(mas, mas_host, mas_size, hipMemcpyHostToDevice);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = hipMemcpy(vel, vel_host, vel_size, hipMemcpyHostToDevice);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = hipMemcpy(acc, acc_host, acc_size, hipMemcpyHostToDevice);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
const void* args[]= {&pos, &vel, &acc, &mas};
dim3 grid(N_PARTICLE / N_THREAD_PER_BLOCK, 1, 1);
dim3 block(N_THREAD_PER_BLOCK, 1, 1);
for(int t = 0;t < TIME_LENGTH / CUDA_TIME_LENGTH; ++t){
err = hipLaunchCooperativeKernel((void*)&sim_kernel, grid, block, (void**)args);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = hipMemcpy((char*)pos_host + pos_size * t + sizeof(double) * N_PARTICLE * DIMENSION, (char*)pos + sizeof(double) * N_PARTICLE * DIMENSION,
sizeof(double) * (CUDA_TIME_LENGTH - 1) * N_PARTICLE * DIMENSION, hipMemcpyDeviceToHost);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
if(t < TIME_LENGTH / CUDA_TIME_LENGTH - 1){
err = hipMemcpy((char*)pos_host + pos_size * (t + 1), pos,
sizeof(double) * N_PARTICLE * DIMENSION, hipMemcpyDeviceToHost);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
}
std::cout << t << " / " << TIME_LENGTH / CUDA_TIME_LENGTH << " has finished." << std::endl;
}
err = hipFree(pos);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = hipFree(mas);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = hipFree(acc);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = hipFree(vel);
if(err != hipSuccess){
std::cerr << "GPUassert: " << hipGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
}
| f9009ba0f388ea7454fd9a980f8ae137e472aff9.cu | #include <cooperative_groups.h>
#include <iostream>
#include "constants.h"
__global__ void sim_kernel(double *pos, double *vel, double *acc, double *mas){
cooperative_groups::grid_group g = cooperative_groups::this_grid();
int particle_id = blockIdx.x *blockDim.x + threadIdx.x;
for (int t = 0; t < CUDA_TIME_LENGTH; t++) {
// Update pos[t+1]
pos[((t + 1) % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] =
pos[(t % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] +
vel[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] * dt +
0.5 * acc[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] * dt *
dt;
pos[((t + 1) % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] =
pos[(t % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] +
vel[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] * dt +
0.5 * acc[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] * dt *
dt;
g.sync();
// Update acc[t+1]
double acc_x = 0, acc_y = 0;
for (int i = 0; i < N_PARTICLE; i++) {
if (i == particle_id) continue;
double dx =
pos[((t + 1) % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] -
pos[((t + 1) % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + i * DIMENSION + 0];
double dy =
pos[((t + 1) % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] -
pos[((t + 1) % CUDA_TIME_LENGTH) * N_PARTICLE * DIMENSION + i * DIMENSION + 1];
double r = sqrt(dx * dx + dy * dy) + POS_EPS;
acc_x += -G * mas[i] * dx / (r * r * r);
acc_y += -G * mas[i] * dy / (r * r * r);
}
acc[((t + 1) % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] = acc_x;
acc[((t + 1) % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] = acc_y;
g.sync();
// Update vel[t+1]
vel[((t + 1) % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] =
vel[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] +
0.5 *
(acc[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 0] +
acc[((t + 1) % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION +
0]) *
dt;
vel[((t + 1) % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] =
vel[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] +
0.5 *
(acc[(t % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION + 1] +
acc[((t + 1) % 2) * N_PARTICLE * DIMENSION + particle_id * DIMENSION +
1]) *
dt;
}
}
void call_cuda_sim(double *pos_host, double *vel_host, double *acc_host, double *mas_host) {
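    // Copy the initial state to the GPU, run the simulation in chunks of
    // CUDA_TIME_LENGTH steps with a cooperative kernel launch, and copy the
    // positions of each finished chunk back to the host.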
double *pos, *mas, *acc, *vel;
size_t pos_size = sizeof(double) * CUDA_TIME_LENGTH * N_PARTICLE * DIMENSION;
size_t mas_size = sizeof(double) * N_PARTICLE;
size_t vel_size = sizeof(double) * N_PARTICLE * 2 * DIMENSION;
size_t acc_size = sizeof(double) * N_PARTICLE * 2 * DIMENSION;
cudaError_t err;
err = cudaMalloc(&pos, pos_size);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = cudaMalloc(&mas, mas_size);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = cudaMalloc(&vel, vel_size);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = cudaMalloc(&acc, acc_size);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = cudaMemcpy(pos, pos_host, pos_size, cudaMemcpyHostToDevice);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = cudaMemcpy(mas, mas_host, mas_size, cudaMemcpyHostToDevice);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = cudaMemcpy(vel, vel_host, vel_size, cudaMemcpyHostToDevice);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = cudaMemcpy(acc, acc_host, acc_size, cudaMemcpyHostToDevice);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
const void* args[]= {&pos, &vel, &acc, &mas};
dim3 grid(N_PARTICLE / N_THREAD_PER_BLOCK, 1, 1);
dim3 block(N_THREAD_PER_BLOCK, 1, 1);
for(int t = 0;t < TIME_LENGTH / CUDA_TIME_LENGTH; ++t){
err = cudaLaunchCooperativeKernel((void*)&sim_kernel, grid, block, (void**)args);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = cudaMemcpy((char*)pos_host + pos_size * t + sizeof(double) * N_PARTICLE * DIMENSION, (char*)pos + sizeof(double) * N_PARTICLE * DIMENSION,
sizeof(double) * (CUDA_TIME_LENGTH - 1) * N_PARTICLE * DIMENSION, cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
if(t < TIME_LENGTH / CUDA_TIME_LENGTH - 1){
err = cudaMemcpy((char*)pos_host + pos_size * (t + 1), pos,
sizeof(double) * N_PARTICLE * DIMENSION, cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
}
std::cout << t << " / " << TIME_LENGTH / CUDA_TIME_LENGTH << " has finished." << std::endl;
}
err = cudaFree(pos);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = cudaFree(mas);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = cudaFree(acc);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
err = cudaFree(vel);
if(err != cudaSuccess){
std::cerr << "GPUassert: " << cudaGetErrorString(err) << " " << __FILE__ << " " << __LINE__ << std::endl;
}
}
|
801a054fb583523bfff2fdc6b42aa188bfb4f052.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// DONE
int k = blockIdx.x * blockDim.x + threadIdx.x;
if (k >= n) return;
bools[k] = idata[k] > 0 ? 1 : 0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// DONE
int k = blockIdx.x * blockDim.x + threadIdx.x;
if (k >= n) return;
if (bools[k]) {
odata[indices[k]] = idata[k];
}
}
}
}
| 801a054fb583523bfff2fdc6b42aa188bfb4f052.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// DONE
int k = blockIdx.x * blockDim.x + threadIdx.x;
if (k >= n) return;
bools[k] = idata[k] > 0 ? 1 : 0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// DONE
int k = blockIdx.x * blockDim.x + threadIdx.x;
if (k >= n) return;
if (bools[k]) {
odata[indices[k]] = idata[k];
}
}
}
}
|
a7f90a66babbb70ee4a7893e5dab5e07cea126db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void blend(float *cmap, float* oldd, float* newd, float weight,int * params)
{
int ax = blockIdx.x*blockDim.x + threadIdx.x;
int ay = blockIdx.y*blockDim.y + threadIdx.y;
int ch = params[0];
int ah = params[1];
int aw = params[2];
int slice_a = ah * aw;
int pitch_a = aw;
float thre = 0.05;
if (ax < aw&& ay < ah)
{
float fa = cmap[ay*pitch_a + ax];
if (fa < thre)
fa = 0.0f;
else fa = weight;
for (int i = 0; i < ch; i++)
{
newd[i*slice_a + ay*pitch_a + ax] = oldd[i*slice_a + ay*pitch_a + ax]* fa + newd[i*slice_a + ay*pitch_a + ax] * (1.0-fa);
}
}
} | a7f90a66babbb70ee4a7893e5dab5e07cea126db.cu | #include "includes.h"
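/* Blend kernel: params = {channels, height, width}. Wherever the confidence
   map cmap exceeds a small threshold, the old image is mixed into the new one
   with the given weight; elsewhere the new image is left unchanged. */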
__global__ void blend(float *cmap, float* oldd, float* newd, float weight,int * params)
{
int ax = blockIdx.x*blockDim.x + threadIdx.x;
int ay = blockIdx.y*blockDim.y + threadIdx.y;
int ch = params[0];
int ah = params[1];
int aw = params[2];
int slice_a = ah * aw;
int pitch_a = aw;
float thre = 0.05;
if (ax < aw&& ay < ah)
{
float fa = cmap[ay*pitch_a + ax];
if (fa < thre)
fa = 0.0f;
else fa = weight;
for (int i = 0; i < ch; i++)
{
newd[i*slice_a + ay*pitch_a + ax] = oldd[i*slice_a + ay*pitch_a + ax]* fa + newd[i*slice_a + ay*pitch_a + ax] * (1.0-fa);
}
}
} |
797bacd228ae744257fbe56a031c3eda95ab131d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Modified from
// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/sampling_gpu.cu
#include <stdio.h>
#include <stdlib.h>
#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
inline int opt_n_threads(int work_size) {
const int pow_2 = ::log(static_cast<double>(work_size)) / ::log(2.0);
return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
int idx1, int idx2) {
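  // Pairwise reduction step: keep the larger of the two candidate distances
  // (and its point index) in slot idx1.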
const float v1 = dists[idx1], v2 = dists[idx2];
const int i1 = dists_i[idx1], i2 = dists_i[idx2];
dists[idx1] = max(v1, v2);
dists_i[idx1] = v2 > v1 ? i2 : i1;
}
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
// select m points from n points.
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
// for a single batch
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
if (threadIdx.x == 0) idxs[0] = old;
__syncthreads();
// select m times sequentially
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
// in each selection, we need to calc n times ('stride' threads in total)
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
// float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
// if (mag <= 1e-3)
// continue;
float d =
(x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
if (block_size >= 1024) {
if (tid < 512) {
__update(dists, dists_i, tid, tid + 512);
}
__syncthreads();
}
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0) idxs[j] = old;
}
}
void furthest_point_sampling_kernel_launcher(int b, int n, int m,
const float *dataset, float *temp,
int *idxs, hipStream_t stream) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
hipError_t err;
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 1024:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<1024>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 512:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 256:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<256>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 128:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<128>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 64:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<64>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 32:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<32>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 16:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<16>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 8:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<8>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 4:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<4>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 2:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<2>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 1:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<1>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
default:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
}
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
// Modified from
// https://github.com/qiqihaer/3DSSD-pytorch/blob/master/lib/pointnet2/src/sampling_gpu.cu
template <unsigned int block_size>
__global__ void furthest_point_sampling_with_dist_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
// dataset: (B, N, N)
// tmp: (B, N)
// output:
// idx: (B, M)
if (m <= 0)
return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * n;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
if (threadIdx.x == 0)
idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
// float x1 = dataset[old * 3 + 0];
// float y1 = dataset[old * 3 + 1];
// float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
// float x2, y2, z2;
// x2 = dataset[k * 3 + 0];
// y2 = dataset[k * 3 + 1];
// z2 = dataset[k * 3 + 2];
// float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) *
// (z2 - z1);
float d = dataset[old * n + k];
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
if (block_size >= 1024) {
if (tid < 512) {
__update(dists, dists_i, tid, tid + 512);
}
__syncthreads();
}
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0)
idxs[j] = old;
}
}
void furthest_point_sampling_with_dist_kernel_launcher(int b, int n, int m,
const float *dataset,
float *temp, int *idxs,
hipStream_t stream) {
// dataset: (B, N, N)
// temp: (B, N)
// output:
// idx: (B, M)
hipError_t err;
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 1024:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<1024>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 512:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<512>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 256:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<256>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 128:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<128>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 64:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<64>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 32:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<32>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 16:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<16>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 8:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<8>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 4:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<4>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 2:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<2>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
case 1:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<1>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
break;
default:
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<512>), dim3(b), dim3(n_threads), 0, stream,
b, n, m, dataset, temp, idxs);
}
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| 797bacd228ae744257fbe56a031c3eda95ab131d.cu | // Modified from
// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/sampling_gpu.cu
#include <stdio.h>
#include <stdlib.h>
#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
inline int opt_n_threads(int work_size) {
const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
int idx1, int idx2) {
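  // Pairwise reduction step: keep the larger of the two candidate distances
  // (and its point index) in slot idx1.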
const float v1 = dists[idx1], v2 = dists[idx2];
const int i1 = dists_i[idx1], i2 = dists_i[idx2];
dists[idx1] = max(v1, v2);
dists_i[idx1] = v2 > v1 ? i2 : i1;
}
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
// select m points from n points.
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
// for a single batch
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
if (threadIdx.x == 0) idxs[0] = old;
__syncthreads();
// select m times sequentially
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
// in each selection, we need to calc n times ('stride' threads in total)
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
// float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
// if (mag <= 1e-3)
// continue;
float d =
(x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
if (block_size >= 1024) {
if (tid < 512) {
__update(dists, dists_i, tid, tid + 512);
}
__syncthreads();
}
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0) idxs[j] = old;
}
}
void furthest_point_sampling_kernel_launcher(int b, int n, int m,
const float *dataset, float *temp,
int *idxs, cudaStream_t stream) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
cudaError_t err;
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 1024:
furthest_point_sampling_kernel<1024>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 512:
furthest_point_sampling_kernel<512>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 256:
furthest_point_sampling_kernel<256>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 128:
furthest_point_sampling_kernel<128>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 64:
furthest_point_sampling_kernel<64>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 32:
furthest_point_sampling_kernel<32>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 16:
furthest_point_sampling_kernel<16>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 8:
furthest_point_sampling_kernel<8>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 4:
furthest_point_sampling_kernel<4>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 2:
furthest_point_sampling_kernel<2>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 1:
furthest_point_sampling_kernel<1>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
default:
furthest_point_sampling_kernel<512>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
}
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
// Modified from
// https://github.com/qiqihaer/3DSSD-pytorch/blob/master/lib/pointnet2/src/sampling_gpu.cu
template <unsigned int block_size>
__global__ void furthest_point_sampling_with_dist_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
// dataset: (B, N, N)
// tmp: (B, N)
// output:
// idx: (B, M)
if (m <= 0)
return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * n;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
if (threadIdx.x == 0)
idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
// float x1 = dataset[old * 3 + 0];
// float y1 = dataset[old * 3 + 1];
// float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
// float x2, y2, z2;
// x2 = dataset[k * 3 + 0];
// y2 = dataset[k * 3 + 1];
// z2 = dataset[k * 3 + 2];
// float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) *
// (z2 - z1);
float d = dataset[old * n + k];
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
if (block_size >= 1024) {
if (tid < 512) {
__update(dists, dists_i, tid, tid + 512);
}
__syncthreads();
}
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0)
idxs[j] = old;
}
}
void furthest_point_sampling_with_dist_kernel_launcher(int b, int n, int m,
const float *dataset,
float *temp, int *idxs,
cudaStream_t stream) {
// dataset: (B, N, N)
// temp: (B, N)
// output:
// idx: (B, M)
cudaError_t err;
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 1024:
furthest_point_sampling_with_dist_kernel<1024><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 512:
furthest_point_sampling_with_dist_kernel<512><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 256:
furthest_point_sampling_with_dist_kernel<256><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 128:
furthest_point_sampling_with_dist_kernel<128><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 64:
furthest_point_sampling_with_dist_kernel<64><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 32:
furthest_point_sampling_with_dist_kernel<32><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 16:
furthest_point_sampling_with_dist_kernel<16><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 8:
furthest_point_sampling_with_dist_kernel<8><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 4:
furthest_point_sampling_with_dist_kernel<4><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 2:
furthest_point_sampling_with_dist_kernel<2><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
case 1:
furthest_point_sampling_with_dist_kernel<1><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
break;
default:
furthest_point_sampling_with_dist_kernel<512><<<b, n_threads, 0, stream>>>(
b, n, m, dataset, temp, idxs);
}
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
3f33916a0a6f05b55d9109b1b1bf0d933fb590ac.hip | // !!! This is a file automatically generated by hipify!!!
#include <hiprand/hiprand.h>
#include <cstdio>
#include <iostream>
#include <hip/hip_runtime.h>
#include "n_body_sim_cuda.cuh"
// macro for error-handling
#define gpuErrChk(ans) { gpuAssert((ans), (char*)__FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char* file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Flag for pingpong;
int pingpong = 0;
// Number particles; determined at runtime.
int num_particles;
int num_blocks;
int num_threads_per_block;
// Algorithm to use.
int algorithm;
// Device buffer variables
float2* particle_vels[2]; // x and y represent velocity in 2D
float3* particle_data[2]; // x and y represent position in 2D, z represents mass
__global__
void cudaInitKernel(float2 * vels_buffer, float3 * data_buffer1, float3 * data_buffer2, float * random, float box_width,
float box_height, float min_vel, float max_vel, int num_particles)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < num_particles)
{
vels_buffer[i].x = min_vel + random[4 * i] * (max_vel - min_vel);
vels_buffer[i].y = min_vel + random[4 * i + 1] * (max_vel - min_vel);
data_buffer1[i].x = random[4 * i + 2] * box_width;
data_buffer1[i].y = random[4 * i + 3] * box_height;
data_buffer1[i].z = 1;
data_buffer2[i].z = 1;
/* if (i == 0) {
data_buffer1[i].z = 1000;
data_buffer2[i].z = 1000;
data_buffer1[i].x = box_width / 2;
data_buffer1[i].y = box_height / 2;
vels_buffer[i].x = 0;
vels_buffer[i].y = 0;
}
*/
i += blockDim.x * gridDim.x;
}
}
void alloc_particle_info() {
// instantiate particle_vels, particle_data on GPU
gpuErrChk(hipMalloc((void **) &particle_vels[0], sizeof(float2) * num_particles));
gpuErrChk(hipMalloc((void **) &particle_vels[1], sizeof(float2) * num_particles));
gpuErrChk(hipMalloc((void **) &particle_data[0], sizeof(float3) * num_particles));
gpuErrChk(hipMalloc((void **) &particle_data[1], sizeof(float3) * num_particles));
}
void init_data(int h_num_particles, float box_width, float box_height, float min_vel,
float max_vel, int h_num_blocks, int h_num_threads_per_block, int h_algorithm)
{
num_particles = h_num_particles;
num_blocks = h_num_blocks;
num_threads_per_block = h_num_threads_per_block;
algorithm = h_algorithm;
// instantiate particle_vels, particle_data on GPU
alloc_particle_info();
// set initial values for particle_vels, particle_data on GPU
float * random;
gpuErrChk(hipMalloc((void **) &random, sizeof(float) * num_particles * 4));
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandGenerateUniform(gen, random, num_particles * 4);
hipLaunchKernelGGL(( cudaInitKernel), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, particle_vels[0], particle_data[0], particle_data[1],
random, box_width, box_height, min_vel, max_vel, num_particles);
hiprandDestroyGenerator(gen);
gpuErrChk(hipFree(random));
}
void init_data(int h_num_particles, float *h_particle_data, float *h_particle_vels, int h_num_blocks, int h_num_threads_per_block, int h_algorithm) {
num_particles = h_num_particles;
num_blocks = h_num_blocks;
num_threads_per_block = h_num_threads_per_block;
algorithm = h_algorithm;
alloc_particle_info();
gpuErrChk(hipMemcpy(particle_data[0], h_particle_data, 3 * num_particles * sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMemcpy(particle_data[1], h_particle_data, 3 * num_particles * sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMemcpy(particle_vels[0], h_particle_vels, 2 * num_particles * sizeof(float), hipMemcpyHostToDevice));
}
void delete_data() {
// free all memory on GPU
for (int i = 0; i < 2; i++)
{
gpuErrChk(hipFree(particle_vels[i]));
gpuErrChk(hipFree(particle_data[i]));
}
}
__device__
float2 get_force(float3 pos_data, float3 * data_old, int num_particles) {
// sum force from every other particle based on mass, position of both particles
float2 force;
force.x = 0;
force.y = 0;
float3 other_data; // saves about 3s @ 128 threads/block and 1024 particles to store data_old[i], x_dist, and y_dist locally
float x_dist, y_dist, dist_squared;
float force_magnitude;
float soft_factor = SOFT_FACTOR;
for (int i = 0; i < num_particles; i++)
{
other_data = data_old[i];
x_dist = pos_data.x - other_data.x;
y_dist = pos_data.y - other_data.y;
dist_squared = x_dist * x_dist + y_dist * y_dist + soft_factor;
force_magnitude = pos_data.z * other_data.z / dist_squared;
force.x -= x_dist * force_magnitude / sqrt(dist_squared);
force.y -= y_dist * force_magnitude / sqrt(dist_squared);
}
return force;
}
__device__
float2 get_force_opt1(float3 pos_data, float3 * data_old, int num_particles) {
// sum force from every other particle based on mass, position of both particles
float2 force = {0, 0};
float3 other_data1;
float x_dist1, y_dist1, dist_cubed1;
float force_magnitude1;
  float soft_factor = SOFT_FACTOR;
for (int i = 0; i < num_particles; i+=1)
{
other_data1 = data_old[i];
x_dist1 = pos_data.x - other_data1.x;
y_dist1 = pos_data.y - other_data1.y;
dist_cubed1 = pow(x_dist1 * x_dist1 + y_dist1 * y_dist1 + soft_factor, 1.5f);
force_magnitude1 = pos_data.z * other_data1.z / dist_cubed1;
force.x += x_dist1 * force_magnitude1;
force.y += y_dist1 * force_magnitude1;
}
return force;
}
__device__
float2 get_force_opt2(float3 pos_data, float3 * data_old, int num_particles) {
// sum force from every other particle based on mass, position of both particles
float2 force = {0, 0};
float3 other_data1, other_data2;
float x_dist1, y_dist1, dist_cubed1, x_dist2, y_dist2, dist_cubed2;
float force_magnitude1, force_magnitude2;
  float soft_factor = SOFT_FACTOR;
for (int i = 0; i < num_particles; i+=2)
{
other_data1 = data_old[i];
other_data2 = data_old[i + 1];
x_dist1 = pos_data.x - other_data1.x;
y_dist1 = pos_data.y - other_data1.y;
dist_cubed1 = pow(x_dist1 * x_dist1 + y_dist1 * y_dist1 + soft_factor, 1.5f);
force_magnitude1 = pos_data.z * other_data1.z / dist_cubed1;
x_dist2 = pos_data.x - other_data2.x;
y_dist2 = pos_data.y - other_data2.y;
    dist_cubed2 = pow(x_dist2 * x_dist2 + y_dist2 * y_dist2 + soft_factor, 1.5f);
force_magnitude2 = pos_data.z * other_data2.z / dist_cubed2;
force.x += x_dist1 * force_magnitude1 + x_dist2 * force_magnitude2;
force.y += y_dist1 * force_magnitude1 + y_dist2 * force_magnitude2;
}
return force;
}
__device__
float2 get_force_opt4(float3 pos_data, float3 * data_old, int num_particles) {
// sum force from every other particle based on mass, position of both particles
float2 force = {0, 0};
float3 other_data1, other_data2, other_data3, other_data4;
float x_dist1, y_dist1, dist_cubed1, x_dist2, y_dist2, dist_cubed2;
float x_dist3, y_dist3, dist_cubed3, x_dist4, y_dist4, dist_cubed4;
float force_magnitude1, force_magnitude2, force_magnitude3, force_magnitude4;
float soft_factor = SOFT_FACTOR;
for (int i = 0; i < num_particles; i+=4)
{
other_data1 = data_old[i];
other_data2 = data_old[i + 1];
other_data3 = data_old[i + 2];
other_data4 = data_old[i + 3];
x_dist1 = pos_data.x - other_data1.x;
y_dist1 = pos_data.y - other_data1.y;
dist_cubed1 = pow(x_dist1 * x_dist1 + y_dist1 * y_dist1 + soft_factor, 1.5f);
force_magnitude1 = pos_data.z * other_data1.z / dist_cubed1;
x_dist2 = pos_data.x - other_data2.x;
y_dist2 = pos_data.y - other_data2.y;
dist_cubed2 = pow(x_dist2 * x_dist2 + y_dist2 * y_dist2 + soft_factor, 1.5f);
force_magnitude2 = pos_data.z * other_data2.z / dist_cubed2;
x_dist3 = pos_data.x - other_data3.x;
y_dist3 = pos_data.y - other_data3.y;
dist_cubed3 = pow(x_dist3 * x_dist3 + y_dist3 * y_dist3 + soft_factor, 1.5f);
force_magnitude3 = pos_data.z * other_data3.z / dist_cubed3;
x_dist4 = pos_data.x - other_data4.x;
y_dist4 = pos_data.y - other_data4.y;
dist_cubed4 = pow(x_dist4 * x_dist4 + y_dist4 * y_dist4 + soft_factor, 1.5f);
force_magnitude4 = pos_data.z * other_data4.z / dist_cubed4;
force.x += x_dist1 * force_magnitude1 + x_dist2 * force_magnitude2 +
x_dist3 * force_magnitude3 + x_dist4 * force_magnitude4;
force.y += y_dist1 * force_magnitude1 + y_dist2 * force_magnitude2 +
y_dist3 * force_magnitude3 + y_dist4 * force_magnitude4;
}
return force;
}
__device__
float2 get_force_opt8(float3 pos_data, float3 * data_old, int num_particles) {
// sum force from every other particle based on mass, position of both particles
float2 force = {0, 0};
float3 other_data1, other_data2, other_data3, other_data4;
float3 other_data5, other_data6, other_data7, other_data8;
float x_dist1, y_dist1, dist_cubed1, x_dist2, y_dist2, dist_cubed2;
float x_dist3, y_dist3, dist_cubed3, x_dist4, y_dist4, dist_cubed4;
float x_dist5, y_dist5, dist_cubed5, x_dist6, y_dist6, dist_cubed6;
float x_dist7, y_dist7, dist_cubed7, x_dist8, y_dist8, dist_cubed8;
float force_magnitude1, force_magnitude2, force_magnitude3, force_magnitude4;
float force_magnitude5, force_magnitude6, force_magnitude7, force_magnitude8;
float soft_factor = SOFT_FACTOR;
for (int i = 0; i < num_particles; i+=8)
{
other_data1 = data_old[i];
other_data2 = data_old[i + 1];
other_data3 = data_old[i + 2];
other_data4 = data_old[i + 3];
other_data5 = data_old[i + 4];
other_data6 = data_old[i + 5];
other_data7 = data_old[i + 6];
other_data8 = data_old[i + 7];
x_dist1 = pos_data.x - other_data1.x;
y_dist1 = pos_data.y - other_data1.y;
dist_cubed1 = pow(x_dist1 * x_dist1 + y_dist1 * y_dist1 + soft_factor, 1.5f);
force_magnitude1 = pos_data.z * other_data1.z / dist_cubed1;
x_dist2 = pos_data.x - other_data2.x;
y_dist2 = pos_data.y - other_data2.y;
dist_cubed2 = pow(x_dist2 * x_dist2 + y_dist2 * y_dist2 + soft_factor, 1.5f);
force_magnitude2 = pos_data.z * other_data2.z / dist_cubed2;
x_dist3 = pos_data.x - other_data3.x;
y_dist3 = pos_data.y - other_data3.y;
dist_cubed3 = pow(x_dist3 * x_dist3 + y_dist3 * y_dist3 + soft_factor, 1.5f);
force_magnitude3 = pos_data.z * other_data3.z / dist_cubed3;
x_dist4 = pos_data.x - other_data4.x;
y_dist4 = pos_data.y - other_data4.y;
dist_cubed4 = pow(x_dist4 * x_dist4 + y_dist4 * y_dist4 + soft_factor, 1.5f);
force_magnitude4 = pos_data.z * other_data4.z / dist_cubed4;
x_dist5 = pos_data.x - other_data5.x;
y_dist5 = pos_data.y - other_data5.y;
dist_cubed5 = pow(x_dist5 * x_dist5 + y_dist5 * y_dist5 + soft_factor, 1.5f);
force_magnitude5 = pos_data.z * other_data5.z / dist_cubed5;
x_dist6 = pos_data.x - other_data6.x;
y_dist6 = pos_data.y - other_data6.y;
dist_cubed6 = pow(x_dist6 * x_dist6 + y_dist6 * y_dist6 + soft_factor, 1.5f);
force_magnitude6 = pos_data.z * other_data6.z / dist_cubed6;
x_dist7 = pos_data.x - other_data7.x;
y_dist7 = pos_data.y - other_data7.y;
dist_cubed7 = pow(x_dist7 * x_dist7 + y_dist7 * y_dist7 + soft_factor, 1.5f);
force_magnitude7 = pos_data.z * other_data7.z / dist_cubed7;
x_dist8 = pos_data.x - other_data8.x;
y_dist8 = pos_data.y - other_data8.y;
dist_cubed8 = pow(x_dist8 * x_dist8 + y_dist8 * y_dist8 + soft_factor, 1.5f);
force_magnitude8 = pos_data.z * other_data8.z / dist_cubed8;
force.x += x_dist1 * force_magnitude1 + x_dist2 * force_magnitude2 +
x_dist3 * force_magnitude3 + x_dist4 * force_magnitude4 +
x_dist5 * force_magnitude5 + x_dist6 * force_magnitude6 +
x_dist7 * force_magnitude7 + x_dist8 * force_magnitude8;
force.y += y_dist1 * force_magnitude1 + y_dist2 * force_magnitude2 +
y_dist3 * force_magnitude3 + y_dist4 * force_magnitude4 +
y_dist5 * force_magnitude5 + y_dist6 * force_magnitude6 +
y_dist7 * force_magnitude7 + y_dist8 * force_magnitude8;
}
return force;
}
__global__
void simple_kernel(float2 * vels_old, float2 * vels_new, float3 * data_old, float3 * data_new, float dt, int num_particles) {
// each thread handles a particle
int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < num_particles)
{
float2 force = get_force(data_old[i], data_old, num_particles);
vels_new[i].x = vels_old[i].x + force.x * dt / data_old[i].z;
vels_new[i].y = vels_old[i].y + force.y * dt / data_old[i].z;
data_new[i].x = data_old[i].x + vels_new[i].x * dt;
data_new[i].y = data_old[i].y + vels_new[i].y * dt;
i += blockDim.x * gridDim.x;
}
}
__global__
void pxp_kernel(float2 * vels_old, float2 * vels_new, float3 * data_old, float3 * data_new, float dt, int num_particles) {
extern __shared__ float3 sdata[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
while (i < num_particles)
{
float2 force;
force.x = 0;
force.y = 0;
float3 pos_data = data_old[i];
// NOTE: num_particles is a multiple of num_threads_per_block.
for (int num_tile = 0; num_tile * blockDim.x < num_particles; num_tile++)
{
__syncthreads();
sdata[tid] = data_old[num_tile * blockDim.x + tid];
__syncthreads();
float2 block_force = get_force(pos_data, sdata, blockDim.x);
force.x += block_force.x;
force.y += block_force.y;
}
vels_new[i].x = vels_old[i].x + force.x * dt / data_old[i].z; // TODO: replace data_old[i] with pos_data
vels_new[i].y = vels_old[i].y + force.y * dt / data_old[i].z;
data_new[i].x = data_old[i].x + vels_new[i].x * dt;
data_new[i].y = data_old[i].y + vels_new[i].y * dt;
i += blockDim.x * gridDim.x;
}
}
__global__
void pxp_opt_forces_kernel(float2 * forces, float2 * vels_old, float2 * vels_new, float3 * data_old,
float3 * data_new, float dt, int num_particles)
{
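  // Grid-stride loop over square blockDim.x x blockDim.x tiles of the
  // particle-pair matrix: each thread stages one "column" particle in shared
  // memory, computes the force of the whole tile on its "row" particle, and
  // accumulates the partial force into the global forces array with atomicAdd.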
extern __shared__ float3 sdata[];
int tile_id = blockIdx.x;
int tid = threadIdx.x;
int num_tiles_per_col = num_particles / blockDim.x;
int num_tiles = num_particles * num_particles / (blockDim.x * blockDim.x);
while (tile_id < num_tiles)
{
int rid = (tile_id % num_tiles_per_col) * blockDim.x + tid;
int cid = (tile_id/num_tiles_per_col) * blockDim.x + tid;
sdata[tid] = data_old[cid];
__syncthreads();
float2 block_force = get_force(data_old[rid], sdata, blockDim.x);
atomicAdd(&forces[rid].x, block_force.x);
atomicAdd(&forces[rid].y, block_force.y);
__syncthreads();
tile_id += gridDim.x;
}
}
__global__
void pxp_opt_particles_kernel(float2 * forces, float2 * vels_old, float2 * vels_new, float3 * data_old,
float3 * data_new, float dt, int num_particles)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < num_particles)
{
float2 force = forces[i];
vels_new[i].x = vels_old[i].x + force.x * dt / data_old[i].z; // TODO: replace data_old[i] with pos_data
vels_new[i].y = vels_old[i].y + force.y * dt / data_old[i].z;
data_new[i].x = data_old[i].x + vels_new[i].x * dt;
data_new[i].y = data_old[i].y + vels_new[i].y * dt;
i += blockDim.x * gridDim.x;
}
}
void call_interact_kernel(float dt) {
// call kernel
if (algorithm == SIMPLE)
{
hipLaunchKernelGGL(( simple_kernel), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, particle_vels[pingpong], particle_vels[1 - pingpong],
particle_data[pingpong], particle_data[1 - pingpong],
dt, num_particles);
}
else if (algorithm == PXP)
{
hipLaunchKernelGGL(( pxp_kernel), dim3(num_blocks), dim3(num_threads_per_block), num_threads_per_block * sizeof(float3), 0,
particle_vels[pingpong], particle_vels[1 - pingpong],
particle_data[pingpong], particle_data[1 - pingpong],
dt, num_particles);
}
else if (algorithm == PXP_OPT)
{
float2 * forces;
gpuErrChk(hipMalloc((void **) &forces, num_particles * sizeof(float2)));
gpuErrChk(hipMemset(forces, 0, num_particles * sizeof(float2)));
hipLaunchKernelGGL(( pxp_opt_forces_kernel), dim3(num_blocks), dim3(num_threads_per_block), num_threads_per_block * sizeof(float3), 0,
forces, particle_vels[pingpong], particle_vels[1 - pingpong],
particle_data[pingpong], particle_data[1 - pingpong],
dt, num_particles);
hipLaunchKernelGGL(( pxp_opt_particles_kernel), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, forces, particle_vels[pingpong], particle_vels[1 - pingpong],
particle_data[pingpong], particle_data[1 - pingpong],
dt, num_particles);
gpuErrChk(hipFree(forces));
}
else {
std::cout << "Invalid algorithm supplied: " << algorithm << "\n";
}
// update pingpong
pingpong = 1 - pingpong;
}
void get_particle_data(float * h_particle_data, float * h_particle_vels) {
// copy GPU data into particle_data, particle_vels array
gpuErrChk(hipMemcpy(h_particle_data, particle_data[1 - pingpong], sizeof(float) * 3 * num_particles, hipMemcpyDeviceToHost));
gpuErrChk(hipMemcpy(h_particle_vels, particle_vels[1 - pingpong], sizeof(float) * 2 * num_particles, hipMemcpyDeviceToHost));
}
| 3f33916a0a6f05b55d9109b1b1bf0d933fb590ac.cu | #include <curand.h>
#include <cstdio>
#include <iostream>
#include <cuda_runtime.h>
#include "n_body_sim_cuda.cuh"
// macro for error-handling
#define gpuErrChk(ans) { gpuAssert((ans), (char*)__FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char* file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Flag for pingpong;
int pingpong = 0;
// Number particles; determined at runtime.
int num_particles;
int num_blocks;
int num_threads_per_block;
// Algorithm to use.
int algorithm;
// Device buffer variables
float2* particle_vels[2]; // x and y represent velocity in 2D
float3* particle_data[2]; // x and y represent position in 2D, z represents mass
__global__
void cudaInitKernel(float2 * vels_buffer, float3 * data_buffer1, float3 * data_buffer2, float * random, float box_width,
float box_height, float min_vel, float max_vel, int num_particles)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < num_particles)
{
vels_buffer[i].x = min_vel + random[4 * i] * (max_vel - min_vel);
vels_buffer[i].y = min_vel + random[4 * i + 1] * (max_vel - min_vel);
data_buffer1[i].x = random[4 * i + 2] * box_width;
data_buffer1[i].y = random[4 * i + 3] * box_height;
data_buffer1[i].z = 1;
data_buffer2[i].z = 1;
/* if (i == 0) {
data_buffer1[i].z = 1000;
data_buffer2[i].z = 1000;
data_buffer1[i].x = box_width / 2;
data_buffer1[i].y = box_height / 2;
vels_buffer[i].x = 0;
vels_buffer[i].y = 0;
}
*/
i += blockDim.x * gridDim.x;
}
}
void alloc_particle_info() {
// instantiate particle_vels, particle_data on GPU
gpuErrChk(cudaMalloc((void **) &particle_vels[0], sizeof(float2) * num_particles));
gpuErrChk(cudaMalloc((void **) &particle_vels[1], sizeof(float2) * num_particles));
gpuErrChk(cudaMalloc((void **) &particle_data[0], sizeof(float3) * num_particles));
gpuErrChk(cudaMalloc((void **) &particle_data[1], sizeof(float3) * num_particles));
}
void init_data(int h_num_particles, float box_width, float box_height, float min_vel,
float max_vel, int h_num_blocks, int h_num_threads_per_block, int h_algorithm)
{
num_particles = h_num_particles;
num_blocks = h_num_blocks;
num_threads_per_block = h_num_threads_per_block;
algorithm = h_algorithm;
// instantiate particle_vels, particle_data on GPU
alloc_particle_info();
// set initial values for particle_vels, particle_data on GPU
float * random;
gpuErrChk(cudaMalloc((void **) &random, sizeof(float) * num_particles * 4));
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandGenerateUniform(gen, random, num_particles * 4);
cudaInitKernel<<<num_blocks, num_threads_per_block>>>(particle_vels[0], particle_data[0], particle_data[1],
random, box_width, box_height, min_vel, max_vel, num_particles);
curandDestroyGenerator(gen);
gpuErrChk(cudaFree(random));
}
void init_data(int h_num_particles, float *h_particle_data, float *h_particle_vels, int h_num_blocks, int h_num_threads_per_block, int h_algorithm) {
num_particles = h_num_particles;
num_blocks = h_num_blocks;
num_threads_per_block = h_num_threads_per_block;
algorithm = h_algorithm;
alloc_particle_info();
gpuErrChk(cudaMemcpy(particle_data[0], h_particle_data, 3 * num_particles * sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMemcpy(particle_data[1], h_particle_data, 3 * num_particles * sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMemcpy(particle_vels[0], h_particle_vels, 2 * num_particles * sizeof(float), cudaMemcpyHostToDevice));
}
void delete_data() {
// free all memory on GPU
for (int i = 0; i < 2; i++)
{
gpuErrChk(cudaFree(particle_vels[i]));
gpuErrChk(cudaFree(particle_data[i]));
}
}
__device__
float2 get_force(float3 pos_data, float3 * data_old, int num_particles) {
// sum force from every other particle based on mass, position of both particles
float2 force;
force.x = 0;
force.y = 0;
float3 other_data; // saves about 3s @ 128 threads/block and 1024 particles to store data_old[i], x_dist, and y_dist locally
float x_dist, y_dist, dist_squared;
float force_magnitude;
float soft_factor = SOFT_FACTOR;
for (int i = 0; i < num_particles; i++)
{
other_data = data_old[i];
x_dist = pos_data.x - other_data.x;
y_dist = pos_data.y - other_data.y;
dist_squared = x_dist * x_dist + y_dist * y_dist + soft_factor;
force_magnitude = pos_data.z * other_data.z / dist_squared;
force.x -= x_dist * force_magnitude / sqrt(dist_squared);
force.y -= y_dist * force_magnitude / sqrt(dist_squared);
}
return force;
}
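// The get_force_opt{1,2,4,8} variants below are manually unrolled versions of
// get_force (by factors of 1, 2, 4 and 8); the kernels in this file call only
// get_force.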
__device__
float2 get_force_opt1(float3 pos_data, float3 * data_old, int num_particles) {
// sum force from every other particle based on mass, position of both particles
float2 force = {0, 0};
float3 other_data1;
float x_dist1, y_dist1, dist_cubed1;
float force_magnitude1;
float soft_factor = SOFT_FACTOR; // softening term, as in get_force
for (int i = 0; i < num_particles; i+=1)
{
other_data1 = data_old[i];
x_dist1 = pos_data.x - other_data1.x;
y_dist1 = pos_data.y - other_data1.y;
dist_cubed1 = pow(x_dist1 * x_dist1 + y_dist1 * y_dist1 + soft_factor, 1.5f);
force_magnitude1 = pos_data.z * other_data1.z / dist_cubed1;
force.x += x_dist1 * force_magnitude1;
force.y += y_dist1 * force_magnitude1;
}
return force;
}
__device__
float2 get_force_opt2(float3 pos_data, float3 * data_old, int num_particles) {
// sum force from every other particle based on mass, position of both particles
float2 force = {0, 0};
float3 other_data1, other_data2;
float x_dist1, y_dist1, dist_cubed1, x_dist2, y_dist2, dist_cubed2;
float force_magnitude1, force_magnitude2;
float soft_factor = SOFT_FACTOR; // softening term, as in get_force
for (int i = 0; i < num_particles; i+=2)
{
other_data1 = data_old[i];
other_data2 = data_old[i + 1];
x_dist1 = pos_data.x - other_data1.x;
y_dist1 = pos_data.y - other_data1.y;
dist_cubed1 = pow(x_dist1 * x_dist1 + y_dist1 * y_dist1 + soft_factor, 1.5f);
force_magnitude1 = pos_data.z * other_data1.z / dist_cubed1;
x_dist2 = pos_data.x - other_data2.x;
y_dist2 = pos_data.y - other_data2.y;
dist_cubed2 = pow(x_dist2 * x_dist2 + y_dist2 * y_dist2 + soft_factor, 1.5f);
force_magnitude2 = pos_data.z * other_data2.z / dist_cubed2;
force.x += x_dist1 * force_magnitude1 + x_dist2 * force_magnitude2;
force.y += y_dist1 * force_magnitude1 + y_dist2 * force_magnitude2;
}
return force;
}
__device__
float2 get_force_opt4(float3 pos_data, float3 * data_old, int num_particles) {
// sum force from every other particle based on mass, position of both particles
float2 force = {0, 0};
float3 other_data1, other_data2, other_data3, other_data4;
float x_dist1, y_dist1, dist_cubed1, x_dist2, y_dist2, dist_cubed2;
float x_dist3, y_dist3, dist_cubed3, x_dist4, y_dist4, dist_cubed4;
float force_magnitude1, force_magnitude2, force_magnitude3, force_magnitude4;
float soft_factor = SOFT_FACTOR;
for (int i = 0; i < num_particles; i+=4)
{
other_data1 = data_old[i];
other_data2 = data_old[i + 1];
other_data3 = data_old[i + 2];
other_data4 = data_old[i + 3];
x_dist1 = pos_data.x - other_data1.x;
y_dist1 = pos_data.y - other_data1.y;
dist_cubed1 = pow(x_dist1 * x_dist1 + y_dist1 * y_dist1 + soft_factor, 1.5f);
force_magnitude1 = pos_data.z * other_data1.z / dist_cubed1;
x_dist2 = pos_data.x - other_data2.x;
y_dist2 = pos_data.y - other_data2.y;
dist_cubed2 = pow(x_dist2 * x_dist2 + y_dist2 * y_dist2 + soft_factor, 1.5f);
force_magnitude2 = pos_data.z * other_data2.z / dist_cubed2;
x_dist3 = pos_data.x - other_data3.x;
y_dist3 = pos_data.y - other_data3.y;
dist_cubed3 = pow(x_dist3 * x_dist3 + y_dist3 * y_dist3 + soft_factor, 1.5f);
force_magnitude3 = pos_data.z * other_data3.z / dist_cubed3;
x_dist4 = pos_data.x - other_data4.x;
y_dist4 = pos_data.y - other_data4.y;
dist_cubed4 = pow(x_dist4 * x_dist4 + y_dist4 * y_dist4 + soft_factor, 1.5f);
force_magnitude4 = pos_data.z * other_data4.z / dist_cubed4;
force.x += x_dist1 * force_magnitude1 + x_dist2 * force_magnitude2 +
x_dist3 * force_magnitude3 + x_dist4 * force_magnitude4;
force.y += y_dist1 * force_magnitude1 + y_dist2 * force_magnitude2 +
y_dist3 * force_magnitude3 + y_dist4 * force_magnitude4;
}
return force;
}
__device__
float2 get_force_opt8(float3 pos_data, float3 * data_old, int num_particles) {
// sum force from every other particle based on mass, position of both particles
float2 force = {0, 0};
float3 other_data1, other_data2, other_data3, other_data4;
float3 other_data5, other_data6, other_data7, other_data8;
float x_dist1, y_dist1, dist_cubed1, x_dist2, y_dist2, dist_cubed2;
float x_dist3, y_dist3, dist_cubed3, x_dist4, y_dist4, dist_cubed4;
float x_dist5, y_dist5, dist_cubed5, x_dist6, y_dist6, dist_cubed6;
float x_dist7, y_dist7, dist_cubed7, x_dist8, y_dist8, dist_cubed8;
float force_magnitude1, force_magnitude2, force_magnitude3, force_magnitude4;
float force_magnitude5, force_magnitude6, force_magnitude7, force_magnitude8;
float soft_factor = SOFT_FACTOR;
for (int i = 0; i < num_particles; i+=8)
{
other_data1 = data_old[i];
other_data2 = data_old[i + 1];
other_data3 = data_old[i + 2];
other_data4 = data_old[i + 3];
other_data5 = data_old[i + 4];
other_data6 = data_old[i + 5];
other_data7 = data_old[i + 6];
other_data8 = data_old[i + 7];
x_dist1 = pos_data.x - other_data1.x;
y_dist1 = pos_data.y - other_data1.y;
dist_cubed1 = pow(x_dist1 * x_dist1 + y_dist1 * y_dist1 + soft_factor, 1.5f);
force_magnitude1 = pos_data.z * other_data1.z / dist_cubed1;
x_dist2 = pos_data.x - other_data2.x;
y_dist2 = pos_data.y - other_data2.y;
dist_cubed2 = pow(x_dist2 * x_dist2 + y_dist2 * y_dist2 + soft_factor, 1.5f);
force_magnitude2 = pos_data.z * other_data2.z / dist_cubed2;
x_dist3 = pos_data.x - other_data3.x;
y_dist3 = pos_data.y - other_data3.y;
dist_cubed3 = pow(x_dist3 * x_dist3 + y_dist3 * y_dist3 + soft_factor, 1.5f);
force_magnitude3 = pos_data.z * other_data3.z / dist_cubed3;
x_dist4 = pos_data.x - other_data4.x;
y_dist4 = pos_data.y - other_data4.y;
dist_cubed4 = pow(x_dist4 * x_dist4 + y_dist4 * y_dist4 + soft_factor, 1.5f);
force_magnitude4 = pos_data.z * other_data4.z / dist_cubed4;
x_dist5 = pos_data.x - other_data5.x;
y_dist5 = pos_data.y - other_data5.y;
dist_cubed5 = pow(x_dist5 * x_dist5 + y_dist5 * y_dist5 + soft_factor, 1.5f);
force_magnitude5 = pos_data.z * other_data5.z / dist_cubed5;
x_dist6 = pos_data.x - other_data6.x;
y_dist6 = pos_data.y - other_data6.y;
dist_cubed6 = pow(x_dist6 * x_dist6 + y_dist6 * y_dist6 + soft_factor, 1.5f);
force_magnitude6 = pos_data.z * other_data6.z / dist_cubed6;
x_dist7 = pos_data.x - other_data7.x;
y_dist7 = pos_data.y - other_data7.y;
dist_cubed7 = pow(x_dist7 * x_dist7 + y_dist7 * y_dist7 + soft_factor, 1.5f);
force_magnitude7 = pos_data.z * other_data7.z / dist_cubed7;
x_dist8 = pos_data.x - other_data8.x;
y_dist8 = pos_data.y - other_data8.y;
dist_cubed8 = pow(x_dist8 * x_dist8 + y_dist8 * y_dist8 + soft_factor, 1.5f);
force_magnitude8 = pos_data.z * other_data8.z / dist_cubed8;
force.x += x_dist1 * force_magnitude1 + x_dist2 * force_magnitude2 +
x_dist3 * force_magnitude3 + x_dist4 * force_magnitude4 +
x_dist5 * force_magnitude5 + x_dist6 * force_magnitude6 +
x_dist7 * force_magnitude7 + x_dist8 * force_magnitude8;
force.y += y_dist1 * force_magnitude1 + y_dist2 * force_magnitude2 +
y_dist3 * force_magnitude3 + y_dist4 * force_magnitude4 +
y_dist5 * force_magnitude5 + y_dist6 * force_magnitude6 +
y_dist7 * force_magnitude7 + y_dist8 * force_magnitude8;
}
return force;
}
__global__
void simple_kernel(float2 * vels_old, float2 * vels_new, float3 * data_old, float3 * data_new, float dt, int num_particles) {
// each thread handles a particle
int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < num_particles)
{
float2 force = get_force(data_old[i], data_old, num_particles);
vels_new[i].x = vels_old[i].x + force.x * dt / data_old[i].z;
vels_new[i].y = vels_old[i].y + force.y * dt / data_old[i].z;
data_new[i].x = data_old[i].x + vels_new[i].x * dt;
data_new[i].y = data_old[i].y + vels_new[i].y * dt;
i += blockDim.x * gridDim.x;
}
}
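// Shared-memory variant: source particles are staged into shared memory one
// blockDim.x-sized tile at a time, so each position is fetched from global
// memory only once per block per tile.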
__global__
void pxp_kernel(float2 * vels_old, float2 * vels_new, float3 * data_old, float3 * data_new, float dt, int num_particles) {
extern __shared__ float3 sdata[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
while (i < num_particles)
{
float2 force;
force.x = 0;
force.y = 0;
float3 pos_data = data_old[i];
// NOTE: num_particles is a multiple of num_threads_per_block.
for (int num_tile = 0; num_tile * blockDim.x < num_particles; num_tile++)
{
__syncthreads();
sdata[tid] = data_old[num_tile * blockDim.x + tid];
__syncthreads();
float2 block_force = get_force(pos_data, sdata, blockDim.x);
force.x += block_force.x;
force.y += block_force.y;
}
vels_new[i].x = vels_old[i].x + force.x * dt / data_old[i].z; // TODO: replace data_old[i] with pos_data
vels_new[i].y = vels_old[i].y + force.y * dt / data_old[i].z;
data_new[i].x = data_old[i].x + vels_new[i].x * dt;
data_new[i].y = data_old[i].y + vels_new[i].y * dt;
i += blockDim.x * gridDim.x;
}
}
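// Tiled-interaction variant: each block walks tiles of the N x N interaction
// grid, loads the column particles into shared memory, and accumulates partial
// forces with atomicAdd; pxp_opt_particles_kernel then integrates the motion.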
__global__
void pxp_opt_forces_kernel(float2 * forces, float2 * vels_old, float2 * vels_new, float3 * data_old,
float3 * data_new, float dt, int num_particles)
{
extern __shared__ float3 sdata[];
int tile_id = blockIdx.x;
int tid = threadIdx.x;
int num_tiles_per_col = num_particles / blockDim.x;
int num_tiles = num_particles * num_particles / (blockDim.x * blockDim.x);
while (tile_id < num_tiles)
{
int rid = (tile_id % num_tiles_per_col) * blockDim.x + tid;
int cid = (tile_id/num_tiles_per_col) * blockDim.x + tid;
sdata[tid] = data_old[cid];
__syncthreads();
float2 block_force = get_force(data_old[rid], sdata, blockDim.x);
atomicAdd(&forces[rid].x, block_force.x);
atomicAdd(&forces[rid].y, block_force.y);
__syncthreads();
tile_id += gridDim.x;
}
}
__global__
void pxp_opt_particles_kernel(float2 * forces, float2 * vels_old, float2 * vels_new, float3 * data_old,
float3 * data_new, float dt, int num_particles)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < num_particles)
{
float2 force = forces[i];
vels_new[i].x = vels_old[i].x + force.x * dt / data_old[i].z; // TODO: replace data_old[i] with pos_data
vels_new[i].y = vels_old[i].y + force.y * dt / data_old[i].z;
data_new[i].x = data_old[i].x + vels_new[i].x * dt;
data_new[i].y = data_old[i].y + vels_new[i].y * dt;
i += blockDim.x * gridDim.x;
}
}
void call_interact_kernel(float dt) {
// call kernel
if (algorithm == SIMPLE)
{
simple_kernel<<<num_blocks, num_threads_per_block>>>(particle_vels[pingpong], particle_vels[1 - pingpong],
particle_data[pingpong], particle_data[1 - pingpong],
dt, num_particles);
}
else if (algorithm == PXP)
{
pxp_kernel<<<num_blocks, num_threads_per_block, num_threads_per_block * sizeof(float3)>>>
(particle_vels[pingpong], particle_vels[1 - pingpong],
particle_data[pingpong], particle_data[1 - pingpong],
dt, num_particles);
}
else if (algorithm == PXP_OPT)
{
float2 * forces;
gpuErrChk(cudaMalloc((void **) &forces, num_particles * sizeof(float2)));
gpuErrChk(cudaMemset(forces, 0, num_particles * sizeof(float2)));
pxp_opt_forces_kernel<<<num_blocks, num_threads_per_block, num_threads_per_block * sizeof(float3)>>>
(forces, particle_vels[pingpong], particle_vels[1 - pingpong],
particle_data[pingpong], particle_data[1 - pingpong],
dt, num_particles);
pxp_opt_particles_kernel<<<num_blocks, num_threads_per_block>>>(forces, particle_vels[pingpong], particle_vels[1 - pingpong],
particle_data[pingpong], particle_data[1 - pingpong],
dt, num_particles);
gpuErrChk(cudaFree(forces));
}
else {
std::cout << "Invalid algorithm supplied: " << algorithm << "\n";
}
// update pingpong
pingpong = 1 - pingpong;
}
void get_particle_data(float * h_particle_data, float * h_particle_vels) {
// copy GPU data into particle_data, particle_vels array
gpuErrChk(cudaMemcpy(h_particle_data, particle_data[1 - pingpong], sizeof(float) * 3 * num_particles, cudaMemcpyDeviceToHost));
gpuErrChk(cudaMemcpy(h_particle_vels, particle_vels[1 - pingpong], sizeof(float) * 2 * num_particles, cudaMemcpyDeviceToHost));
}
|
14dbed1f2c092a9ed84b4c59734f22054d7dcbc8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#include <stdlib.h>
#include <cstring> // strcpy
#include <iostream>
#include "lodepng.h"
using namespace std;
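// Box blur: each thread averages the R, G and B channels over a
// (2*BLUR_SIZE+1)^2 window centred on its pixel, clamped at the image border;
// the alpha channel is passed through unchanged.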
__global__
void PictureKernell(unsigned char* d_Pin, unsigned char* d_Pout, int n, int m){
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int BLUR_SIZE = 25, new_pos;
if((y < n) && (x < m)) {
int pixValR=0, pixValB=0,pixValG=0, pixels = 0;
int blurRow, blurCol;
for(blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE+1;++blurRow){
for(blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE+1;++blurCol){
int curRow = y + blurRow;
int curCol = x + blurCol;
new_pos = (curRow*m+curCol)*4;
if(curRow > -1 && curRow < n && curCol > -1 && curCol < m){
pixValR += d_Pin[new_pos];
pixValG += d_Pin[new_pos+1];
pixValB += d_Pin[new_pos+2];
pixels++;
}
    }
  }
  new_pos = (y*m+x)*4;
  d_Pout[new_pos] = (unsigned char)(pixValR/pixels);
  d_Pout[new_pos+1] = (unsigned char)(pixValG/pixels);
  d_Pout[new_pos+2] = (unsigned char)(pixValB/pixels);
  d_Pout[new_pos+3] = d_Pin[new_pos+3];
}
}
void Picture(unsigned char* Pin, unsigned char* Pout, int n, int m){
unsigned char* d_Pout, *d_Pin;
long int size = n*m*4;
hipMalloc((void **) &d_Pin,size);
hipMemcpy(d_Pin, Pin, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_Pout,size);
dim3 gridDim((m-1)/8+1,(n-1)/16+1,1);
dim3 blockDim(8,16,1);
hipLaunchKernelGGL(( PictureKernell), dim3(gridDim),dim3(blockDim), 0, 0, d_Pin,d_Pout,n,m);
hipMemcpy(Pout, d_Pout, size, hipMemcpyDeviceToHost);
hipFree(d_Pin); hipFree(d_Pout);
}
int main(int argc, char * argv[] ){
unsigned char *image, *out_image;
int i;
char name_in[100], name_out[100];
unsigned width, height;
  if(argv[1] == NULL or argv[2] == NULL){
    cout << "Usage\n inverse.cu [input image] [output image]\n";
    return 1;
  }
  strcpy(name_in,argv[1]);
  strcpy(name_out,argv[2]);
  i = lodepng_decode32_file(&image, &width, &height, name_in);
  if(i){ printf("could not decode %s (lodepng error %d)\n", name_in, i); return 1; }
out_image = (unsigned char*) malloc(width*height*4);
Picture(image,out_image,height,width);
lodepng_encode32_file(name_out,out_image,width,height);
free(image);
free(out_image);
return 0;
} | 14dbed1f2c092a9ed84b4c59734f22054d7dcbc8.cu | #include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#include <stdlib.h>
#include <cstring> // strcpy
#include <iostream>
#include "lodepng.h"
using namespace std;
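// Box blur: each thread averages the R, G and B channels over a
// (2*BLUR_SIZE+1)^2 window centred on its pixel, clamped at the image border;
// the alpha channel is passed through unchanged.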
__global__
void PictureKernell(unsigned char* d_Pin, unsigned char* d_Pout, int n, int m){
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int BLUR_SIZE = 25, new_pos;
if((y < n) && (x < m)) {
int pixValR=0, pixValB=0,pixValG=0, pixels = 0;
int blurRow, blurCol;
for(blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE+1;++blurRow){
for(blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE+1;++blurCol){
int curRow = y + blurRow;
int curCol = x + blurCol;
new_pos = (curRow*m+curCol)*4;
if(curRow > -1 && curRow < n && curCol > -1 && curCol < m){
pixValR += d_Pin[new_pos];
pixValG += d_Pin[new_pos+1];
pixValB += d_Pin[new_pos+2];
pixels++;
}
    }
  }
  new_pos = (y*m+x)*4;
  d_Pout[new_pos] = (unsigned char)(pixValR/pixels);
  d_Pout[new_pos+1] = (unsigned char)(pixValG/pixels);
  d_Pout[new_pos+2] = (unsigned char)(pixValB/pixels);
  d_Pout[new_pos+3] = d_Pin[new_pos+3];
}
}
void Picture(unsigned char* Pin, unsigned char* Pout, int n, int m){
unsigned char* d_Pout, *d_Pin;
long int size = n*m*4;
cudaMalloc((void **) &d_Pin,size);
cudaMemcpy(d_Pin, Pin, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_Pout,size);
dim3 gridDim((m-1)/8+1,(n-1)/16+1,1);
dim3 blockDim(8,16,1);
PictureKernell<<<gridDim,blockDim>>>(d_Pin,d_Pout,n,m);
cudaMemcpy(Pout, d_Pout, size, cudaMemcpyDeviceToHost);
cudaFree(d_Pin); cudaFree(d_Pout);
}
int main(int argc, char * argv[] ){
unsigned char *image, *out_image;
int i;
char name_in[100], name_out[100];
unsigned width, height;
  if(argv[1] == NULL or argv[2] == NULL){
    cout << "Usage\n inverse.cu [input image] [output image]\n";
    return 1;
  }
  strcpy(name_in,argv[1]);
  strcpy(name_out,argv[2]);
  i = lodepng_decode32_file(&image, &width, &height, name_in);
  if(i){ printf("could not decode %s (lodepng error %d)\n", name_in, i); return 1; }
out_image = (unsigned char*) malloc(width*height*4);
Picture(image,out_image,height,width);
lodepng_encode32_file(name_out,out_image,width,height);
free(image);
free(out_image);
return 0;
} |
0712e4b3431b558d9b891ee497af4f5c59507ad0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define __MAKEMORE_MEGATRON_CU__ 1
#include <stdio.h>
#include <math.h>
#include <vector>
#include <map>
#include "cudamem.hh"
#include "megatron.hh"
#include "mapfile.hh"
namespace makemore {
double adam_b1 = 0.9;
double adam_b2 = 0.999;
double adam_b3 = 0.5;
double adam_eps = 1e-8;
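// Forward pass: one thread per output unit. The oimap/owmap lists (0-terminated)
// give the wired inputs and their weights; a trailing bias weight is added, and
// the sum is optionally squashed with a sigmoid.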
__global__ void gpu_megatron_feed(
const double *in,
double *fin, double *out, double *fout,
unsigned int inn, unsigned int outn,
unsigned int wn,
unsigned int **iwmap, unsigned int **owmap,
unsigned int **iomap, unsigned int **oimap,
unsigned int *wimap, unsigned int *womap,
double *weight,
double eta, double nu, bool activated,
unsigned int inrn, unsigned int outrn, unsigned int mbn
) {
unsigned int outi = blockIdx.x * blockDim.x + threadIdx.x;
if (outi >= outn)
return;
unsigned int outri = outi % outrn;
unsigned int mbi = outi / outrn;
unsigned int *inrip = oimap[outri];
unsigned int *wip = owmap[outri];
double sum = 0;
while (*inrip) {
unsigned int ini = mbi * inrn + *inrip - 1;
unsigned int wi = *wip;
sum += weight[wi] * in[ini];
++inrip;
++wip;
}
unsigned int wi = *wip;
sum += weight[wi] * 1.0;
if (activated) {
double q = 1.0 / (1.0 + exp(-sum));
out[outi] = q;
} else {
out[outi] = sum;
}
fout[outi] = 0.0;
}
__global__ void gpu_megatron_train0(
const double *in,
double *fin, double *out, double *fout,
unsigned int inn, unsigned int outn,
unsigned int wn,
unsigned int **iwmap, unsigned int **owmap,
unsigned int **iomap, unsigned int **oimap,
unsigned int *wimap, unsigned int *womap,
double *weight,
double eta, double nu, bool activated,
unsigned int inrn, unsigned int outrn, unsigned int mbn
) {
unsigned int outi = blockIdx.x * blockDim.x + threadIdx.x;
if (outi >= outn)
return;
double o = out[outi];
double fo = fout[outi];
if (o > 1.0)
o = 1.0;
else if (o < 0.0)
o = 0.0;
fout[outi] = fo * o * (1.0 - o);
}
__global__ void gpu_megatron_train1(
const double *in,
double *fin, double *out, double *fout,
unsigned int inn, unsigned int outn,
unsigned int wn,
unsigned int **iwmap, unsigned int **owmap,
unsigned int **iomap, unsigned int **oimap,
unsigned int *wimap, unsigned int *womap,
double *weight,
double eta, double nu, bool activated,
unsigned int inrn, unsigned int outrn, unsigned int mbn
) {
unsigned int ini = blockIdx.x * blockDim.x + threadIdx.x;
if (ini >= inn)
return;
unsigned int inri = ini % inrn;
unsigned int mbi = ini / inrn;
unsigned int *outrip = iomap[inri];
unsigned int *wip = iwmap[inri];
double sum = 0;
while (*outrip) {
unsigned int outi = mbi * outrn + *outrip - 1;
unsigned int wi = *wip;
sum += weight[wi] * fout[outi];
++outrip;
++wip;
}
fin[ini] += sum;
}
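// Weight update: one thread per weight. Adam-style first/second moments m and v
// are updated with b1/b2 and the step is a * m / (v^b3 + eps), accumulated over
// the minibatch; wimap index 0 marks the bias weight (its input is treated as 1).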
__global__ void gpu_megatron_train2(
const double *in,
double *fin, double *out, double *fout,
unsigned int inn, unsigned int outn,
unsigned int wn,
unsigned int **iwmap, unsigned int **owmap,
unsigned int **iomap, unsigned int **oimap,
unsigned int *wimap, unsigned int *womap,
double *weight,
double *m, double *v, double a, double b1, double b2, double b3, double eps,
bool activated,
unsigned int inrn, unsigned int outrn, unsigned int mbn
) {
unsigned int wi = blockIdx.x * blockDim.x + threadIdx.x;
if (wi >= wn)
return;
if (!(a > 0))
return;
unsigned int outri = womap[wi];
--outri;
unsigned int inri = wimap[wi];
if (inri == 0) {
for (unsigned int mbi = 0; mbi < mbn; ++mbi) {
unsigned int outi = mbi * outrn + outri;
double dw = fout[outi];
m[wi] = b1 * m[wi] + (1 - b1) * dw;
v[wi] = b2 * v[wi] + (1 - b2) * dw * dw;
weight[wi] += a * m[wi] / (pow(v[wi], b3) + eps);
//weight[wi] += a * dw;
}
} else {
--inri;
for (unsigned int mbi = 0; mbi < mbn; ++mbi) {
unsigned int outi = mbi * outrn + outri;
unsigned int ini = mbi * inrn + inri;
double dw = fout[outi] * in[ini];
m[wi] = b1 * m[wi] + (1 - b1) * dw;
v[wi] = b2 * v[wi] + (1 - b2) * dw * dw;
weight[wi] += a * m[wi] / (pow(v[wi], b3) + eps);
//weight[wi] += a * dw;
}
}
}
const double *Megatron::feed(const double *_in, double *_fin) {
in = _in;
fin = _fin;
int bs = 128;
int gs = (outn + bs - 1) / bs;
hipLaunchKernelGGL(( gpu_megatron_feed), dim3(gs), dim3(bs), 0, 0,
in, fin, out, fout, inn, outn,
wn, iwmap, owmap, iomap, oimap, wimap, womap,
weight, eta, 1.0, activated,
inrn, outrn, mbn
);
return out;
}
void Megatron::train(double nu) {
if (activated) {
int bs0 = 128;
int gs0 = (outn + bs0 - 1) / bs0;
hipLaunchKernelGGL(( gpu_megatron_train0), dim3(gs0), dim3(bs0), 0, 0,
in, fin, out, fout, inn, outn,
wn, iwmap, owmap, iomap, oimap, wimap, womap,
weight, eta, nu, activated,
inrn, outrn, mbn
);
}
if (fin) {
int bs1 = 128;
int gs1 = (inn + bs1 - 1) / bs1;
hipLaunchKernelGGL(( gpu_megatron_train1), dim3(gs1), dim3(bs1), 0, 0,
in, fin, out, fout, inn, outn,
wn, iwmap, owmap, iomap, oimap, wimap, womap,
weight, eta, nu, activated,
inrn, outrn, mbn
);
}
int bs2 = 128;
int gs2 = (wn + bs2 - 1) / bs2;
hipLaunchKernelGGL(( gpu_megatron_train2), dim3(gs2), dim3(bs2), 0, 0,
in, fin, out, fout, inn, outn,
wn, iwmap, owmap, iomap, oimap, wimap, womap,
weight,
m,v, eta*nu, adam_b1, adam_b2, adam_b3, adam_eps,
activated,
inrn, outrn, mbn
);
}
Megatron::Megatron(const Wiring *_wire, Mapfile *_mapfile, unsigned int _mbn, double _eta, bool _activated)
: Tron(_wire->inn * _mbn, _wire->outn * _mbn)
{
mbn = _mbn;
assert(mbn > 0);
assert(inn % mbn == 0);
inrn = inn / mbn;
assert(outn % mbn == 0);
outrn = outn / mbn;
wire = _wire;
mapfile = _mapfile;
cumake(&out, outn);
cumake(&fout, outn);
cumake(&owmap, outrn);
cumake(&oimap, outrn);
cumake(&iomap, inrn);
cumake(&iwmap, inrn);
eta = _eta;
activated = _activated;
_makemaps();
cumake(&weight, wn);
mapfile->map(weight, wn);
mapfile->load(weight);
cumake(&m, wn);
mapfile->map(m, wn);
mapfile->load(m);
cumake(&v, wn);
mapfile->map(v, wn);
mapfile->load(v);
}
Megatron::~Megatron() {
cufree(out);
cufree(fout);
cufree(owmap);
cufree(oimap);
cufree(iwmap);
cufree(iomap);
cufree(womap);
cufree(wimap);
cufree(weight);
}
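// Builds the sparse wiring tables on the host (0-terminated index lists plus a
// bias entry per output), packs them into a single device buffer (mapbuf) and
// uploads the per-weight reverse maps wimap/womap.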
void Megatron::_makemaps() {
using namespace std;
vector< vector<unsigned int> > moi, mow, mio, miw;
assert(wire->outn == outrn);
assert(wire->inn == inrn);
moi.resize(wire->outn);
mow.resize(wire->outn);
mio.resize(wire->inn);
miw.resize(wire->inn);
unsigned int wi = 0;
wire->_makemaps(mio, miw, moi, mow);
wi += wire->wn;
for (unsigned int outri = 0; outri < wire->outn; ++outri) {
moi[outri].push_back(0);
mow[outri].push_back(wi);
++wi;
}
wn = wi;
std::vector<unsigned int> mwi, mwo;
mwi.resize(wn);
mwo.resize(wn);
for (unsigned int outri = 0; outri < wire->outn; ++outri) {
for (auto wip = mow[outri].begin(); wip != mow[outri].end(); ++wip) {
mwo[*wip] = outri + 1;
mwi[*wip] = 0;
}
}
for (unsigned int inri = 0; inri < wire->inn; ++inri) {
for (auto wip = miw[inri].begin(); wip != miw[inri].end(); ++wip) {
mwi[*wip] = inri + 1;
}
}
for (unsigned int inri = 0; inri < wire->inn; ++inri) {
mio[inri].push_back(0);
miw[inri].push_back((unsigned int)-1);
}
unsigned int mapbufn = 0;
for (unsigned int outri = 0; outri < outrn; ++outri) {
const vector<unsigned int>& v = moi[outri];
const vector<unsigned int>& w = mow[outri];
assert(v.size());
mapbufn += v.size();
assert(w.size());
mapbufn += w.size();
}
for (unsigned int inri = 0; inri < inrn; ++inri) {
const vector<unsigned int>& v = mio[inri];
const vector<unsigned int>& w = miw[inri];
assert(v.size());
mapbufn += v.size();
assert(w.size());
mapbufn += w.size();
}
cumake(&mapbuf, mapbufn);
unsigned int mapbufi = 0;
unsigned int *cmapbuf = new unsigned int[mapbufn];
unsigned int **coimap = new unsigned int *[outrn];
unsigned int **cowmap = new unsigned int *[outrn];
unsigned int **ciomap = new unsigned int *[inrn];
unsigned int **ciwmap = new unsigned int *[inrn];
for (unsigned int outri = 0; outri < outrn; ++outri) {
const vector<unsigned int>& v = moi[outri];
const vector<unsigned int>& w = mow[outri];
memcpy(cmapbuf + mapbufi, v.data(), v.size() * sizeof(unsigned int));
coimap[outri] = mapbuf + mapbufi;
mapbufi += v.size();
memcpy(cmapbuf + mapbufi, w.data(), w.size() * sizeof(unsigned int));
cowmap[outri] = mapbuf + mapbufi;
mapbufi += w.size();
}
for (unsigned int inri = 0; inri < inrn; ++inri) {
const vector<unsigned int>& v = mio[inri];
const vector<unsigned int>& w = miw[inri];
memcpy(cmapbuf + mapbufi, v.data(), v.size() * sizeof(unsigned int));
ciomap[inri] = mapbuf + mapbufi;
mapbufi += v.size();
memcpy(cmapbuf + mapbufi, w.data(), w.size() * sizeof(unsigned int));
ciwmap[inri] = mapbuf + mapbufi;
mapbufi += w.size();
}
assert(mapbufi == mapbufn);
encude(cmapbuf, mapbufn, mapbuf);
delete[] cmapbuf;
encude(coimap, outrn, oimap);
encude(cowmap, outrn, owmap);
encude(ciomap, inrn, iomap);
encude(ciwmap, inrn, iwmap);
delete[] ciomap;
delete[] ciwmap;
delete[] coimap;
delete[] cowmap;
_mow = mow;
cumake(&wimap, wn);
encude(mwi.data(), wn, wimap);
cumake(&womap, wn);
encude(mwo.data(), wn, womap);
}
void Megatron::randomize(double disp) {
using namespace std;
double *cweight = new double[wn];
for (unsigned int outri = 0; outri < outrn; ++outri) {
const vector<unsigned int>& w = _mow[outri];
assert(w.size());
double iss = disp / sqrt(w.size() - 1);
double sw = 0;
for (unsigned int i = 0; i < w.size() - 1; ++i) {
// double ww = iss * rnd(-1, 1);
double ww = iss * randgauss();
assert(w[i] < wn);
cweight[w[i]] = ww;
sw += ww;
}
assert(w[w.size() - 1] < wn);
// cweight[w[w.size() - 1]] = 0;
cweight[w[w.size() - 1]] = -sw/2.0;
}
encude(cweight, wn, weight);
delete[] cweight;
cuzero(m, wn);
double *one = new double[wn];
for (unsigned int wi = 0; wi < wn; ++wi)
one[wi] = 1.0;
encude(one, wn, v);
delete[] one;
}
}
| 0712e4b3431b558d9b891ee497af4f5c59507ad0.cu | #define __MAKEMORE_MEGATRON_CU__ 1
#include <stdio.h>
#include <math.h>
#include <vector>
#include <map>
#include "cudamem.hh"
#include "megatron.hh"
#include "mapfile.hh"
namespace makemore {
double adam_b1 = 0.9;
double adam_b2 = 0.999;
double adam_b3 = 0.5;
double adam_eps = 1e-8;
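// Forward pass: one thread per output unit. The oimap/owmap lists (0-terminated)
// give the wired inputs and their weights; a trailing bias weight is added, and
// the sum is optionally squashed with a sigmoid.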
__global__ void gpu_megatron_feed(
const double *in,
double *fin, double *out, double *fout,
unsigned int inn, unsigned int outn,
unsigned int wn,
unsigned int **iwmap, unsigned int **owmap,
unsigned int **iomap, unsigned int **oimap,
unsigned int *wimap, unsigned int *womap,
double *weight,
double eta, double nu, bool activated,
unsigned int inrn, unsigned int outrn, unsigned int mbn
) {
unsigned int outi = blockIdx.x * blockDim.x + threadIdx.x;
if (outi >= outn)
return;
unsigned int outri = outi % outrn;
unsigned int mbi = outi / outrn;
unsigned int *inrip = oimap[outri];
unsigned int *wip = owmap[outri];
double sum = 0;
while (*inrip) {
unsigned int ini = mbi * inrn + *inrip - 1;
unsigned int wi = *wip;
sum += weight[wi] * in[ini];
++inrip;
++wip;
}
unsigned int wi = *wip;
sum += weight[wi] * 1.0;
if (activated) {
double q = 1.0 / (1.0 + exp(-sum));
out[outi] = q;
} else {
out[outi] = sum;
}
fout[outi] = 0.0;
}
__global__ void gpu_megatron_train0(
const double *in,
double *fin, double *out, double *fout,
unsigned int inn, unsigned int outn,
unsigned int wn,
unsigned int **iwmap, unsigned int **owmap,
unsigned int **iomap, unsigned int **oimap,
unsigned int *wimap, unsigned int *womap,
double *weight,
double eta, double nu, bool activated,
unsigned int inrn, unsigned int outrn, unsigned int mbn
) {
unsigned int outi = blockIdx.x * blockDim.x + threadIdx.x;
if (outi >= outn)
return;
double o = out[outi];
double fo = fout[outi];
if (o > 1.0)
o = 1.0;
else if (o < 0.0)
o = 0.0;
fout[outi] = fo * o * (1.0 - o);
}
__global__ void gpu_megatron_train1(
const double *in,
double *fin, double *out, double *fout,
unsigned int inn, unsigned int outn,
unsigned int wn,
unsigned int **iwmap, unsigned int **owmap,
unsigned int **iomap, unsigned int **oimap,
unsigned int *wimap, unsigned int *womap,
double *weight,
double eta, double nu, bool activated,
unsigned int inrn, unsigned int outrn, unsigned int mbn
) {
unsigned int ini = blockIdx.x * blockDim.x + threadIdx.x;
if (ini >= inn)
return;
unsigned int inri = ini % inrn;
unsigned int mbi = ini / inrn;
unsigned int *outrip = iomap[inri];
unsigned int *wip = iwmap[inri];
double sum = 0;
while (*outrip) {
unsigned int outi = mbi * outrn + *outrip - 1;
unsigned int wi = *wip;
sum += weight[wi] * fout[outi];
++outrip;
++wip;
}
fin[ini] += sum;
}
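// Weight update: one thread per weight. Adam-style first/second moments m and v
// are updated with b1/b2 and the step is a * m / (v^b3 + eps), accumulated over
// the minibatch; wimap index 0 marks the bias weight (its input is treated as 1).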
__global__ void gpu_megatron_train2(
const double *in,
double *fin, double *out, double *fout,
unsigned int inn, unsigned int outn,
unsigned int wn,
unsigned int **iwmap, unsigned int **owmap,
unsigned int **iomap, unsigned int **oimap,
unsigned int *wimap, unsigned int *womap,
double *weight,
double *m, double *v, double a, double b1, double b2, double b3, double eps,
bool activated,
unsigned int inrn, unsigned int outrn, unsigned int mbn
) {
unsigned int wi = blockIdx.x * blockDim.x + threadIdx.x;
if (wi >= wn)
return;
if (!(a > 0))
return;
unsigned int outri = womap[wi];
--outri;
unsigned int inri = wimap[wi];
if (inri == 0) {
for (unsigned int mbi = 0; mbi < mbn; ++mbi) {
unsigned int outi = mbi * outrn + outri;
double dw = fout[outi];
m[wi] = b1 * m[wi] + (1 - b1) * dw;
v[wi] = b2 * v[wi] + (1 - b2) * dw * dw;
weight[wi] += a * m[wi] / (pow(v[wi], b3) + eps);
//weight[wi] += a * dw;
}
} else {
--inri;
for (unsigned int mbi = 0; mbi < mbn; ++mbi) {
unsigned int outi = mbi * outrn + outri;
unsigned int ini = mbi * inrn + inri;
double dw = fout[outi] * in[ini];
m[wi] = b1 * m[wi] + (1 - b1) * dw;
v[wi] = b2 * v[wi] + (1 - b2) * dw * dw;
weight[wi] += a * m[wi] / (pow(v[wi], b3) + eps);
//weight[wi] += a * dw;
}
}
}
const double *Megatron::feed(const double *_in, double *_fin) {
in = _in;
fin = _fin;
int bs = 128;
int gs = (outn + bs - 1) / bs;
gpu_megatron_feed<<<gs, bs>>>(
in, fin, out, fout, inn, outn,
wn, iwmap, owmap, iomap, oimap, wimap, womap,
weight, eta, 1.0, activated,
inrn, outrn, mbn
);
return out;
}
void Megatron::train(double nu) {
if (activated) {
int bs0 = 128;
int gs0 = (outn + bs0 - 1) / bs0;
gpu_megatron_train0<<<gs0, bs0>>>(
in, fin, out, fout, inn, outn,
wn, iwmap, owmap, iomap, oimap, wimap, womap,
weight, eta, nu, activated,
inrn, outrn, mbn
);
}
if (fin) {
int bs1 = 128;
int gs1 = (inn + bs1 - 1) / bs1;
gpu_megatron_train1<<<gs1, bs1>>>(
in, fin, out, fout, inn, outn,
wn, iwmap, owmap, iomap, oimap, wimap, womap,
weight, eta, nu, activated,
inrn, outrn, mbn
);
}
int bs2 = 128;
int gs2 = (wn + bs2 - 1) / bs2;
gpu_megatron_train2<<<gs2, bs2>>>(
in, fin, out, fout, inn, outn,
wn, iwmap, owmap, iomap, oimap, wimap, womap,
weight,
m,v, eta*nu, adam_b1, adam_b2, adam_b3, adam_eps,
activated,
inrn, outrn, mbn
);
}
Megatron::Megatron(const Wiring *_wire, Mapfile *_mapfile, unsigned int _mbn, double _eta, bool _activated)
: Tron(_wire->inn * _mbn, _wire->outn * _mbn)
{
mbn = _mbn;
assert(mbn > 0);
assert(inn % mbn == 0);
inrn = inn / mbn;
assert(outn % mbn == 0);
outrn = outn / mbn;
wire = _wire;
mapfile = _mapfile;
cumake(&out, outn);
cumake(&fout, outn);
cumake(&owmap, outrn);
cumake(&oimap, outrn);
cumake(&iomap, inrn);
cumake(&iwmap, inrn);
eta = _eta;
activated = _activated;
_makemaps();
cumake(&weight, wn);
mapfile->map(weight, wn);
mapfile->load(weight);
cumake(&m, wn);
mapfile->map(m, wn);
mapfile->load(m);
cumake(&v, wn);
mapfile->map(v, wn);
mapfile->load(v);
}
Megatron::~Megatron() {
cufree(out);
cufree(fout);
cufree(owmap);
cufree(oimap);
cufree(iwmap);
cufree(iomap);
cufree(womap);
cufree(wimap);
cufree(weight);
}
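// Builds the sparse wiring tables on the host (0-terminated index lists plus a
// bias entry per output), packs them into a single device buffer (mapbuf) and
// uploads the per-weight reverse maps wimap/womap.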
void Megatron::_makemaps() {
using namespace std;
vector< vector<unsigned int> > moi, mow, mio, miw;
assert(wire->outn == outrn);
assert(wire->inn == inrn);
moi.resize(wire->outn);
mow.resize(wire->outn);
mio.resize(wire->inn);
miw.resize(wire->inn);
unsigned int wi = 0;
wire->_makemaps(mio, miw, moi, mow);
wi += wire->wn;
for (unsigned int outri = 0; outri < wire->outn; ++outri) {
moi[outri].push_back(0);
mow[outri].push_back(wi);
++wi;
}
wn = wi;
std::vector<unsigned int> mwi, mwo;
mwi.resize(wn);
mwo.resize(wn);
for (unsigned int outri = 0; outri < wire->outn; ++outri) {
for (auto wip = mow[outri].begin(); wip != mow[outri].end(); ++wip) {
mwo[*wip] = outri + 1;
mwi[*wip] = 0;
}
}
for (unsigned int inri = 0; inri < wire->inn; ++inri) {
for (auto wip = miw[inri].begin(); wip != miw[inri].end(); ++wip) {
mwi[*wip] = inri + 1;
}
}
for (unsigned int inri = 0; inri < wire->inn; ++inri) {
mio[inri].push_back(0);
miw[inri].push_back((unsigned int)-1);
}
unsigned int mapbufn = 0;
for (unsigned int outri = 0; outri < outrn; ++outri) {
const vector<unsigned int>& v = moi[outri];
const vector<unsigned int>& w = mow[outri];
assert(v.size());
mapbufn += v.size();
assert(w.size());
mapbufn += w.size();
}
for (unsigned int inri = 0; inri < inrn; ++inri) {
const vector<unsigned int>& v = mio[inri];
const vector<unsigned int>& w = miw[inri];
assert(v.size());
mapbufn += v.size();
assert(w.size());
mapbufn += w.size();
}
cumake(&mapbuf, mapbufn);
unsigned int mapbufi = 0;
unsigned int *cmapbuf = new unsigned int[mapbufn];
unsigned int **coimap = new unsigned int *[outrn];
unsigned int **cowmap = new unsigned int *[outrn];
unsigned int **ciomap = new unsigned int *[inrn];
unsigned int **ciwmap = new unsigned int *[inrn];
for (unsigned int outri = 0; outri < outrn; ++outri) {
const vector<unsigned int>& v = moi[outri];
const vector<unsigned int>& w = mow[outri];
memcpy(cmapbuf + mapbufi, v.data(), v.size() * sizeof(unsigned int));
coimap[outri] = mapbuf + mapbufi;
mapbufi += v.size();
memcpy(cmapbuf + mapbufi, w.data(), w.size() * sizeof(unsigned int));
cowmap[outri] = mapbuf + mapbufi;
mapbufi += w.size();
}
for (unsigned int inri = 0; inri < inrn; ++inri) {
const vector<unsigned int>& v = mio[inri];
const vector<unsigned int>& w = miw[inri];
memcpy(cmapbuf + mapbufi, v.data(), v.size() * sizeof(unsigned int));
ciomap[inri] = mapbuf + mapbufi;
mapbufi += v.size();
memcpy(cmapbuf + mapbufi, w.data(), w.size() * sizeof(unsigned int));
ciwmap[inri] = mapbuf + mapbufi;
mapbufi += w.size();
}
assert(mapbufi == mapbufn);
encude(cmapbuf, mapbufn, mapbuf);
delete[] cmapbuf;
encude(coimap, outrn, oimap);
encude(cowmap, outrn, owmap);
encude(ciomap, inrn, iomap);
encude(ciwmap, inrn, iwmap);
delete[] ciomap;
delete[] ciwmap;
delete[] coimap;
delete[] cowmap;
_mow = mow;
cumake(&wimap, wn);
encude(mwi.data(), wn, wimap);
cumake(&womap, wn);
encude(mwo.data(), wn, womap);
}
void Megatron::randomize(double disp) {
using namespace std;
double *cweight = new double[wn];
for (unsigned int outri = 0; outri < outrn; ++outri) {
const vector<unsigned int>& w = _mow[outri];
assert(w.size());
double iss = disp / sqrt(w.size() - 1);
double sw = 0;
for (unsigned int i = 0; i < w.size() - 1; ++i) {
// double ww = iss * rnd(-1, 1);
double ww = iss * randgauss();
assert(w[i] < wn);
cweight[w[i]] = ww;
sw += ww;
}
assert(w[w.size() - 1] < wn);
// cweight[w[w.size() - 1]] = 0;
cweight[w[w.size() - 1]] = -sw/2.0;
}
encude(cweight, wn, weight);
delete[] cweight;
cuzero(m, wn);
double *one = new double[wn];
for (unsigned int wi = 0; wi < wn; ++wi)
one[wi] = 1.0;
encude(one, wn, v);
delete[] one;
}
}
|
60f34018bab0a4a5afab5cdaa1d42d7efbdffb5b.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <intrin.h>
#include <ctime>
#pragma comment(lib, "cudart")
using namespace std;
#define MATRIX_SIZE 1024
#define BlockSize 32
// CUDA kernel: cubes each array value
void matrixMultiplicationWithCuda(int A[][MATRIX_SIZE],int B[][MATRIX_SIZE],int C[][MATRIX_SIZE], bool flagOptimozation);
void matrixMultiplicationCPU(int A[][MATRIX_SIZE],int B[][MATRIX_SIZE],int C[][MATRIX_SIZE]);
void checkCUDAStatus(hipError_t cudaStatus);
bool compareResults(int cudaMultiplicationResult[][MATRIX_SIZE],int cpuMultiplicationResult[][MATRIX_SIZE]);
__global__ void matrixMultiplicationKernel(int *A, int *B, int *C)
{
int result = 0;
int column = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
//__syncthreads();
if (row >= MATRIX_SIZE || column >= MATRIX_SIZE) return;
for (int i = 0; i < MATRIX_SIZE; i++){
result += A[row*MATRIX_SIZE + i] * B[i*MATRIX_SIZE + column];
}
C[row*MATRIX_SIZE + column] = result;
}
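// Tiled version: BlockSize x BlockSize tiles of A and B are staged in shared
// memory so each element is read from global memory once per tile; the bounds
// checks pad out-of-range elements with zeros.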
__global__ void matrixMultiplicationWithOptimizationKernel(int *A, int *B, int *C)
{
__shared__ float ds_M[BlockSize][BlockSize];
__shared__ float ds_N[BlockSize][BlockSize];
int bx = blockIdx.x, by = blockIdx.y,
tx = threadIdx.x, ty = threadIdx.y,
Row = by * BlockSize + ty,
Col = bx * BlockSize + tx;
float Pvalue = 0;
for (int m = 0; m < (MATRIX_SIZE-1)/BlockSize+1; ++m) {
if (Row < MATRIX_SIZE && m*BlockSize+tx < MATRIX_SIZE)
ds_M[ty][tx] = A[Row*MATRIX_SIZE + m*BlockSize+tx];
else
ds_M[ty][tx] = 0;
if (Col < MATRIX_SIZE && m*BlockSize+ty < MATRIX_SIZE)
ds_N[ty][tx] = B[(m*BlockSize+ty)*MATRIX_SIZE+Col];
else
ds_N[ty][tx] = 0;
__syncthreads();
for (int k = 0; k < BlockSize; ++k)
Pvalue += ds_M[ty][k] * ds_N[k][tx];
__syncthreads();
}
if (Row < MATRIX_SIZE && Col < MATRIX_SIZE)
C[Row*MATRIX_SIZE+Col] = Pvalue;
}
int main()
{
srand(time(0));
auto matrixA = new int[MATRIX_SIZE][MATRIX_SIZE];
auto matrixB = new int[MATRIX_SIZE][MATRIX_SIZE];
auto cudaMultiplicationResult = new int[MATRIX_SIZE][MATRIX_SIZE];
auto cudaWithOptimizationMultiplicationResult = new int[MATRIX_SIZE][MATRIX_SIZE];
auto cpuMultiplicationResult = new int[MATRIX_SIZE][MATRIX_SIZE];
for (int i = 0; i<MATRIX_SIZE; i++){
for (int j = 0; j < MATRIX_SIZE; j++){
matrixA[i][j] = rand() % 2048;
matrixB[i][j] = rand() % 2048;
cpuMultiplicationResult[i][j] = 0;
}
}
matrixMultiplicationWithCuda(matrixA, matrixB, cudaMultiplicationResult, false);
matrixMultiplicationWithCuda(matrixA, matrixB, cudaWithOptimizationMultiplicationResult, true);
matrixMultiplicationCPU(matrixA, matrixB, cpuMultiplicationResult);
if(compareResults(cudaMultiplicationResult, cpuMultiplicationResult)){
printf("Results are equals!\n");
}else{
printf("Results are NOT equals!\n");
}
system("pause");
delete[] matrixA;
delete[] matrixB;
delete[] cudaMultiplicationResult;
delete[] cudaWithOptimizationMultiplicationResult;
delete[] cpuMultiplicationResult;
}
void matrixMultiplicationWithCuda(int A[][MATRIX_SIZE],int B[][MATRIX_SIZE],int C[][MATRIX_SIZE], bool flagOptimization)
{
int *dev_a, *dev_b, *dev_c;
clock_t begin, end;
hipError_t cudaStatus;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
cudaStatus = hipMalloc((void**)&dev_a, ((MATRIX_SIZE)*(MATRIX_SIZE))*sizeof(int));
checkCUDAStatus(cudaStatus);
cudaStatus = hipMalloc((void**)&dev_b, ((MATRIX_SIZE)*(MATRIX_SIZE))*sizeof(int));
checkCUDAStatus(cudaStatus);
cudaStatus = hipMalloc((void**)&dev_c, ((MATRIX_SIZE)*(MATRIX_SIZE))*sizeof(int));
checkCUDAStatus(cudaStatus);
cudaStatus = hipMemcpy(dev_a, A, ((MATRIX_SIZE*MATRIX_SIZE))*sizeof(int), hipMemcpyHostToDevice);
checkCUDAStatus(cudaStatus);
cudaStatus = hipMemcpy(dev_b, B, ((MATRIX_SIZE*MATRIX_SIZE))*sizeof(int), hipMemcpyHostToDevice);
checkCUDAStatus(cudaStatus);
dim3 dimBlock(BlockSize, BlockSize);
dim3 dimGrid((MATRIX_SIZE + dimBlock.x - 1) / dimBlock.x, (MATRIX_SIZE + dimBlock.y - 1) / dimBlock.y);
hipEventRecord(start);
if(flagOptimization)
hipLaunchKernelGGL(( matrixMultiplicationWithOptimizationKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dev_a, dev_b, dev_c);
else
hipLaunchKernelGGL(( matrixMultiplicationKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dev_a, dev_b, dev_c);
hipEventRecord(stop);
hipEventSynchronize(stop);
cudaStatus = hipGetLastError();
checkCUDAStatus(cudaStatus);
cudaStatus = hipMemcpy(C, dev_c, ((MATRIX_SIZE*MATRIX_SIZE))*sizeof(int), hipMemcpyDeviceToHost);
checkCUDAStatus(cudaStatus);
float time;
hipEventElapsedTime(&time, start, stop);
if(flagOptimization)
printf("CUDA time with optimization: %f seconds\n", time / 1000);
else
printf("CUDA time: %f seconds\n", time / 1000);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
}
void matrixMultiplicationCPU(int A[][MATRIX_SIZE],int B[][MATRIX_SIZE],int C[][MATRIX_SIZE]){
clock_t begin, end;
begin = clock();
for (int row = 0; row < MATRIX_SIZE; row++) {
for (int col = 0; col < MATRIX_SIZE; col++) {
for (int inner = 0; inner < MATRIX_SIZE; inner++) {
C[row][col] += A[row][inner] * B[inner][col];
}
}
}
end = clock();
printf("CPU time: %lf seconds\n", (double)(end - begin)/CLOCKS_PER_SEC);
}
void checkCUDAStatus(hipError_t cudaStatus){
if(cudaStatus != hipSuccess){
printf("CUDA return error code: %d", cudaStatus);
exit(-1);
}
}
bool compareResults(int cudaMultiplicationResult[][MATRIX_SIZE],int cpuMultiplicationResult[][MATRIX_SIZE]){
for(int row = 0; row < MATRIX_SIZE; row++){
for(int column = 0; column < MATRIX_SIZE; column++){
if(cudaMultiplicationResult[row][column] != cpuMultiplicationResult[row][column]){
return false;
}
}
}
return true;
} | 60f34018bab0a4a5afab5cdaa1d42d7efbdffb5b.cu | #include <iostream>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <intrin.h>
#include <ctime>
#pragma comment(lib, "cudart")
using namespace std;
#define MATRIX_SIZE 1024
#define BlockSize 32
// CUDA kernel: cubes each array value
void matrixMultiplicationWithCuda(int A[][MATRIX_SIZE],int B[][MATRIX_SIZE],int C[][MATRIX_SIZE], bool flagOptimozation);
void matrixMultiplicationCPU(int A[][MATRIX_SIZE],int B[][MATRIX_SIZE],int C[][MATRIX_SIZE]);
void checkCUDAStatus(cudaError_t cudaStatus);
bool compareResults(int cudaMultiplicationResult[][MATRIX_SIZE],int cpuMultiplicationResult[][MATRIX_SIZE]);
__global__ void matrixMultiplicationKernel(int *A, int *B, int *C)
{
int result = 0;
int column = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
//__syncthreads();
if (row >= MATRIX_SIZE || column >= MATRIX_SIZE) return;
for (int i = 0; i < MATRIX_SIZE; i++){
result += A[row*MATRIX_SIZE + i] * B[i*MATRIX_SIZE + column];
}
C[row*MATRIX_SIZE + column] = result;
}
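// Tiled version: BlockSize x BlockSize tiles of A and B are staged in shared
// memory so each element is read from global memory once per tile; the bounds
// checks pad out-of-range elements with zeros.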
__global__ void matrixMultiplicationWithOptimizationKernel(int *A, int *B, int *C)
{
__shared__ float ds_M[BlockSize][BlockSize];
__shared__ float ds_N[BlockSize][BlockSize];
int bx = blockIdx.x, by = blockIdx.y,
tx = threadIdx.x, ty = threadIdx.y,
Row = by * BlockSize + ty,
Col = bx * BlockSize + tx;
float Pvalue = 0;
for (int m = 0; m < (MATRIX_SIZE-1)/BlockSize+1; ++m) {
if (Row < MATRIX_SIZE && m*BlockSize+tx < MATRIX_SIZE)
ds_M[ty][tx] = A[Row*MATRIX_SIZE + m*BlockSize+tx];
else
ds_M[ty][tx] = 0;
if (Col < MATRIX_SIZE && m*BlockSize+ty < MATRIX_SIZE)
ds_N[ty][tx] = B[(m*BlockSize+ty)*MATRIX_SIZE+Col];
else
ds_N[ty][tx] = 0;
__syncthreads();
for (int k = 0; k < BlockSize; ++k)
Pvalue += ds_M[ty][k] * ds_N[k][tx];
__syncthreads();
}
if (Row < MATRIX_SIZE && Col < MATRIX_SIZE)
C[Row*MATRIX_SIZE+Col] = Pvalue;
}
int main()
{
srand(time(0));
auto matrixA = new int[MATRIX_SIZE][MATRIX_SIZE];
auto matrixB = new int[MATRIX_SIZE][MATRIX_SIZE];
auto cudaMultiplicationResult = new int[MATRIX_SIZE][MATRIX_SIZE];
auto cudaWithOptimizationMultiplicationResult = new int[MATRIX_SIZE][MATRIX_SIZE];
auto cpuMultiplicationResult = new int[MATRIX_SIZE][MATRIX_SIZE];
for (int i = 0; i<MATRIX_SIZE; i++){
for (int j = 0; j < MATRIX_SIZE; j++){
matrixA[i][j] = rand() % 2048;
matrixB[i][j] = rand() % 2048;
cpuMultiplicationResult[i][j] = 0;
}
}
matrixMultiplicationWithCuda(matrixA, matrixB, cudaMultiplicationResult, false);
matrixMultiplicationWithCuda(matrixA, matrixB, cudaWithOptimizationMultiplicationResult, true);
matrixMultiplicationCPU(matrixA, matrixB, cpuMultiplicationResult);
if(compareResults(cudaMultiplicationResult, cpuMultiplicationResult)){
printf("Results are equals!\n");
}else{
printf("Results are NOT equals!\n");
}
system("pause");
delete[] matrixA;
delete[] matrixB;
delete[] cudaMultiplicationResult;
delete[] cudaWithOptimizationMultiplicationResult;
delete[] cpuMultiplicationResult;
}
void matrixMultiplicationWithCuda(int A[][MATRIX_SIZE],int B[][MATRIX_SIZE],int C[][MATRIX_SIZE], bool flagOptimization)
{
int *dev_a, *dev_b, *dev_c;
clock_t begin, end;
cudaError_t cudaStatus;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaStatus = cudaMalloc((void**)&dev_a, ((MATRIX_SIZE)*(MATRIX_SIZE))*sizeof(int));
checkCUDAStatus(cudaStatus);
cudaStatus = cudaMalloc((void**)&dev_b, ((MATRIX_SIZE)*(MATRIX_SIZE))*sizeof(int));
checkCUDAStatus(cudaStatus);
cudaStatus = cudaMalloc((void**)&dev_c, ((MATRIX_SIZE)*(MATRIX_SIZE))*sizeof(int));
checkCUDAStatus(cudaStatus);
cudaStatus = cudaMemcpy(dev_a, A, ((MATRIX_SIZE*MATRIX_SIZE))*sizeof(int), cudaMemcpyHostToDevice);
checkCUDAStatus(cudaStatus);
cudaStatus = cudaMemcpy(dev_b, B, ((MATRIX_SIZE*MATRIX_SIZE))*sizeof(int), cudaMemcpyHostToDevice);
checkCUDAStatus(cudaStatus);
dim3 dimBlock(BlockSize, BlockSize);
dim3 dimGrid((MATRIX_SIZE + dimBlock.x - 1) / dimBlock.x, (MATRIX_SIZE + dimBlock.y - 1) / dimBlock.y);
cudaEventRecord(start);
if(flagOptimization)
matrixMultiplicationWithOptimizationKernel <<< dimGrid, dimBlock >>>(dev_a, dev_b, dev_c);
else
matrixMultiplicationKernel <<< dimGrid, dimBlock >>>(dev_a, dev_b, dev_c);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaStatus = cudaGetLastError();
checkCUDAStatus(cudaStatus);
cudaStatus = cudaMemcpy(C, dev_c, ((MATRIX_SIZE*MATRIX_SIZE))*sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAStatus(cudaStatus);
float time;
cudaEventElapsedTime(&time, start, stop);
if(flagOptimization)
printf("CUDA time with optimization: %f seconds\n", time / 1000);
else
printf("CUDA time: %f seconds\n", time / 1000);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
}
void matrixMultiplicationCPU(int A[][MATRIX_SIZE],int B[][MATRIX_SIZE],int C[][MATRIX_SIZE]){
clock_t begin, end;
begin = clock();
for (int row = 0; row < MATRIX_SIZE; row++) {
for (int col = 0; col < MATRIX_SIZE; col++) {
for (int inner = 0; inner < MATRIX_SIZE; inner++) {
C[row][col] += A[row][inner] * B[inner][col];
}
}
}
end = clock();
printf("CPU time: %lf seconds\n", (double)(end - begin)/CLOCKS_PER_SEC);
}
void checkCUDAStatus(cudaError_t cudaStatus){
if(cudaStatus != cudaSuccess){
printf("CUDA return error code: %d", cudaStatus);
exit(-1);
}
}
bool compareResults(int cudaMultiplicationResult[][MATRIX_SIZE],int cpuMultiplicationResult[][MATRIX_SIZE]){
for(int row = 0; row < MATRIX_SIZE; row++){
for(int column = 0; column < MATRIX_SIZE; column++){
if(cudaMultiplicationResult[row][column] != cpuMultiplicationResult[row][column]){
return false;
}
}
}
return true;
} |
c210fadc57f0e033b444ad68ede5c819139d1003.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdio.h>
#define uint uint32_t
#define WST 8
#define WSU 8
#define WSV 8
#define WS (WST*WSU*WSV)
#define CELL_LENGTH 4
#define CELL_SIZE (CELL_LENGTH*CELL_LENGTH*CELL_LENGTH)
#define BLOCK_SIZE (WS*CELL_SIZE)
#define WLT (WST*CELL_LENGTH)
#define WLU (WSU*CELL_LENGTH)
#define WLV (WSV*CELL_LENGTH)
#define WS_MASK (WS-1)
#define TID_MASK (WST-1)
#define UID_MASK (WSU-1)
#define VID_MASK (WSV-1)
#include <stdint.h>
#define uint uint32_t
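// xorshift-style PRNG with a 128-bit state kept in a uint4; returns 32 random
// bits and advances the state in place.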
__device__ uint Rng4(uint4& state){
uint t=state.w;
t^= t << 11;
t^= t >> 8;
state.w=state.z; state.z=state.y; state.y=state.x;
t ^= state.x;
t ^= state.x>>19;
state.x=t;
return t;
}
__device__ int TUVToIndex(int t, int u, int v){
int index=0;
index |= (t&0x3)<<9;
index |= (u&0x3)<<11;
index |= (v&0x3)<<13;
index |= (t>>2)+(u>>2)*WST+(v>>2)*WST*WSU;
return index;
}
__device__ void IndexToTUV(int index, int& t, int& u, int& v){
t=(index>> 9)&0x3;
u=(index>>11)&0x3;
v=(index>>13)&0x3;
t+= (index&0x7)<<2;
u+= (index&0x38)>>1;
v+= (index&0x1c0)>>4;
}
__device__ int AddUnitToIndex(int unit, int index, int& OOB){
int dt = ((unit>>0)&0x1);
int du = ((unit>>1)&0x1);
int dv = ((unit>>2)&0x1);
int dw = (unit>>3)&0x1;
int t,u,v;
IndexToTUV(index, t,u,v);
t+= dt-dw;
u+= du-dw;
v+= dv-dw;
OOB = t&(~(WLT-1));
OOB |= u&(~(WLU-1));
OOB |= v&(~(WLV-1));
int newIndex = TUVToIndex(t,u,v);
return newIndex;
}
__device__ uint GPUValidateAddUnitVectors(int a, int b, int& c){
int valid;
if((a|b) != 0xf && (a&b))
return 0;
c = (((a|b)==0xf)?(a&b):(a|b));
valid = (c==0x3||c==0xc)?0:1;
return valid;
}
__device__ uint GPUAddUnitVectors(uint a, uint b){
return (((a|b)==0xf)?(a&b):(a|b));
}
__device__ void TransForw(char* lattice, int index, uint* trans, uint4& rngState){
int OOB;
int latSiteComplete = lattice[index];
if(!latSiteComplete) return;
int next = latSiteComplete&0xf;
int label = latSiteComplete&0x30;
int sl = latSiteComplete&0x40;
int newIndex = AddUnitToIndex(next, index, OOB);
if(OOB) return;
int newSiteComp = lattice[newIndex];
int newSl=newSiteComp&0x40;
if(sl+newSl==0) return;
uint rand = Rng4(rngState);
int newBond1 = (trans[next/4]>>(4*(2*(next%4)+(rand&0x1))))&0xf;
int newBond2 = GPUAddUnitVectors((~newBond1)&0xf, next);
int temp = newBond1;
newBond1 = (rand&0x2)?newBond1:newBond2;
newBond2 = (rand&0x2)?newBond2:temp;
int destIndex = AddUnitToIndex(newBond1,index, OOB);
if(OOB) return;
int destSiteComp = lattice[destIndex];
if(destSiteComp) return;
int moveFirst;
if(sl+newSl==0x80){
moveFirst = (rand&0x4)>>2;
}
else if(sl)
moveFirst = 1;
else
moveFirst = 0;
destSiteComp = newBond2;
if(moveFirst){
latSiteComplete = newBond1|((label>>1)&0x10);
destSiteComp |= label&0x10;
}
else{
latSiteComplete = newBond1|label|sl;
destSiteComp |= (newSiteComp&0x20)>>1;
newSiteComp = newSiteComp&0x1f;
}
lattice[index] = latSiteComplete;
lattice[destIndex] = destSiteComp;
if(!moveFirst)
lattice[newIndex] = newSiteComp;
}
__device__ void TransBack(char* lattice, int index, uint* trans, uint4& rngState){
int OOB;
int latSiteComplete = lattice[index];
int next = latSiteComplete&0xf;
int label = latSiteComplete&0x30;
int sl = latSiteComplete&0x40;
if(!latSiteComplete) return;
int srcIndex = AddUnitToIndex(next, index, OOB);
if(OOB) return;
int srcSiteComp = lattice[srcIndex];
int srcNext = srcSiteComp&0xf;
int srcLabel= srcSiteComp&0x30;
int srcSl = srcSiteComp&0x40;
int newNext;
if(srcSl) return;
if(!GPUValidateAddUnitVectors(next, srcNext, newNext)) return;
int newIndex = AddUnitToIndex(newNext, index, OOB);
if(OOB) return;
int newSiteComp = lattice[newIndex];
int newSiteSl = newSiteComp&0x40;
if(sl+newSiteSl == 0x80) return;
uint rand = Rng4(rngState);
int moveFirst;
if(sl+newSiteSl == 0x0){
moveFirst = rand&0x1;
}
else if(sl == 0x40)
moveFirst = 0;
else
moveFirst = 1;
if(moveFirst){
latSiteComplete = newNext|(label<<1)|srcLabel|0x40;
}
else{
latSiteComplete = newNext|label|sl;
newSiteComp = (newSiteComp&0x3f)|(srcLabel<<1)|0x40;
}
lattice[srcIndex]=0;
lattice[index] = latSiteComplete;
lattice[newIndex] = newSiteComp;
}
__device__ void DiffuseSL(char* lattice, int index){
int OOB;
int latSiteComplete = lattice[index];
int next = latSiteComplete&0xf;
int label = latSiteComplete&0x30;
int sl = latSiteComplete&0x40;
if(!latSiteComplete) return;
int newIndex = AddUnitToIndex(next, index, OOB);
if(OOB) return;
int newSiteComp = lattice[newIndex];
int newSiteLabel = newSiteComp&0x30;
int newSiteSl = newSiteComp&0x40;
if(newSiteSl + sl != 0x40) return;
if(sl){
newSiteComp = newSiteComp | ((label&0x10)<<1) | 0x40;
latSiteComplete = next|((label>>1)&0x10);
}
else{
latSiteComplete = next|(label<<1)|((newSiteLabel>>1)&0x10)|0x40;
newSiteComp = newSiteComp&0x1f;
}
lattice[index] = latSiteComplete;
lattice[newIndex] = newSiteComp;
}
__global__ void polmove(int nStep, uint4* seeds, char* srcLattice, char* dstLattice, uint* gTrans, int dtuv, int dtuv_next, uint NWT, uint NWU, uint NWV){
__shared__ char lattice[BLOCK_SIZE];
uint trans[4];
int lid = threadIdx.x;
int wid = blockIdx.x;
int gid = wid * blockDim.x + lid;
int widt = wid%NWT;
int widu = (wid/NWT)%NWU;
int widv = wid/(NWU*NWT);
uint4 rngl;
uint4 rngp;
uint site;
int dt = dtuv%WLT;
int du = (dtuv/WLT)%(WLU);
int dv = dtuv/(WLT*WLU);
int p=0;
int dtBlock=WLT-dt;
int duBlock=WLU-du;
int dvBlock=WLV-dv;
int pSwitchNext=dtBlock*duBlock*dvBlock;
int memOffSet=0;
// printf("pSwitchNext=%i\n", pSwitchNext);
int src;
for(src=lid*4; src<BLOCK_SIZE; src += 4*WS){
for(int i=0; i<4 && i+src<BLOCK_SIZE; i++){
while(i+src>=pSwitchNext){
memOffSet = pSwitchNext;
p++;
dtBlock = (p&0x1)?dt:(WLT-dt);
duBlock = (p&0x2)?du:(WLU-du);
dvBlock = (p&0x4)?dv:(WLV-dv);
pSwitchNext += dtBlock*duBlock*dvBlock;
}
int offSet = src+i-memOffSet;
int t = ((p&0x1)?(WLT-dt):0) + (offSet%dtBlock);
int u = ((p&0x2)?(WLU-du):0) + ((offSet/dtBlock)%duBlock);
int v = ((p&0x4)?(WLV-dv):0) + (offSet/(dtBlock*duBlock));
int index = TUVToIndex(t,u,v);
lattice[index]=srcLattice[src+i+wid*BLOCK_SIZE];
}
}
for(int i=0; i<4; i++) trans[i] = gTrans[i];
int indexStart = ((lid&0x1f)<<2)|((lid&0x60)>>5)|(lid&0x180);
rngp = seeds[gid*2];
rngl = seeds[gid*2+1];
__syncthreads();
for(int i=0; i<nStep; i++){
uint randLoc = Rng4(rngl);
site = indexStart | ((randLoc&0x3f)<<9);
TransForw(lattice, site, trans, rngp); __syncthreads();
randLoc >>= 6;
site = indexStart | ((randLoc&0x3f)<<9);
DiffuseSL(lattice, site); __syncthreads();
randLoc >>= 6;
site = indexStart | ((randLoc&0x3f)<<9);
TransForw(lattice, site, trans, rngp); __syncthreads();
randLoc >>= 6;
site = indexStart | ((randLoc&0x3f)<<9);
TransBack(lattice, site, trans, rngp); __syncthreads();
}
dt = dtuv_next%WLT;
du = (dtuv_next/WLT)%(WLU);
dv = dtuv_next/(WLT*WLU);
memOffSet=0;
// printf("????\n");
for(int p=0; p<8; p++){
int dtBlock = (p&0x1)?dt:(WLT-dt);
int duBlock = (p&0x2)?du:(WLU-du);
int dvBlock = (p&0x4)?dv:(WLV-dv);
int dstWid = (widt+NWT-(((p>>0)&0x1)))%NWT;
dstWid += ((widu+NWU-(((p>>1)&0x1)))%NWU)*NWT;
dstWid += ((widv+NWV-(((p>>2)&0x1)))%NWV)*NWT*NWU;
// if(lid==0)
// printf("p=%i, wid=(%i,%i,%i), dstWid=(%i,%i,%i)=%i\n", p,widt,widu,widv,(widt+NWT-(((p>>0)&0x1)))%NWT, ((widu+NWU-(((p>>1)&0x1)))%NWU), ((widv+NWV-(((p>>2)&0x1)))%NWV), dstWid);
// if(lid==0 && wid==0)
// printf("block=(%i,%i,%i), p=%i\n", dtBlock, duBlock, dvBlock, p);
for(int i=lid; i<dtBlock*duBlock*dvBlock; i+=WS){
int t = i%dtBlock + ((p&0x1)?0:dt);
int u = (i/dtBlock)%duBlock + ((p&0x2)?0:du);
int v = i/(dtBlock*duBlock) + ((p&0x4)?0:dv);
int dst = dstWid*BLOCK_SIZE+memOffSet+i;
int index = TUVToIndex(t, u, v);
// if(lid%55==0)
// printf("dstWid=%i,%i (p=%i), memOffSet=%i, i=%i, (%i,%i,%i)\n", dstWid, dst, p, memOffSet, i, t,u,v);
dstLattice[dst] = lattice[index];
}
memOffSet += dtBlock*duBlock*dvBlock;
}
seeds[gid*2]=rngp;
seeds[gid*2+1]=rngl;
__syncthreads();
}
| c210fadc57f0e033b444ad68ede5c819139d1003.cu | #include <stdint.h>
#include <stdio.h>
#define uint uint32_t
#define WST 8
#define WSU 8
#define WSV 8
#define WS (WST*WSU*WSV)
#define CELL_LENGTH 4
#define CELL_SIZE (CELL_LENGTH*CELL_LENGTH*CELL_LENGTH)
#define BLOCK_SIZE (WS*CELL_SIZE)
#define WLT (WST*CELL_LENGTH)
#define WLU (WSU*CELL_LENGTH)
#define WLV (WSV*CELL_LENGTH)
#define WS_MASK (WS-1)
#define TID_MASK (WST-1)
#define UID_MASK (WSU-1)
#define VID_MASK (WSV-1)
#include <stdint.h>
#define uint uint32_t
__device__ uint Rng4(uint4& state){
uint t=state.w;
t^= t << 11;
t^= t >> 8;
state.w=state.z; state.z=state.y; state.y=state.x;
t ^= state.x;
t ^= state.x>>19;
state.x=t;
return t;
}
__device__ int TUVToIndex(int t, int u, int v){
int index=0;
index |= (t&0x3)<<9;
index |= (u&0x3)<<11;
index |= (v&0x3)<<13;
index |= (t>>2)+(u>>2)*WST+(v>>2)*WST*WSU;
return index;
}
__device__ void IndexToTUV(int index, int& t, int& u, int& v){
t=(index>> 9)&0x3;
u=(index>>11)&0x3;
v=(index>>13)&0x3;
t+= (index&0x7)<<2;
u+= (index&0x38)>>1;
v+= (index&0x1c0)>>4;
}
__device__ int AddUnitToIndex(int unit, int index, int& OOB){
int dt = ((unit>>0)&0x1);
int du = ((unit>>1)&0x1);
int dv = ((unit>>2)&0x1);
int dw = (unit>>3)&0x1;
int t,u,v;
IndexToTUV(index, t,u,v);
t+= dt-dw;
u+= du-dw;
v+= dv-dw;
OOB = t&(~(WLT-1));
OOB |= u&(~(WLU-1));
OOB |= v&(~(WLV-1));
int newIndex = TUVToIndex(t,u,v);
return newIndex;
}
__device__ uint GPUValidateAddUnitVectors(int a, int b, int& c){
int valid;
if((a|b) != 0xf && (a&b))
return 0;
c = (((a|b)==0xf)?(a&b):(a|b));
valid = (c==0x3||c==0xc)?0:1;
return valid;
}
__device__ uint GPUAddUnitVectors(uint a, uint b){
return (((a|b)==0xf)?(a&b):(a|b));
}
__device__ void TransForw(char* lattice, int index, uint* trans, uint4& rngState){
int OOB;
int latSiteComplete = lattice[index];
if(!latSiteComplete) return;
int next = latSiteComplete&0xf;
int label = latSiteComplete&0x30;
int sl = latSiteComplete&0x40;
int newIndex = AddUnitToIndex(next, index, OOB);
if(OOB) return;
int newSiteComp = lattice[newIndex];
int newSl=newSiteComp&0x40;
if(sl+newSl==0) return;
uint rand = Rng4(rngState);
int newBond1 = (trans[next/4]>>(4*(2*(next%4)+(rand&0x1))))&0xf;
int newBond2 = GPUAddUnitVectors((~newBond1)&0xf, next);
int temp = newBond1;
newBond1 = (rand&0x2)?newBond1:newBond2;
newBond2 = (rand&0x2)?newBond2:temp;
int destIndex = AddUnitToIndex(newBond1,index, OOB);
if(OOB) return;
int destSiteComp = lattice[destIndex];
if(destSiteComp) return;
int moveFirst;
if(sl+newSl==0x80){
moveFirst = (rand&0x4)>>2;
}
else if(sl)
moveFirst = 1;
else
moveFirst = 0;
destSiteComp = newBond2;
if(moveFirst){
latSiteComplete = newBond1|((label>>1)&0x10);
destSiteComp |= label&0x10;
}
else{
latSiteComplete = newBond1|label|sl;
destSiteComp |= (newSiteComp&0x20)>>1;
newSiteComp = newSiteComp&0x1f;
}
lattice[index] = latSiteComplete;
lattice[destIndex] = destSiteComp;
if(!moveFirst)
lattice[newIndex] = newSiteComp;
}
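// Inverse of TransForw: fuses the bond at `index` with the bond at the site it points to into one unit vector (when valid),
// clears the vacated intermediate site, and moves the sl bit (0x40) and labels onto `index` or the far site,
// chosen from the existing sl flags or a random bit.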
__device__ void TransBack(char* lattice, int index, uint* trans, uint4& rngState){
int OOB;
int latSiteComplete = lattice[index];
int next = latSiteComplete&0xf;
int label = latSiteComplete&0x30;
int sl = latSiteComplete&0x40;
if(!latSiteComplete) return;
int srcIndex = AddUnitToIndex(next, index, OOB);
if(OOB) return;
int srcSiteComp = lattice[srcIndex];
int srcNext = srcSiteComp&0xf;
int srcLabel= srcSiteComp&0x30;
int srcSl = srcSiteComp&0x40;
int newNext;
if(srcSl) return;
if(!GPUValidateAddUnitVectors(next, srcNext, newNext)) return;
int newIndex = AddUnitToIndex(newNext, index, OOB);
if(OOB) return;
int newSiteComp = lattice[newIndex];
int newSiteSl = newSiteComp&0x40;
if(sl+newSiteSl == 0x80) return;
uint rand = Rng4(rngState);
int moveFirst;
if(sl+newSiteSl == 0x0){
moveFirst = rand&0x1;
}
else if(sl == 0x40)
moveFirst = 0;
else
moveFirst = 1;
if(moveFirst){
latSiteComplete = newNext|(label<<1)|srcLabel|0x40;
}
else{
latSiteComplete = newNext|label|sl;
newSiteComp = (newSiteComp&0x3f)|(srcLabel<<1)|0x40;
}
lattice[srcIndex]=0;
lattice[index] = latSiteComplete;
lattice[newIndex] = newSiteComp;
}
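// Transfers the sl bit (0x40) and one label bit between the site at `index` and the site its bond points to;
// only acts when exactly one of the two sites has sl set.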
__device__ void DiffuseSL(char* lattice, int index){
int OOB;
int latSiteComplete = lattice[index];
int next = latSiteComplete&0xf;
int label = latSiteComplete&0x30;
int sl = latSiteComplete&0x40;
if(!latSiteComplete) return;
int newIndex = AddUnitToIndex(next, index, OOB);
if(OOB) return;
int newSiteComp = lattice[newIndex];
int newSiteLabel = newSiteComp&0x30;
int newSiteSl = newSiteComp&0x40;
if(newSiteSl + sl != 0x40) return;
if(sl){
newSiteComp = newSiteComp | ((label&0x10)<<1) | 0x40;
latSiteComplete = next|((label>>1)&0x10);
}
else{
latSiteComplete = next|(label<<1)|((newSiteLabel>>1)&0x10)|0x40;
newSiteComp = newSiteComp&0x1f;
}
lattice[index] = latSiteComplete;
lattice[newIndex] = newSiteComp;
}
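// One block works on one BLOCK_SIZE sub-lattice: it gathers the (shifted) sub-lattice from srcLattice into shared memory,
// runs nStep rounds of TransForw / DiffuseSL / TransForw / TransBack at per-thread random sites,
// then scatters the result to dstLattice using the dtuv_next shift and stores the updated RNG state.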
__global__ void polmove(int nStep, uint4* seeds, char* srcLattice, char* dstLattice, uint* gTrans, int dtuv, int dtuv_next, uint NWT, uint NWU, uint NWV){
__shared__ char lattice[BLOCK_SIZE];
uint trans[4];
int lid = threadIdx.x;
int wid = blockIdx.x;
int gid = wid * blockDim.x + lid;
int widt = wid%NWT;
int widu = (wid/NWT)%NWU;
int widv = wid/(NWU*NWT);
uint4 rngl;
uint4 rngp;
uint site;
int dt = dtuv%WLT;
int du = (dtuv/WLT)%(WLU);
int dv = dtuv/(WLT*WLU);
int p=0;
int dtBlock=WLT-dt;
int duBlock=WLU-du;
int dvBlock=WLV-dv;
int pSwitchNext=dtBlock*duBlock*dvBlock;
int memOffSet=0;
// printf("pSwitchNext=%i\n", pSwitchNext);
int src;
for(src=lid*4; src<BLOCK_SIZE; src += 4*WS){
for(int i=0; i<4 && i+src<BLOCK_SIZE; i++){
while(i+src>=pSwitchNext){
memOffSet = pSwitchNext;
p++;
dtBlock = (p&0x1)?dt:(WLT-dt);
duBlock = (p&0x2)?du:(WLU-du);
dvBlock = (p&0x4)?dv:(WLV-dv);
pSwitchNext += dtBlock*duBlock*dvBlock;
}
int offSet = src+i-memOffSet;
int t = ((p&0x1)?(WLT-dt):0) + (offSet%dtBlock);
int u = ((p&0x2)?(WLU-du):0) + ((offSet/dtBlock)%duBlock);
int v = ((p&0x4)?(WLV-dv):0) + (offSet/(dtBlock*duBlock));
int index = TUVToIndex(t,u,v);
lattice[index]=srcLattice[src+i+wid*BLOCK_SIZE];
}
}
for(int i=0; i<4; i++) trans[i] = gTrans[i];
int indexStart = ((lid&0x1f)<<2)|((lid&0x60)>>5)|(lid&0x180);
rngp = seeds[gid*2];
rngl = seeds[gid*2+1];
__syncthreads();
for(int i=0; i<nStep; i++){
uint randLoc = Rng4(rngl);
site = indexStart | ((randLoc&0x3f)<<9);
TransForw(lattice, site, trans, rngp); __syncthreads();
randLoc >>= 6;
site = indexStart | ((randLoc&0x3f)<<9);
DiffuseSL(lattice, site); __syncthreads();
randLoc >>= 6;
site = indexStart | ((randLoc&0x3f)<<9);
TransForw(lattice, site, trans, rngp); __syncthreads();
randLoc >>= 6;
site = indexStart | ((randLoc&0x3f)<<9);
TransBack(lattice, site, trans, rngp); __syncthreads();
}
dt = dtuv_next%WLT;
du = (dtuv_next/WLT)%(WLU);
dv = dtuv_next/(WLT*WLU);
memOffSet=0;
// printf("????\n");
for(int p=0; p<8; p++){
int dtBlock = (p&0x1)?dt:(WLT-dt);
int duBlock = (p&0x2)?du:(WLU-du);
int dvBlock = (p&0x4)?dv:(WLV-dv);
int dstWid = (widt+NWT-(((p>>0)&0x1)))%NWT;
dstWid += ((widu+NWU-(((p>>1)&0x1)))%NWU)*NWT;
dstWid += ((widv+NWV-(((p>>2)&0x1)))%NWV)*NWT*NWU;
// if(lid==0)
// printf("p=%i, wid=(%i,%i,%i), dstWid=(%i,%i,%i)=%i\n", p,widt,widu,widv,(widt+NWT-(((p>>0)&0x1)))%NWT, ((widu+NWU-(((p>>1)&0x1)))%NWU), ((widv+NWV-(((p>>2)&0x1)))%NWV), dstWid);
// if(lid==0 && wid==0)
// printf("block=(%i,%i,%i), p=%i\n", dtBlock, duBlock, dvBlock, p);
for(int i=lid; i<dtBlock*duBlock*dvBlock; i+=WS){
int t = i%dtBlock + ((p&0x1)?0:dt);
int u = (i/dtBlock)%duBlock + ((p&0x2)?0:du);
int v = i/(dtBlock*duBlock) + ((p&0x4)?0:dv);
int dst = dstWid*BLOCK_SIZE+memOffSet+i;
int index = TUVToIndex(t, u, v);
// if(lid%55==0)
// printf("dstWid=%i,%i (p=%i), memOffSet=%i, i=%i, (%i,%i,%i)\n", dstWid, dst, p, memOffSet, i, t,u,v);
dstLattice[dst] = lattice[index];
}
memOffSet += dtBlock*duBlock*dvBlock;
}
seeds[gid*2]=rngp;
seeds[gid*2+1]=rngl;
__syncthreads();
}
|
5d13d78e29fbed4f49adf871680d8259891b73a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/elementwise_ops.h"
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(
Not,
UnaryElementwiseOp<BoolTypes, CUDAContext, NotFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
Sign,
UnaryElementwiseOp<NumericTypes, CUDAContext, SignFunctor<CUDAContext>>);
#define REGISTER_CUDA_COMPARE_OPERATOR(Op) \
REGISTER_CUDA_OPERATOR( \
Op, \
BinaryElementwiseOp< \
TensorTypes<bool, int32_t, int64_t, float, double>, \
CUDAContext, \
Op##Functor<CUDAContext>, \
FixedType<bool>>)
REGISTER_CUDA_COMPARE_OPERATOR(EQ);
REGISTER_CUDA_COMPARE_OPERATOR(NE);
REGISTER_CUDA_COMPARE_OPERATOR(LT);
REGISTER_CUDA_COMPARE_OPERATOR(LE);
REGISTER_CUDA_COMPARE_OPERATOR(GT);
REGISTER_CUDA_COMPARE_OPERATOR(GE);
#undef REGISTER_CUDA_COMPARE_OPERATOR
#define REGISTER_CUDA_LOGICAL_BINARY_OPERATOR(Op) \
REGISTER_CUDA_OPERATOR( \
Op, \
BinaryElementwiseOp<BoolTypes, CUDAContext, Op##Functor<CUDAContext>>)
REGISTER_CUDA_LOGICAL_BINARY_OPERATOR(And);
REGISTER_CUDA_LOGICAL_BINARY_OPERATOR(Or);
REGISTER_CUDA_LOGICAL_BINARY_OPERATOR(Xor);
#undef REGISTER_CUDA_LOGICAL_BINARY_OPERATOR
#define REGISTER_CUDA_BITWISE_BINARY_OPERATOR(Op) \
REGISTER_CUDA_OPERATOR( \
Op, \
BinaryElementwiseOp< \
IntBoolTypes, \
CUDAContext, \
Op##Functor<CUDAContext>>)
REGISTER_CUDA_BITWISE_BINARY_OPERATOR(BitwiseAnd);
REGISTER_CUDA_BITWISE_BINARY_OPERATOR(BitwiseOr);
REGISTER_CUDA_BITWISE_BINARY_OPERATOR(BitwiseXor);
#undef REGISTER_CUDA_BITWISE_BINARY_OPERATOR
namespace {
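// Used when post == 1: each thread sums one of the N columns over the leading `pre` dimension.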
template <typename T>
__global__ void
reduce_sum_like_post1(const T* g_idata, T* g_odata, int pre, int N) {
int n = blockIdx.x * blockDim.x + threadIdx.x;
if (n >= N) {
return;
}
float sum = 0.0;
for (int i = 0; i < pre; ++i) {
sum += convert::To<T, float>(g_idata[i * N + n]);
}
g_odata[n] = convert::To<float, T>(sum);
}
template <typename T>
void device_reduce(
const T* d_in,
T* d_out,
int N,
Tensor<CUDAContext>* buffer,
CUDAContext* context) {
// Determine temporary device storage requirements
size_t temp_storage_bytes = 0;
hipcub::DeviceReduce::Sum(
NULL, temp_storage_bytes, d_in, d_out, N, context->cuda_stream());
auto buffer_size = temp_storage_bytes / sizeof(T);
buffer_size += temp_storage_bytes % sizeof(T) != 0 ? 1 : 0;
buffer->Resize(buffer_size);
void* d_temp_storage = static_cast<void*>(buffer->template mutable_data<T>());
// Run sum-reduction
hipcub::DeviceReduce::Sum(
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
N,
context->cuda_stream());
}
template <>
void device_reduce<float16>(
const float16* in,
float16* out,
int N,
Tensor<CUDAContext>* buffer,
CUDAContext* context) {
auto buffer_size = 1;
if (buffer->size() != buffer_size) {
buffer->Resize(buffer_size);
math::Set<float16, CUDAContext>(
N,
convert::To<float, float16>(1.),
buffer->mutable_data<float16>(),
context);
}
CUBLAS_ENFORCE(hipblasDotEx_v2(
context->cublas_handle(),
N,
in,
HIP_R_16F,
1,
buffer->data<float16>(),
HIP_R_16F,
0,
out,
HIP_R_16F,
HIP_R_32F));
}
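// One block per output element n: threads stride over the pre x post slice and a block-wide reduction produces the final sum.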
template <typename T, int BLOCK_THREADS>
__global__ void
reduce_sum_like(const T* g_idata, T* g_odata, int pre, int N, int post) {
int n = blockIdx.x;
float sum = 0.0;
int limit = pre * post;
for (int i = threadIdx.x; i < limit; i += blockDim.x) {
int curPre = i / post;
int curPost = i % post;
sum +=
convert::To<T, float>(g_idata[curPre * N * post + n * post + curPost]);
}
// uses a shared memory reduction within block
typedef hipcub::BlockReduce<float, BLOCK_THREADS> BlockReduceT;
// Shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
float aggregate = BlockReduceT(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
g_odata[n] = convert::To<float, T>(aggregate);
}
}
} // namespace
template <>
template <typename T>
bool SumReduceLikeOp<CUDAContext>::DoRunWithType() {
const auto& A = Input(0);
const auto& B = Input(1);
auto* C = Output(0);
auto count = A.size();
CAFFE_ENFORCE(&B != C, "In-place is not allowed.");
C->ResizeLike(B);
const T* Adata = A.template data<T>();
auto* Cdata = C->template mutable_data<T>();
if (B.size() == 1) {
device_reduce<T>(Adata, Cdata, count, &sum_buffer_, &context_);
} else {
size_t pre, n, post;
std::tie(pre, n, post) = ComputeLegacyBroadcastSizes(A, B, axis_);
// because we check shape(B) \in shape(A) beforehand,
// post and pre cannot both be 1 at the same time
if (post == 1) {
hipLaunchKernelGGL(( reduce_sum_like_post1<T>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), Adata, Cdata, pre, n);
} else {
if (post >= 128) {
hipLaunchKernelGGL(( reduce_sum_like<T, 512>)
, dim3(n), dim3(512), 0, context_.cuda_stream(), Adata, Cdata, pre, n, post);
} else if (post >= 64) {
hipLaunchKernelGGL(( reduce_sum_like<T, 128>)
, dim3(n), dim3(128), 0, context_.cuda_stream(), Adata, Cdata, pre, n, post);
} else if (post >= 32) {
hipLaunchKernelGGL(( reduce_sum_like<T, 64>)
, dim3(n), dim3(64), 0, context_.cuda_stream(), Adata, Cdata, pre, n, post);
} else {
hipLaunchKernelGGL(( reduce_sum_like<T, 32>)
, dim3(n), dim3(32), 0, context_.cuda_stream(), Adata, Cdata, pre, n, post);
}
}
}
return true;
}
template <>
bool SumReduceLikeOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, float16>>::call(this, Input(0));
}
REGISTER_CUDA_OPERATOR(SumReduceLike, SumReduceLikeOp<CUDAContext>);
} // namespace caffe2
| 5d13d78e29fbed4f49adf871680d8259891b73a0.cu | #include "caffe2/operators/elementwise_ops.h"
#include <cub/block/block_load.cuh>
#include <cub/block/block_reduce.cuh>
#include <cub/device/device_reduce.cuh>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(
Not,
UnaryElementwiseOp<BoolTypes, CUDAContext, NotFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
Sign,
UnaryElementwiseOp<NumericTypes, CUDAContext, SignFunctor<CUDAContext>>);
#define REGISTER_CUDA_COMPARE_OPERATOR(Op) \
REGISTER_CUDA_OPERATOR( \
Op, \
BinaryElementwiseOp< \
TensorTypes<bool, int32_t, int64_t, float, double>, \
CUDAContext, \
Op##Functor<CUDAContext>, \
FixedType<bool>>)
REGISTER_CUDA_COMPARE_OPERATOR(EQ);
REGISTER_CUDA_COMPARE_OPERATOR(NE);
REGISTER_CUDA_COMPARE_OPERATOR(LT);
REGISTER_CUDA_COMPARE_OPERATOR(LE);
REGISTER_CUDA_COMPARE_OPERATOR(GT);
REGISTER_CUDA_COMPARE_OPERATOR(GE);
#undef REGISTER_CUDA_COMPARE_OPERATOR
#define REGISTER_CUDA_LOGICAL_BINARY_OPERATOR(Op) \
REGISTER_CUDA_OPERATOR( \
Op, \
BinaryElementwiseOp<BoolTypes, CUDAContext, Op##Functor<CUDAContext>>)
REGISTER_CUDA_LOGICAL_BINARY_OPERATOR(And);
REGISTER_CUDA_LOGICAL_BINARY_OPERATOR(Or);
REGISTER_CUDA_LOGICAL_BINARY_OPERATOR(Xor);
#undef REGISTER_CUDA_LOGICAL_BINARY_OPERATOR
#define REGISTER_CUDA_BITWISE_BINARY_OPERATOR(Op) \
REGISTER_CUDA_OPERATOR( \
Op, \
BinaryElementwiseOp< \
IntBoolTypes, \
CUDAContext, \
Op##Functor<CUDAContext>>)
REGISTER_CUDA_BITWISE_BINARY_OPERATOR(BitwiseAnd);
REGISTER_CUDA_BITWISE_BINARY_OPERATOR(BitwiseOr);
REGISTER_CUDA_BITWISE_BINARY_OPERATOR(BitwiseXor);
#undef REGISTER_CUDA_BITWISE_BINARY_OPERATOR
namespace {
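// Used when post == 1: each thread sums one of the N columns over the leading `pre` dimension.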
template <typename T>
__global__ void
reduce_sum_like_post1(const T* g_idata, T* g_odata, int pre, int N) {
int n = blockIdx.x * blockDim.x + threadIdx.x;
if (n >= N) {
return;
}
float sum = 0.0;
for (int i = 0; i < pre; ++i) {
sum += convert::To<T, float>(g_idata[i * N + n]);
}
g_odata[n] = convert::To<float, T>(sum);
}
template <typename T>
void device_reduce(
const T* d_in,
T* d_out,
int N,
Tensor<CUDAContext>* buffer,
CUDAContext* context) {
// Determine temporary device storage requirements
size_t temp_storage_bytes = 0;
cub::DeviceReduce::Sum(
NULL, temp_storage_bytes, d_in, d_out, N, context->cuda_stream());
auto buffer_size = temp_storage_bytes / sizeof(T);
buffer_size += temp_storage_bytes % sizeof(T) != 0 ? 1 : 0;
buffer->Resize(buffer_size);
void* d_temp_storage = static_cast<void*>(buffer->template mutable_data<T>());
// Run sum-reduction
cub::DeviceReduce::Sum(
d_temp_storage,
temp_storage_bytes,
d_in,
d_out,
N,
context->cuda_stream());
}
template <>
void device_reduce<float16>(
const float16* in,
float16* out,
int N,
Tensor<CUDAContext>* buffer,
CUDAContext* context) {
auto buffer_size = 1;
if (buffer->size() != buffer_size) {
buffer->Resize(buffer_size);
math::Set<float16, CUDAContext>(
N,
convert::To<float, float16>(1.),
buffer->mutable_data<float16>(),
context);
}
CUBLAS_ENFORCE(cublasDotEx(
context->cublas_handle(),
N,
in,
CUDA_R_16F,
1,
buffer->data<float16>(),
CUDA_R_16F,
0,
out,
CUDA_R_16F,
CUDA_R_32F));
}
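// One block per output element n: threads stride over the pre x post slice and a block-wide reduction produces the final sum.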
template <typename T, int BLOCK_THREADS>
__global__ void
reduce_sum_like(const T* g_idata, T* g_odata, int pre, int N, int post) {
int n = blockIdx.x;
float sum = 0.0;
int limit = pre * post;
for (int i = threadIdx.x; i < limit; i += blockDim.x) {
int curPre = i / post;
int curPost = i % post;
sum +=
convert::To<T, float>(g_idata[curPre * N * post + n * post + curPost]);
}
// uses a shared memory reduction within block
typedef cub::BlockReduce<float, BLOCK_THREADS> BlockReduceT;
// Shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
float aggregate = BlockReduceT(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
g_odata[n] = convert::To<float, T>(aggregate);
}
}
} // namespace
template <>
template <typename T>
bool SumReduceLikeOp<CUDAContext>::DoRunWithType() {
const auto& A = Input(0);
const auto& B = Input(1);
auto* C = Output(0);
auto count = A.size();
CAFFE_ENFORCE(&B != C, "In-place is not allowed.");
C->ResizeLike(B);
const T* Adata = A.template data<T>();
auto* Cdata = C->template mutable_data<T>();
if (B.size() == 1) {
device_reduce<T>(Adata, Cdata, count, &sum_buffer_, &context_);
} else {
size_t pre, n, post;
std::tie(pre, n, post) = ComputeLegacyBroadcastSizes(A, B, axis_);
// because we check shape(B) \in shape(A) beforehand,
// post and pre cannot both be 1 at the same time
if (post == 1) {
reduce_sum_like_post1<T>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(Adata, Cdata, pre, n);
} else {
if (post >= 128) {
reduce_sum_like<T, 512>
<<<n, 512, 0, context_.cuda_stream()>>>(Adata, Cdata, pre, n, post);
} else if (post >= 64) {
reduce_sum_like<T, 128>
<<<n, 128, 0, context_.cuda_stream()>>>(Adata, Cdata, pre, n, post);
} else if (post >= 32) {
reduce_sum_like<T, 64>
<<<n, 64, 0, context_.cuda_stream()>>>(Adata, Cdata, pre, n, post);
} else {
reduce_sum_like<T, 32>
<<<n, 32, 0, context_.cuda_stream()>>>(Adata, Cdata, pre, n, post);
}
}
}
return true;
}
template <>
bool SumReduceLikeOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, float16>>::call(this, Input(0));
}
REGISTER_CUDA_OPERATOR(SumReduceLike, SumReduceLikeOp<CUDAContext>);
} // namespace caffe2
|
310f99e5b04a706f59179c3139147da15342d6ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _FILTER_KERNEL_H_
#define _FILTER_KERNEL_H_
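// 3x3 Sobel edge detector: each block stages its tile in shared memory, interior threads compute |Gx| + |Gy|
// and threshold it against EDGE_VALUE_THRESHOLD, and border pixels are passed through unchanged.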
__global__ void SobelFilter(unsigned char* g_DataIn, unsigned char* g_DataOut, int width, int height)
{
__shared__ unsigned char sharedMem[BLOCK_HEIGHT * BLOCK_WIDTH];
float s_SobelMatrix[9];
s_SobelMatrix[0] = -1;
s_SobelMatrix[1] = 0;
s_SobelMatrix[2] = 1;
s_SobelMatrix[3] = -2;
s_SobelMatrix[4] = 0;
s_SobelMatrix[5] = 2;
s_SobelMatrix[6] = -1;
s_SobelMatrix[7] = 0;
s_SobelMatrix[8] = 1;
// Compute the X and Y global coordinates
int x = blockIdx.x * TILE_WIDTH + threadIdx.x ;//- FILTER_RADIUS;
int y = blockIdx.y * TILE_HEIGHT + threadIdx.y ;//- FILTER_RADIUS;
// Get the Global index into the original image
int index = y * (width) + x;
// STUDENT: Check 1
// Handle the extra-thread case where x or y falls outside the image width or height
if (x >= width || y >= height)
return;
// STUDENT: Check 2
// Handle the border cases of the global image
if( x < FILTER_RADIUS || y < FILTER_RADIUS) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((x > width - FILTER_RADIUS - 1)&&(x <width)) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((y > height - FILTER_RADIUS - 1)&&(y < height)) {
g_DataOut[index] = g_DataIn[index];
return;
}
// Perform the first load of values into shared memory
int sharedIndex = threadIdx.y * blockDim.x + threadIdx.x; // row-major index into the tile (row stride = tile width)
sharedMem[sharedIndex] = g_DataIn[index];
__syncthreads();
// STUDENT: Make sure that only the appropriate thread ids write the sum of the neighbors.
// float sumX = 0, sumY=0;
// g_DataOut[index] = abs(sumX) + abs(sumY) > EDGE_VALUE_THRESHOLD ? 255 : 0;
// if((threadIdx.x >= FILTER_RADIUS) && (threadIdx.x < (BLOCK_WIDTH - FILTER_RADIUS))&&(threadIdx.y >= FILTER_RADIUS) && (threadIdx.y < (BLOCK_HEIGHT - FILTER_RADIUS)))
if((threadIdx.x >= FILTER_RADIUS) && (threadIdx.x <( BLOCK_WIDTH - FILTER_RADIUS)) &&(threadIdx.y >= FILTER_RADIUS) && (threadIdx.y < (BLOCK_HEIGHT - FILTER_RADIUS)))
{
//float sum = 0;
float sumX = 0, sumY=0;
for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; ++dy)
{
for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; ++dx)
{
float pixelValue = (float)(sharedMem[sharedIndex + (dy * blockDim.x + dx)]);
//float pixelValue = (float)(g_DataIn[Index + (dy * blockDim.x + dx)]);
//float Pixel = (float)(sharedMem[sharedIndex + (dy * blockDim.x + dx)]);
//sum += pixelValue;
// sumX += Pixel * s_SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER +(dx+FILTER_RADIUS)];
//sumY += Pixel * s_SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER +(dy+FILTER_RADIUS)];
sumX+= pixelValue * s_SobelMatrix[(dy +FILTER_RADIUS)*(FILTER_DIAMETER)+(dx + FILTER_RADIUS)];
sumY+= pixelValue * s_SobelMatrix[(dx +FILTER_RADIUS)*(FILTER_DIAMETER)+(dy + FILTER_RADIUS)];
}
}
// g_DataOut[index] = (BYTE)(sum / FILTER_AREA);
g_DataOut[index] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 255 : 0;//1023 : 0
}
}
/*__global__ void AverageFilter(unsigned char* g_DataIn, unsigned char* g_DataOut, int width, int height)
{
__shared__ unsigned char sharedMem[BLOCK_HEIGHT*BLOCK_WIDTH];
int x = blockIdx.x * TILE_WIDTH + threadIdx.x ;//- FILTER_RADIUS;
int y = blockIdx.y * TILE_HEIGHT + threadIdx.y ;//- FILTER_RADIUS;
// Get the Global index into the original image
int index = y * (width) + x;
// STUDENT: write code for Average Filter : use Sobel as base code
}
__global__ void HighBoostFilter(unsigned char* g_DataIn, unsigned char* g_DataOut, int width, int height)
{
__shared__ unsigned char sharedMem[BLOCK_HEIGHT*BLOCK_WIDTH];
int x = blockIdx.x * TILE_WIDTH + threadIdx.x ;//- FILTER_RADIUS;
int y = blockIdx.y * TILE_HEIGHT + threadIdx.y ;//- FILTER_RADIUS;
// Get the Global index into the original image
int index = y * (width) + x;
// STUDENT: write code for High Boost Filter : use Sobel as base code
}
*/
#endif // _FILTER_KERNEL_H_
| 310f99e5b04a706f59179c3139147da15342d6ef.cu |
#ifndef _FILTER_KERNEL_H_
#define _FILTER_KERNEL_H_
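// 3x3 Sobel edge detector: each block stages its tile in shared memory, interior threads compute |Gx| + |Gy|
// and threshold it against EDGE_VALUE_THRESHOLD, and border pixels are passed through unchanged.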
__global__ void SobelFilter(unsigned char* g_DataIn, unsigned char* g_DataOut, int width, int height)
{
__shared__ unsigned char sharedMem[BLOCK_HEIGHT * BLOCK_WIDTH];
float s_SobelMatrix[9];
s_SobelMatrix[0] = -1;
s_SobelMatrix[1] = 0;
s_SobelMatrix[2] = 1;
s_SobelMatrix[3] = -2;
s_SobelMatrix[4] = 0;
s_SobelMatrix[5] = 2;
s_SobelMatrix[6] = -1;
s_SobelMatrix[7] = 0;
s_SobelMatrix[8] = 1;
// Compute the X and Y global coordinates
int x = blockIdx.x * TILE_WIDTH + threadIdx.x ;//- FILTER_RADIUS;
int y = blockIdx.y * TILE_HEIGHT + threadIdx.y ;//- FILTER_RADIUS;
// Get the Global index into the original image
int index = y * (width) + x;
// STUDENT: Check 1
// Handle the extra-thread case where x or y falls outside the image width or height
if (x >= width || y >= height)
return;
// STUDENT: Check 2
// Handle the border cases of the global image
if( x < FILTER_RADIUS || y < FILTER_RADIUS) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((x > width - FILTER_RADIUS - 1)&&(x <width)) {
g_DataOut[index] = g_DataIn[index];
return;
}
if ((y > height - FILTER_RADIUS - 1)&&(y < height)) {
g_DataOut[index] = g_DataIn[index];
return;
}
// Perform the first load of values into shared memory
int sharedIndex = threadIdx.y * blockDim.x + threadIdx.x; // row-major index into the tile (row stride = tile width)
sharedMem[sharedIndex] = g_DataIn[index];
__syncthreads();
// STUDENT: Make sure that only the appropriate thread ids write the sum of the neighbors.
// float sumX = 0, sumY=0;
// g_DataOut[index] = abs(sumX) + abs(sumY) > EDGE_VALUE_THRESHOLD ? 255 : 0;
// if((threadIdx.x >= FILTER_RADIUS) && (threadIdx.x < (BLOCK_WIDTH - FILTER_RADIUS))&&(threadIdx.y >= FILTER_RADIUS) && (threadIdx.y < (BLOCK_HEIGHT - FILTER_RADIUS)))
if((threadIdx.x >= FILTER_RADIUS) && (threadIdx.x <( BLOCK_WIDTH - FILTER_RADIUS)) &&(threadIdx.y >= FILTER_RADIUS) && (threadIdx.y < (BLOCK_HEIGHT - FILTER_RADIUS)))
{
//float sum = 0;
float sumX = 0, sumY=0;
for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; ++dy)
{
for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; ++dx)
{
float pixelValue = (float)(sharedMem[sharedIndex + (dy * blockDim.x + dx)]);
//float pixelValue = (float)(g_DataIn[Index + (dy * blockDim.x + dx)]);
//float Pixel = (float)(sharedMem[sharedIndex + (dy * blockDim.x + dx)]);
//sum += pixelValue;
// sumX += Pixel * s_SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER +(dx+FILTER_RADIUS)];
//sumY += Pixel * s_SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER +(dy+FILTER_RADIUS)];
sumX+= pixelValue * s_SobelMatrix[(dy +FILTER_RADIUS)*(FILTER_DIAMETER)+(dx + FILTER_RADIUS)];
sumY+= pixelValue * s_SobelMatrix[(dx +FILTER_RADIUS)*(FILTER_DIAMETER)+(dy + FILTER_RADIUS)];
}
}
// g_DataOut[index] = (BYTE)(sum / FILTER_AREA);
g_DataOut[index] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 255 : 0;//1023 : 0
}
}
/*__global__ void AverageFilter(unsigned char* g_DataIn, unsigned char* g_DataOut, int width, int height)
{
__shared__ unsigned char sharedMem[BLOCK_HEIGHT*BLOCK_WIDTH];
int x = blockIdx.x * TILE_WIDTH + threadIdx.x ;//- FILTER_RADIUS;
int y = blockIdx.y * TILE_HEIGHT + threadIdx.y ;//- FILTER_RADIUS;
// Get the Global index into the original image
int index = y * (width) + x;
// STUDENT: write code for Average Filter : use Sobel as base code
}
__global__ void HighBoostFilter(unsigned char* g_DataIn, unsigned char* g_DataOut, int width, int height)
{
__shared__ unsigned char sharedMem[BLOCK_HEIGHT*BLOCK_WIDTH];
int x = blockIdx.x * TILE_WIDTH + threadIdx.x ;//- FILTER_RADIUS;
int y = blockIdx.y * TILE_HEIGHT + threadIdx.y ;//- FILTER_RADIUS;
// Get the Global index into the original image
int index = y * (width) + x;
// STUDENT: write code for High Boost Filter : use Sobel as base code
}
*/
#endif // _FILTER_KERNEL_H_
|
8226305b1175a6957de27c6c867b5d7aa70fdcee.hip | // !!! This is a file automatically generated by hipify!!!
/* ==================================================================
Programmer: Daniel Sawyer ([email protected])
The basic SDH algorithm implementation for 3D data
To compile: nvcc proj2-danielsawyer.cu -o SDH in the rc machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
//MY INCLUDES
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
double z1 = atom_list[ind1].z_pos;
double z2 = atom_list[ind2].z_pos;
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
//overloaded to show GPU time
double report_running_time(int blah) {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("\nRunning time for GPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
//overloaded taking 1 arg
void output_histogram(bucket* histogram1){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram1[i].d_cnt);
total_cnt += histogram1[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
//overloaded taking 2 args
void output_histogram(bucket* histogram1, bucket* histogram2){
int i;
long long total_cnt = 0, total_cnt2 = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", abs(histogram1[i].d_cnt - histogram2[i].d_cnt));
total_cnt += histogram1[i].d_cnt;
total_cnt2 += histogram2[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", abs(total_cnt - total_cnt2));
else printf("| ");
}
}
/*
brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
int i, j, h_pos;
double dist;
for(i = 0; i < PDH_acnt; i++) {
for(j = i+1; j < PDH_acnt; j++) {
dist = p2p_distance(i,j);
h_pos = (int) (dist / PDH_res);
histogram[h_pos].d_cnt++;
}
}
return 0;
}
//CUDA KERNEL ALGO 1
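// One thread per point i: loops over every j > i, computes the pairwise distance and atomically increments the matching global histogram bucket.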
__global__ void PDH_Algo1(atom *d_atom_list, bucket *d_histogram, long long d_PDH_acnt, double d_PDH_res) {
register double dist, xt, yt, zt;
register int i, j, h_pos;
i = threadIdx.x + blockDim.x * blockIdx.x;
for(j = i+1; j < d_PDH_acnt; ++j) {
xt = d_atom_list[i].x_pos - d_atom_list[j].x_pos;
yt = d_atom_list[i].y_pos - d_atom_list[j].y_pos;
zt = d_atom_list[i].z_pos - d_atom_list[j].z_pos;
dist = sqrt(xt*xt + yt*yt + zt*zt);
h_pos = (int)(dist/d_PDH_res);
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
}
}
//CUDA KERNEL ALGO 2
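// Tiled version: shared memory holds the block's own points (L) and the current partner block's points (R);
// inter-block pairs are processed first, intra-block pairs last, with atomic updates to the global histogram.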
__global__ void PDH_Algo2(atom *d_atom_list, bucket *d_histogram, long long d_PDH_acnt, double d_PDH_res, int nbuckets, int nblocks) {
register double dist, xt, yt, zt;
register int i, j, h_pos;
extern __shared__ atom smem[];
atom* R = (atom*)smem;
atom* L = (atom*)&R[blockDim.x];
L[threadIdx.x] = d_atom_list[threadIdx.x + blockIdx.x*blockDim.x];
__syncthreads();
for(i = blockIdx.x+1; i < nblocks; i++) {
R[threadIdx.x] = d_atom_list[threadIdx.x + i*blockDim.x];
__syncthreads();
if(i*blockDim.x < d_PDH_acnt)
for(j = 0; j < blockDim.x; j++) {
if(j + i*blockDim.x < d_PDH_acnt) {
xt = L[threadIdx.x].x_pos - R[j].x_pos;
yt = L[threadIdx.x].y_pos - R[j].y_pos;
zt = L[threadIdx.x].z_pos - R[j].z_pos;
dist = sqrt(xt*xt + yt*yt + zt*zt);
h_pos = (int)(dist/d_PDH_res);
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
__syncthreads();
}
}
}
for(j = threadIdx.x +1; j < blockDim.x; j++) {
if(j + blockIdx.x*blockDim.x < d_PDH_acnt) {
xt = L[threadIdx.x].x_pos - L[j].x_pos;
yt = L[threadIdx.x].y_pos - L[j].y_pos;
zt = L[threadIdx.x].z_pos - L[j].z_pos;
dist = sqrt(xt*xt + yt*yt + zt*zt);
h_pos = (int)(dist/d_PDH_res);
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
__syncthreads();
}
}
}
//CUDA KERNEL ALGO 3
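// Like algorithm 2, but each thread keeps its own point in a register; shared memory stages the partner tile
// plus a per-block histogram buffer (initialized here, although counts are still accumulated directly in global memory).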
__global__ void PDH_Algo3(atom *d_atom_list, bucket *d_histogram, long long d_PDH_acnt, double d_PDH_res, int nbuckets, int nblocks) {
register double dist, xt, yt, zt;
register int i, j, h_pos;
register atom L;
extern __shared__ atom smem[];
atom* R = (atom*)smem;
bucket* s_hist = (bucket*)&R[blockDim.x];
if(threadIdx.x < nbuckets)
s_hist[threadIdx.x].d_cnt = 0;
L = d_atom_list[threadIdx.x + blockIdx.x*blockDim.x];
__syncthreads();
for(i = blockIdx.x+1; i < nblocks; i++) {
R[threadIdx.x] = d_atom_list[threadIdx.x + i*blockDim.x];
__syncthreads();
if(i*blockDim.x < d_PDH_acnt)
for(j = 0; j < blockDim.x; j++) {
if(j + i*blockDim.x < d_PDH_acnt) {
xt = L.x_pos - R[j].x_pos;
yt = L.y_pos - R[j].y_pos;
zt = L.z_pos - R[j].z_pos;
dist = sqrt(xt*xt + yt*yt + zt*zt);
h_pos = (int)(dist/d_PDH_res);
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
__syncthreads();
}
}
}
R[threadIdx.x] = L;
__syncthreads();
for(j = threadIdx.x +1; j < blockDim.x; j++) {
if(j + blockIdx.x*blockDim.x < d_PDH_acnt) {
xt = L.x_pos - R[j].x_pos;
yt = L.y_pos - R[j].y_pos;
zt = L.z_pos - R[j].z_pos;
dist = sqrt(xt*xt + yt*yt + zt*zt);
h_pos = (int)(dist/d_PDH_res);
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
__syncthreads();
}
}
}
float CudaPrep(bucket * histogram2) {
//sizes of atom and bucket arrays
int size_atom = sizeof(atom)*PDH_acnt;
int size_hist = sizeof(bucket)*num_buckets;
//grid and block sizes
int dev = 0;
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
dim3 threads(64);
//dim3 threads(deviceProp.warpSize);
dim3 grid(ceil((float)PDH_acnt/threads.x));
//Device Vars
bucket *d_histogram;
atom *d_atom_list;
int num_blocks = ceil((float)PDH_acnt/threads.x);
//Allocate device memory
hipMalloc((void **) &d_histogram, size_hist);
hipMalloc((void**) &d_atom_list, size_atom);
//Copy to device
hipMemcpy(d_atom_list, atom_list, size_atom, hipMemcpyHostToDevice);
hipMemset(d_histogram, 0, size_hist);
//kernel execution time crap
float elapsedTime;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//run cuda kernel
//PDH_Algo1<<<grid,threads>>>(d_atom_list, d_histogram, PDH_acnt, PDH_res);
//PDH_Algo2<<<grid, threads, 2*threads.x*sizeof(atom)>>>(d_atom_list, d_histogram, PDH_acnt, PDH_res, num_buckets, num_blocks);
hipLaunchKernelGGL(( PDH_Algo3), dim3(grid), dim3(threads), threads.x*sizeof(atom) + num_buckets*sizeof(bucket), 0, d_atom_list, d_histogram, PDH_acnt, PDH_res, num_buckets, num_blocks);
//kernel execution stop
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
//copy new gpu histogram back to host from device
hipMemcpy(histogram2, d_histogram, size_hist, hipMemcpyDeviceToHost);
//free device memory
hipFree(d_histogram); hipFree(d_atom_list);
return elapsedTime;
}
int main(int argc, char **argv)
{
int i;
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
//printf("args are %d and %f\n", PDH_acnt, PDH_res);
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
srand(1);
/* generate data following a uniform distribution */
for(i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/* start counting time */
gettimeofday(&startTime, &Idunno);
/* call CPU single thread version to compute the histogram */
//PDH_baseline();
/* check the total running time */
report_running_time();
/* print out the histogram */
output_histogram();
/* NEW SHIT */
//New histogram that will come from the device
bucket *histogram2 = (bucket*)malloc(sizeof(bucket)*num_buckets);
//memset(histogram2, 0, size_hist);
//start time
gettimeofday(&startTime, &Idunno);
//run on GPU
float elapsedTime = CudaPrep(histogram2);
//check runtime
report_running_time(1);
//print device histogram
output_histogram(histogram2);
//Difference between cpu and gpu
printf("\nCPU vs GPU Histogram Differences\n");
output_histogram(histogram, histogram2);
//Free memory.
free(histogram); free(atom_list);
printf("\n******** Total Running Time of Kernel = %0.5f ms *******\n", elapsedTime);
return 0;
}
| 8226305b1175a6957de27c6c867b5d7aa70fdcee.cu | /* ==================================================================
Programmer: Daniel Sawyer ([email protected])
The basic SDH algorithm implementation for 3D data
To compile: nvcc proj2-danielsawyer.cu -o SDH in the rc machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
//MY INCLUDES
#include <iostream>
#include <cuda_runtime.h>
#include <cuda.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
double z1 = atom_list[ind1].z_pos;
double z2 = atom_list[ind2].z_pos;
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time() {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
//overloaded to show GPU time
double report_running_time(int blah) {
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("\nRunning time for GPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
//overloaded taking 1 arg
void output_histogram(bucket* histogram1){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram1[i].d_cnt);
total_cnt += histogram1[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
//overloaded taking 2 args
void output_histogram(bucket* histogram1, bucket* histogram2){
int i;
long long total_cnt = 0, total_cnt2 = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", abs(histogram1[i].d_cnt - histogram2[i].d_cnt));
total_cnt += histogram1[i].d_cnt;
total_cnt2 += histogram2[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", abs(total_cnt - total_cnt2));
else printf("| ");
}
}
/*
brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
int i, j, h_pos;
double dist;
for(i = 0; i < PDH_acnt; i++) {
for(j = i+1; j < PDH_acnt; j++) {
dist = p2p_distance(i,j);
h_pos = (int) (dist / PDH_res);
histogram[h_pos].d_cnt++;
}
}
return 0;
}
//CUDA KERNEL ALGO 1
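// One thread per point i: loops over every j > i, computes the pairwise distance and atomically increments the matching global histogram bucket.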
__global__ void PDH_Algo1(atom *d_atom_list, bucket *d_histogram, long long d_PDH_acnt, double d_PDH_res) {
register double dist, xt, yt, zt;
register int i, j, h_pos;
i = threadIdx.x + blockDim.x * blockIdx.x;
for(j = i+1; j < d_PDH_acnt; ++j) {
xt = d_atom_list[i].x_pos - d_atom_list[j].x_pos;
yt = d_atom_list[i].y_pos - d_atom_list[j].y_pos;
zt = d_atom_list[i].z_pos - d_atom_list[j].z_pos;
dist = sqrt(xt*xt + yt*yt + zt*zt);
h_pos = (int)(dist/d_PDH_res);
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
}
}
//CUDA KERNEL ALGO 2
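// Tiled version: shared memory holds the block's own points (L) and the current partner block's points (R);
// inter-block pairs are processed first, intra-block pairs last, with atomic updates to the global histogram.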
__global__ void PDH_Algo2(atom *d_atom_list, bucket *d_histogram, long long d_PDH_acnt, double d_PDH_res, int nbuckets, int nblocks) {
register double dist, xt, yt, zt;
register int i, j, h_pos;
extern __shared__ atom smem[];
atom* R = (atom*)smem;
atom* L = (atom*)&R[blockDim.x];
L[threadIdx.x] = d_atom_list[threadIdx.x + blockIdx.x*blockDim.x];
__syncthreads();
for(i = blockIdx.x+1; i < nblocks; i++) {
R[threadIdx.x] = d_atom_list[threadIdx.x + i*blockDim.x];
__syncthreads();
if(i*blockDim.x < d_PDH_acnt)
for(j = 0; j < blockDim.x; j++) {
if(j + i*blockDim.x < d_PDH_acnt) {
xt = L[threadIdx.x].x_pos - R[j].x_pos;
yt = L[threadIdx.x].y_pos - R[j].y_pos;
zt = L[threadIdx.x].z_pos - R[j].z_pos;
dist = sqrt(xt*xt + yt*yt + zt*zt);
h_pos = (int)(dist/d_PDH_res);
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
__syncthreads();
}
}
}
for(j = threadIdx.x +1; j < blockDim.x; j++) {
if(j + blockIdx.x*blockDim.x < d_PDH_acnt) {
xt = L[threadIdx.x].x_pos - L[j].x_pos;
yt = L[threadIdx.x].y_pos - L[j].y_pos;
zt = L[threadIdx.x].z_pos - L[j].z_pos;
dist = sqrt(xt*xt + yt*yt + zt*zt);
h_pos = (int)(dist/d_PDH_res);
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
__syncthreads();
}
}
}
//CUDA KERNEL ALGO 3
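// Like algorithm 2, but each thread keeps its own point in a register; shared memory stages the partner tile
// plus a per-block histogram buffer (initialized here, although counts are still accumulated directly in global memory).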
__global__ void PDH_Algo3(atom *d_atom_list, bucket *d_histogram, long long d_PDH_acnt, double d_PDH_res, int nbuckets, int nblocks) {
register double dist, xt, yt, zt;
register int i, j, h_pos;
register atom L;
extern __shared__ atom smem[];
atom* R = (atom*)smem;
bucket* s_hist = (bucket*)&R[blockDim.x];
if(threadIdx.x < nbuckets)
s_hist[threadIdx.x].d_cnt = 0;
L = d_atom_list[threadIdx.x + blockIdx.x*blockDim.x];
__syncthreads();
for(i = blockIdx.x+1; i < nblocks; i++) {
R[threadIdx.x] = d_atom_list[threadIdx.x + i*blockDim.x];
__syncthreads();
if(i*blockDim.x < d_PDH_acnt)
for(j = 0; j < blockDim.x; j++) {
if(j + i*blockDim.x < d_PDH_acnt) {
xt = L.x_pos - R[j].x_pos;
yt = L.y_pos - R[j].y_pos;
zt = L.z_pos - R[j].z_pos;
dist = sqrt(xt*xt + yt*yt + zt*zt);
h_pos = (int)(dist/d_PDH_res);
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
__syncthreads();
}
}
}
R[threadIdx.x] = L;
__syncthreads();
for(j = threadIdx.x +1; j < blockDim.x; j++) {
if(j + blockIdx.x*blockDim.x < d_PDH_acnt) {
xt = L.x_pos - R[j].x_pos;
yt = L.y_pos - R[j].y_pos;
zt = L.z_pos - R[j].z_pos;
dist = sqrt(xt*xt + yt*yt + zt*zt);
h_pos = (int)(dist/d_PDH_res);
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
__syncthreads();
}
}
}
float CudaPrep(bucket * histogram2) {
//sizes of atom and bucket arrays
int size_atom = sizeof(atom)*PDH_acnt;
int size_hist = sizeof(bucket)*num_buckets;
//grid and block sizes
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
dim3 threads(64);
//dim3 threads(deviceProp.warpSize);
dim3 grid(ceil((float)PDH_acnt/threads.x));
//Device Vars
bucket *d_histogram;
atom *d_atom_list;
int num_blocks = ceil((float)PDH_acnt/threads.x);
//Allocate device memory
cudaMalloc((void **) &d_histogram, size_hist);
cudaMalloc((void**) &d_atom_list, size_atom);
//Copy to device
cudaMemcpy(d_atom_list, atom_list, size_atom, cudaMemcpyHostToDevice);
cudaMemset(d_histogram, 0, size_hist);
//kernel execution time crap
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//run cuda kernel
//PDH_Algo1<<<grid,threads>>>(d_atom_list, d_histogram, PDH_acnt, PDH_res);
//PDH_Algo2<<<grid, threads, 2*threads.x*sizeof(atom)>>>(d_atom_list, d_histogram, PDH_acnt, PDH_res, num_buckets, num_blocks);
PDH_Algo3<<<grid, threads, threads.x*sizeof(atom) + num_buckets*sizeof(bucket)>>>(d_atom_list, d_histogram, PDH_acnt, PDH_res, num_buckets, num_blocks);
//kernel execution stop
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//copy new gpu histogram back to host from device
cudaMemcpy(histogram2, d_histogram, size_hist, cudaMemcpyDeviceToHost);
//free device memory
cudaFree(d_histogram); cudaFree(d_atom_list);
return elapsedTime;
}
int main(int argc, char **argv)
{
int i;
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
//printf("args are %d and %f\n", PDH_acnt, PDH_res);
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
srand(1);
/* generate data following a uniform distribution */
for(i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/* start counting time */
gettimeofday(&startTime, &Idunno);
/* call CPU single thread version to compute the histogram */
//PDH_baseline();
/* check the total running time */
report_running_time();
/* print out the histogram */
output_histogram();
/* NEW SHIT */
//New histogram that will come from the device
bucket *histogram2 = (bucket*)malloc(sizeof(bucket)*num_buckets);
//memset(histogram2, 0, size_hist);
//start time
gettimeofday(&startTime, &Idunno);
//run on GPU
float elapsedTime = CudaPrep(histogram2);
//check runtime
report_running_time(1);
//print device histogram
output_histogram(histogram2);
//Difference between cpu and gpu
printf("\nCPU vs GPU Histogram Differences\n");
output_histogram(histogram, histogram2);
//Free memory.
free(histogram); free(atom_list);
printf("\n******** Total Running Time of Kernel = %0.5f ms *******\n", elapsedTime);
return 0;
}
|
a8c9bf017d5c67eb2d8f7fdce201c678f5b7280e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <stdlib.h>
#include <time.h>
clock_t sum_time = 0;
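// Transposes the 1024x1024 matrix tile by tile: each 16x16 block stages its tile in shared memory and writes it back transposed into `to`.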
__global__ void kernel(int *or_mat, int * to){
__shared__ int mat[16][16];
int bx = blockIdx.x * 16;
int by = blockIdx.y * 16;
int i = by + threadIdx.y; int j = bx + threadIdx.x;
int ti = bx + threadIdx.y; int tj = by + threadIdx.x;
if (i < 1024 && j < 1024){
mat[threadIdx.x][threadIdx.y] = or_mat[i * 1024 + j];
}
__syncthreads();
if (tj < 1024 && ti < 1024){
to[ti * 1024 + tj] = mat[threadIdx.y][threadIdx.x];
}
}
int main(){
int n, it, i, j;
int * mat, *to, *d_mat, *d_to;
n = 1024;
mat = (int*)malloc(n*n*sizeof(int));
to = (int*)malloc(n*n*sizeof(int));
printf("input the iter times:\n");
scanf("%d", &it);
for (i = 0; i<n; i++){
for (j = 0; j<n; j++){
mat[i*n + j] = 1;
}
}
hipMalloc((void**)&d_mat, n*n*sizeof(int));
hipMalloc((void**)&d_to, n*n*sizeof(int));
hipMemcpy(d_mat, mat, n*n*sizeof(int), hipMemcpyHostToDevice);
dim3 dimBlock(16, 16);
dim3 dimGrid(64, 64);
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
for (i = 0; i<it; i++){
kernel << <dimGrid, dimBlock >> >(d_mat, d_to);
}
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
float time;
hipEventElapsedTime(&time, start, stop);
printf("The total running time is: %f\n", time);
printf("input any to exit\n");
scanf("%d", &it);
}
| a8c9bf017d5c67eb2d8f7fdce201c678f5b7280e.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdlib.h>
#include <time.h>
clock_t sum_time = 0;
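// Transposes the 1024x1024 matrix tile by tile: each 16x16 block stages its tile in shared memory and writes it back transposed into `to`.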
__global__ void kernel(int *or_mat, int * to){
__shared__ int mat[16][16];
int bx = blockIdx.x * 16;
int by = blockIdx.y * 16;
int i = by + threadIdx.y; int j = bx + threadIdx.x;
int ti = bx + threadIdx.y; int tj = by + threadIdx.x;
if (i < 1024 && j < 1024){
mat[threadIdx.x][threadIdx.y] = or_mat[i * 1024 + j];
}
__syncthreads();
if (tj < 1024 && ti < 1024){
to[ti * 1024 + tj] = mat[threadIdx.y][threadIdx.x];
}
}
int main(){
int n, it, i, j;
int * mat, *to, *d_mat, *d_to;
n = 1024;
mat = (int*)malloc(n*n*sizeof(int));
to = (int*)malloc(n*n*sizeof(int));
printf("input the iter times:\n");
scanf("%d", &it);
for (i = 0; i<n; i++){
for (j = 0; j<n; j++){
mat[i*n + j] = 1;
}
}
cudaMalloc((void**)&d_mat, n*n*sizeof(int));
cudaMalloc((void**)&d_to, n*n*sizeof(int));
cudaMemcpy(d_mat, mat, n*n*sizeof(int), cudaMemcpyHostToDevice);
dim3 dimBlock(16, 16);
dim3 dimGrid(64, 64);
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for (i = 0; i<it; i++){
kernel << <dimGrid, dimBlock >> >(d_mat, d_to);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
float time;
cudaEventElapsedTime(&time, start, stop);
printf("The total running time is: %f\n", time);
printf("input any to exit\n");
scanf("%d", &it);
}
|
5ca2b3d3b5dd5b457954f8359ed6ddb1948bb60d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include "helper_cuda.h"
template <typename T, typename C>
__global__
void sub(T* output, const C* starter, const C* stopper, int64_t startsoffset, int64_t stopsoffset, int64_t n) {
int thid = threadIdx.x + blockIdx.x * blockDim.x;
if (thid < n) {
C start = starter[thid + startsoffset];
C stop = stopper[thid + stopsoffset];
assert(start <= stop);
output[thid] = stop - start;
}
}
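// Copies the start/stop arrays to the device, launches `sub` to get the per-element lengths,
// then builds the offsets with thrust::exclusive_scan, appending the grand total as the final element.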
template <typename T, typename C>
void prefix_sum(T* output, const C* arr, const C* arr2, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
int block, thread;
if (length > 1024) {
block = (length / 1024) + 1;
thread = 1024;
}
else {
thread = length;
block = 1;
}
T* d_output;
C* d_arr, * d_arr2;
checkCudaErrors(hipMalloc((void**)&d_output, length * sizeof(T)));
checkCudaErrors(hipMalloc((void**)&d_arr, length * sizeof(C)));
checkCudaErrors(hipMemcpy(d_arr, arr, length * sizeof(C), hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void**)&d_arr2, length * sizeof(C)));
checkCudaErrors(hipMemcpy(d_arr2, arr2, length * sizeof(C), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( sub<T, C>), dim3(block), dim3(thread), 0, 0, d_output, d_arr, d_arr2, startsoffset, stopsoffset, length);
checkCudaErrors(hipDeviceSynchronize());
thrust::device_vector<T> data(d_output, d_output+length);
thrust::device_vector<T> temp(data.size() + 1);
thrust::exclusive_scan(data.begin(), data.end(), temp.begin());
temp[data.size()] = data.back() + temp[data.size() - 1];
thrust::copy(temp.begin(), temp.end(), output);
checkCudaErrors(hipFree(d_output));
checkCudaErrors(hipFree(d_arr));
checkCudaErrors(hipFree(d_arr2));
}
int main() {
int const size = 100000;
int starter[size], stopper[size], output[size + 1];
for (int i = 0; i < size; i++) {
starter[i] = i;
stopper[i] = i + 1;
}
prefix_sum<int, int>(output, starter, stopper, 0, 0, size);
hipDeviceSynchronize();
for (int i = 0; i < size + 1; i++) {
std::cout << output[i] << "\n";
}
}
| 5ca2b3d3b5dd5b457954f8359ed6ddb1948bb60d.cu | #include <iostream>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "helper_cuda.h"
template <typename T, typename C>
__global__
void sub(T* output, const C* starter, const C* stopper, int64_t startsoffset, int64_t stopsoffset, int64_t n) {
int thid = threadIdx.x + blockIdx.x * blockDim.x;
if (thid < n) {
C start = starter[thid + startsoffset];
C stop = stopper[thid + stopsoffset];
assert(start <= stop);
output[thid] = stop - start;
}
}
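// Copies the start/stop arrays to the device, launches `sub` to get the per-element lengths,
// then builds the offsets with thrust::exclusive_scan, appending the grand total as the final element.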
template <typename T, typename C>
void prefix_sum(T* output, const C* arr, const C* arr2, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
int block, thread;
if (length > 1024) {
block = (length / 1024) + 1;
thread = 1024;
}
else {
thread = length;
block = 1;
}
T* d_output;
C* d_arr, * d_arr2;
checkCudaErrors(cudaMalloc((void**)&d_output, length * sizeof(T)));
checkCudaErrors(cudaMalloc((void**)&d_arr, length * sizeof(C)));
checkCudaErrors(cudaMemcpy(d_arr, arr, length * sizeof(C), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void**)&d_arr2, length * sizeof(C)));
checkCudaErrors(cudaMemcpy(d_arr2, arr2, length * sizeof(C), cudaMemcpyHostToDevice));
sub<T, C><<<block, thread>>>(d_output, d_arr, d_arr2, startsoffset, stopsoffset, length);
checkCudaErrors(cudaDeviceSynchronize());
thrust::device_vector<T> data(d_output, d_output+length);
thrust::device_vector<T> temp(data.size() + 1);
thrust::exclusive_scan(data.begin(), data.end(), temp.begin());
temp[data.size()] = data.back() + temp[data.size() - 1];
thrust::copy(temp.begin(), temp.end(), output);
checkCudaErrors(cudaFree(d_output));
checkCudaErrors(cudaFree(d_arr));
checkCudaErrors(cudaFree(d_arr2));
}
int main() {
int const size = 100000;
int starter[size], stopper[size], output[size + 1];
for (int i = 0; i < size; i++) {
starter[i] = i;
stopper[i] = i + 1;
}
prefix_sum<int, int>(output, starter, stopper, 0, 0, size);
cudaDeviceSynchronize();
for (int i = 0; i < size + 1; i++) {
std::cout << output[i] << "\n";
}
}
|
c31bfa261bea35d92a31fa26071bee8f81e41f8c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ENGR-E 517 High Performance Computing
* Original Author : Matt Anderson (Serial Implementation 2D)
* Name : Ninaad Joshi (Serial and Parallel Implementation 1D)
* Project : Demonstration of the 2D Heat Distribution
* Problem using CUDA programming model
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <sys/time.h>
/*****************************************************************/
/* set the DEBUG flag to 1 to display the values for every iteration,
* or set to 0 for measuring time for both CPU and GPU
*/
#ifndef DEBUG
#define DEBUG 0
#endif
/* set the DISPLAY flag to 1 to display the final matrix for CPU and GPU
*/
#ifndef DISPLAY
#define DISPLAY 0
#endif
/****************************************************************/
#define TEMP 50.0
#define EPS 1e-6
#define I_FIX 5
#define J_FIX 5
#ifndef COLS
#define COLS 100
#endif
#ifndef ROWS
#define ROWS 100
#endif
#ifndef BLOCK_SIZE_X
#define BLOCK_SIZE_X 32
#endif
#ifndef BLOCK_SIZE_Y
#define BLOCK_SIZE_Y 32
#endif
double* alloc_matrix(){
double* matrix;
matrix = (double*) malloc(ROWS * COLS * sizeof(double));
return matrix;
}
void init_matrix(double* matrix){
for (int i = 0; i < ROWS; i++)
for (int j = 0; j < COLS; j++) {
matrix[i * COLS + j] = 0.0;
}
matrix[I_FIX * COLS + J_FIX] = TEMP;
}
void print_matrix(double* matrix){
for (int i = 0; i < ROWS; i++) {
for (int j = 0; j < COLS; j++)
printf("%3.7lf ", matrix[i * COLS + j]);
printf("\n");
}
}
void copy_matrix(double* dest, double* source) {
for (int i = 0; i < ROWS; i++)
for (int j = 0; j < COLS; j++)
dest[i * COLS + j] = source[i * COLS + j];
}
double max_abs(double* m1, double* m2){
double max_val = DBL_MIN;
for (int i = 0; i < ROWS; i++)
for (int j = 0; j < COLS; j++){
if (fabs(m1[i * COLS + j] - m2[i * COLS + j]) > max_val) {
max_val = fabs(m1[i * COLS + j] - m2[i * COLS + j]);
}
}
return max_val;
}
/***********CPU***********/
void compute_new_values(double* old_matrix, double* new_matrix){
for (int i = 1; i < ROWS-1; i++)
for (int j= 1; j < COLS-1; j++)
new_matrix[i * COLS + j] = 0.25 * (old_matrix[(i-1) * COLS + j]
+ old_matrix[(i+1) * COLS + j]
+ old_matrix[i * COLS + (j-1)]
+ old_matrix[i * COLS + (j+1)]);
new_matrix[I_FIX * COLS + J_FIX] = TEMP;
}
/***********CPU***********/
/***********GPU***********/
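// One thread per grid point: interior points get the 4-neighbour Jacobi average,
// the heat source at (I_FIX, J_FIX) is pinned back to TEMP every sweep, and
// boundary points are left untouched.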
__global__ void compute_new_values_gpu(const double* __restrict__ d_old_matrix,
double* __restrict__ d_new_matrix){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i == I_FIX && j == J_FIX)
d_new_matrix[I_FIX * COLS + J_FIX] = TEMP;
else if (0 < i && i < ROWS - 1 && 0 < j && j < COLS - 1)
d_new_matrix[i * COLS + j] = 0.25 * (d_old_matrix[(i-1) * COLS + j]
+ d_old_matrix[(i+1) * COLS + j]
+ d_old_matrix[i * COLS + (j-1)]
+ d_old_matrix[i * COLS + (j+1)]);
}
/***********GPU***********/
/* Round n1 / n2 up to the nearest integer
 */
int divideUp(int n1, int n2) {
return (n1 % n2 != 0) ? (n1 / n2 + 1) : (n1 / n2);
}
int main(int argc, char *argv[]) {
//CPU
double *a_old = alloc_matrix(); //allocate memory for the matrices
double *a_new = alloc_matrix();
struct timeval a_start, a_end;
double tos_serial;
// GPU
long int iterations = 0, i = 0;
double *h_in = alloc_matrix(); //allocate memory for the matrices
double *h_out = alloc_matrix();
int error;
double *d_in;
double *d_out;
struct timeval h_start, h_end;
double tos_cuda;
printf("DISPLAY = %d DEBUG = %d ROWS = %d COLS = %d\n", DISPLAY, DEBUG, ROWS, COLS);
printf("BLOCK_SIZE_X = %d BLOCK_SIZE_Y = %d\n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
/*************************CPU**************************/
init_matrix(a_old); //initialize the matrices
init_matrix(a_new);
printf("CPU: Starting the serial heat distribution\n");
if (DISPLAY || DEBUG){
printf("CPU:The initial heat distribution matrix is:\n");
print_matrix(a_old);
}
gettimeofday(&a_start, NULL);
while (1) {
if (DEBUG)
printf("\nCPU:Performing a new iteration...%ld\n", iterations);
//compute new values and put them into a_new
compute_new_values(a_old, a_new);
if (DEBUG) {
printf("CPU:a_old is:\n"); //output matrix to screen
print_matrix(a_old);
printf("CPU:a_new is:\n");
print_matrix(a_new);
}
//calculate the maximum absolute differences among pairwise
// differences of old and new matrix elements
double max_diff = max_abs(a_old, a_new);
if (DEBUG)
printf("CPU:Max diff is: %f\n", max_diff);
if (max_diff < EPS)
break;
copy_matrix(a_old, a_new); //assign values of a_new to a_old
if (DEBUG)
printf("CPU:End of iteration...%ld\n", iterations);
++iterations;
}
gettimeofday(&a_end, NULL);
tos_serial = (a_end.tv_sec - a_start.tv_sec) + \
(a_end.tv_usec - a_start.tv_usec)/1000000.0;
printf("CPU:Time required is %.3e\n", tos_serial);
if (DISPLAY || DEBUG){
printf("CPU:The final heat distribution matrix is:\n");
print_matrix(a_new);
}
printf("The iterations performed by the serial code are %ld\n", iterations);
/*************************GPU**********************/
printf("GPU:Starting the parallel heat distribution on CUDA\n");
init_matrix(h_in); //initialize the matrices
init_matrix(h_out);
hipMalloc((void **)&d_in, (size_t) ROWS * COLS * sizeof(double));
error = hipGetLastError();
if (DEBUG)
printf("GPU:d_in hipMalloc error = %d\n", error);
hipMalloc((void **)&d_out, (size_t) ROWS * COLS * sizeof(double));
error = hipGetLastError();
if (DEBUG)
printf("GPU:d_out hipMalloc error = %d\n", error);
// copy data from host memory to device memory
hipMemcpy(d_in, h_in, ROWS * COLS * sizeof(double), hipMemcpyHostToDevice);
// copy data from device memory to device memory
hipMemcpy(d_out, d_in, ROWS * COLS * sizeof(double), hipMemcpyDeviceToDevice);
// block and grid dimensions
dim3 blocks(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 grids(divideUp(ROWS, BLOCK_SIZE_X), divideUp(COLS, BLOCK_SIZE_Y));
gettimeofday(&h_start, NULL);
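    // Replay the iteration count measured by the serial run (+1 extra sweep);
    // convergence is only re-checked on the host when DEBUG is enabled.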
for(i = 0; i < iterations + 1; ++i) {
//compute new values and put them into d_out
hipLaunchKernelGGL(( compute_new_values_gpu), dim3(grids), dim3(blocks), 0, 0, d_in, d_out);
if (DEBUG){
printf("GPU:Performing a new iteration...%ld\n", i);
// copy data from device memory to host memory
hipMemcpy(h_in, d_in, ROWS * COLS * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(h_out, d_out, ROWS * COLS * sizeof(double), hipMemcpyDeviceToHost);
printf("GPU:d_in is:\n"); //output d_in to screen
print_matrix(h_in);
printf("GPU:d_out is:\n"); //output d_out to screen
print_matrix(h_out);
//calculate the maximum absolute differences among pairwise
// differences of old and new matrix elements
double max_diff = max_abs(h_in, h_out);
printf("GPU:Max diff is: %f\n", max_diff);
if (max_diff < EPS)
break;
printf("GPU:End of iteration...%ld\n", i);
}
// make the current d_out as d_in
hipMemcpy(d_in, d_out, ROWS * COLS * sizeof(double), hipMemcpyDeviceToDevice);
}
gettimeofday(&h_end, NULL);
// copy data from device memory to host memory
hipMemcpy(h_out, d_out, ROWS * COLS * sizeof(double), hipMemcpyDeviceToHost);
tos_cuda = (h_end.tv_sec - h_start.tv_sec) + \
(h_end.tv_usec - h_start.tv_usec)/1000000.0;
printf("GPU:Time required is %.3e seconds\n", tos_cuda);
if (DISPLAY || DEBUG){
printf("GPU:The final heat distribution matrix is:\n");
print_matrix(h_out);
}
//calculate the maximum absolute differences among pairwise
// differences of old and new matrix elements
double max_diff = max_abs(h_out, a_new);
printf("GPU:Max diff between serial and CUDA implementation is: %f\n",\
max_diff);
printf("Speed Up achieved is : %.3lf\n", tos_serial/tos_cuda);
free(h_in);
free(h_out);
hipFree(d_in);
hipFree(d_out);
return 0;
}
| c31bfa261bea35d92a31fa26071bee8f81e41f8c.cu | /* ENGR-E 517 High Performance Computing
* Original Author : Matt Anderson (Serial Implementation 2D)
* Name : Ninaad Joshi (Serial and Parallel Implementation 1D)
* Project : Demonstration of the 2D Heat Distribution
* Problem using CUDA programming model
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <sys/time.h>
/*****************************************************************/
/* set the DEBUG flag to 1 to display the values for every iteration,
* or set to 0 for measuring time for both CPU and GPU
*/
#ifndef DEBUG
#define DEBUG 0
#endif
/* set the DISPLAY flag to 1 to display the final matrix for CPU and GPU
*/
#ifndef DISPLAY
#define DISPLAY 0
#endif
/****************************************************************/
#define TEMP 50.0
#define EPS 1e-6
#define I_FIX 5
#define J_FIX 5
#ifndef COLS
#define COLS 100
#endif
#ifndef ROWS
#define ROWS 100
#endif
#ifndef BLOCK_SIZE_X
#define BLOCK_SIZE_X 32
#endif
#ifndef BLOCK_SIZE_Y
#define BLOCK_SIZE_Y 32
#endif
double* alloc_matrix(){
double* matrix;
matrix = (double*) malloc(ROWS * COLS * sizeof(double));
return matrix;
}
void init_matrix(double* matrix){
for (int i = 0; i < ROWS; i++)
for (int j = 0; j < COLS; j++) {
matrix[i * COLS + j] = 0.0;
}
matrix[I_FIX * COLS + J_FIX] = TEMP;
}
void print_matrix(double* matrix){
for (int i = 0; i < ROWS; i++) {
for (int j = 0; j < COLS; j++)
printf("%3.7lf ", matrix[i * COLS + j]);
printf("\n");
}
}
void copy_matrix(double* dest, double* source) {
for (int i = 0; i < ROWS; i++)
for (int j = 0; j < COLS; j++)
dest[i * COLS + j] = source[i * COLS + j];
}
double max_abs(double* m1, double* m2){
double max_val = DBL_MIN;
for (int i = 0; i < ROWS; i++)
for (int j = 0; j < COLS; j++){
if (fabs(m1[i * COLS + j] - m2[i * COLS + j]) > max_val) {
max_val = fabs(m1[i * COLS + j] - m2[i * COLS + j]);
}
}
return max_val;
}
/***********CPU***********/
void compute_new_values(double* old_matrix, double* new_matrix){
for (int i = 1; i < ROWS-1; i++)
for (int j= 1; j < COLS-1; j++)
new_matrix[i * COLS + j] = 0.25 * (old_matrix[(i-1) * COLS + j]
+ old_matrix[(i+1) * COLS + j]
+ old_matrix[i * COLS + (j-1)]
+ old_matrix[i * COLS + (j+1)]);
new_matrix[I_FIX * COLS + J_FIX] = TEMP;
}
/***********CPU***********/
/***********GPU***********/
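// One thread per grid point: interior points get the 4-neighbour Jacobi average,
// the heat source at (I_FIX, J_FIX) is pinned back to TEMP every sweep, and
// boundary points are left untouched.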
__global__ void compute_new_values_gpu(const double* __restrict__ d_old_matrix,
double* __restrict__ d_new_matrix){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i == I_FIX && j == J_FIX)
d_new_matrix[I_FIX * COLS + J_FIX] = TEMP;
else if (0 < i && i < ROWS - 1 && 0 < j && j < COLS - 1)
d_new_matrix[i * COLS + j] = 0.25 * (d_old_matrix[(i-1) * COLS + j]
+ d_old_matrix[(i+1) * COLS + j]
+ d_old_matrix[i * COLS + (j-1)]
+ d_old_matrix[i * COLS + (j+1)]);
}
/***********GPU***********/
/* Round n1 / n2 up to the nearest integer
 */
int divideUp(int n1, int n2) {
return (n1 % n2 != 0) ? (n1 / n2 + 1) : (n1 / n2);
}
int main(int argc, char *argv[]) {
//CPU
double *a_old = alloc_matrix(); //allocate memory for the matrices
double *a_new = alloc_matrix();
struct timeval a_start, a_end;
double tos_serial;
// GPU
long int iterations = 0, i = 0;
double *h_in = alloc_matrix(); //allocate memory for the matrices
double *h_out = alloc_matrix();
int error;
double *d_in;
double *d_out;
struct timeval h_start, h_end;
double tos_cuda;
printf("DISPLAY = %d DEBUG = %d ROWS = %d COLS = %d\n", DISPLAY, DEBUG, ROWS, COLS);
printf("BLOCK_SIZE_X = %d BLOCK_SIZE_Y = %d\n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
/*************************CPU**************************/
init_matrix(a_old); //initialize the matrices
init_matrix(a_new);
printf("CPU: Starting the serial heat distribution\n");
if (DISPLAY || DEBUG){
printf("CPU:The initial heat distribution matrix is:\n");
print_matrix(a_old);
}
gettimeofday(&a_start, NULL);
while (1) {
if (DEBUG)
printf("\nCPU:Performing a new iteration...%ld\n", iterations);
//compute new values and put them into a_new
compute_new_values(a_old, a_new);
if (DEBUG) {
printf("CPU:a_old is:\n"); //output matrix to screen
print_matrix(a_old);
printf("CPU:a_new is:\n");
print_matrix(a_new);
}
//calculate the maximum absolute differences among pairwise
// differences of old and new matrix elements
double max_diff = max_abs(a_old, a_new);
if (DEBUG)
printf("CPU:Max diff is: %f\n", max_diff);
if (max_diff < EPS)
break;
copy_matrix(a_old, a_new); //assign values of a_new to a_old
if (DEBUG)
printf("CPU:End of iteration...%ld\n", iterations);
++iterations;
}
gettimeofday(&a_end, NULL);
tos_serial = (a_end.tv_sec - a_start.tv_sec) + \
(a_end.tv_usec - a_start.tv_usec)/1000000.0;
printf("CPU:Time required is %.3e\n", tos_serial);
if (DISPLAY || DEBUG){
printf("CPU:The final heat distribution matrix is:\n");
print_matrix(a_new);
}
printf("The iterations performed by the serial code are %ld\n", iterations);
/*************************GPU**********************/
printf("GPU:Starting the parallel heat distribution on CUDA\n");
init_matrix(h_in); //initialize the matrices
init_matrix(h_out);
cudaMalloc((void **)&d_in, (size_t) ROWS * COLS * sizeof(double));
error = cudaGetLastError();
if (DEBUG)
printf("GPU:d_in cudaMalloc error = %d\n", error);
cudaMalloc((void **)&d_out, (size_t) ROWS * COLS * sizeof(double));
error = cudaGetLastError();
if (DEBUG)
printf("GPU:d_out cudaMalloc error = %d\n", error);
// copy data from host memory to device memory
cudaMemcpy(d_in, h_in, ROWS * COLS * sizeof(double), cudaMemcpyHostToDevice);
// copy data from device memory to device memory
cudaMemcpy(d_out, d_in, ROWS * COLS * sizeof(double), cudaMemcpyDeviceToDevice);
// block and grid dimensions
dim3 blocks(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 grids(divideUp(ROWS, BLOCK_SIZE_X), divideUp(COLS, BLOCK_SIZE_Y));
gettimeofday(&h_start, NULL);
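    // Replay the iteration count measured by the serial run (+1 extra sweep);
    // convergence is only re-checked on the host when DEBUG is enabled.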
for(i = 0; i < iterations + 1; ++i) {
//compute new values and put them into d_out
compute_new_values_gpu<<<grids, blocks>>>(d_in, d_out);
if (DEBUG){
printf("GPU:Performing a new iteration...%ld\n", i);
// copy data from device memory to host memory
cudaMemcpy(h_in, d_in, ROWS * COLS * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(h_out, d_out, ROWS * COLS * sizeof(double), cudaMemcpyDeviceToHost);
printf("GPU:d_in is:\n"); //output d_in to screen
print_matrix(h_in);
printf("GPU:d_out is:\n"); //output d_out to screen
print_matrix(h_out);
//calculate the maximum absolute differences among pairwise
// differences of old and new matrix elements
double max_diff = max_abs(h_in, h_out);
printf("GPU:Max diff is: %f\n", max_diff);
if (max_diff < EPS)
break;
printf("GPU:End of iteration...%ld\n", i);
}
// make the current d_out as d_in
cudaMemcpy(d_in, d_out, ROWS * COLS * sizeof(double), cudaMemcpyDeviceToDevice);
}
gettimeofday(&h_end, NULL);
// copy data from device memory to host memory
cudaMemcpy(h_out, d_out, ROWS * COLS * sizeof(double), cudaMemcpyDeviceToHost);
tos_cuda = (h_end.tv_sec - h_start.tv_sec) + \
(h_end.tv_usec - h_start.tv_usec)/1000000.0;
printf("GPU:Time required is %.3e seconds\n", tos_cuda);
if (DISPLAY || DEBUG){
printf("GPU:The final heat distribution matrix is:\n");
print_matrix(h_out);
}
//calculate the maximum absolute differences among pairwise
// differences of old and new matrix elements
double max_diff = max_abs(h_out, a_new);
printf("GPU:Max diff between serial and CUDA implementation is: %f\n",\
max_diff);
printf("Speed Up achieved is : %.3lf\n", tos_serial/tos_cuda);
free(h_in);
free(h_out);
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
1d448a5b0a9b5012335bed5caf40515f114b585b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define GRID_SIZE 8
#define BLOCK_SIZE 24
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
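    // Note: a single block of "size" threads only works while size stays within
    // the per-block limit (1024 threads on current GPUs).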
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| 1d448a5b0a9b5012335bed5caf40515f114b585b.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define GRID_SIZE 8
#define BLOCK_SIZE 24
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
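    // Note: a single block of "size" threads only works while size stays within
    // the per-block limit (1024 threads on current GPUs).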
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
6d442b045abf7220475ddbe1d63de00943050671.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include <wb.h>
#define BLOCK_SIZE 512 //@@ You can change this
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void total(float *input, float *output, int len) {
// Compute starting index that thread will be loading
int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
//@@ Load a segment of the input vector into shared memory
volatile __shared__ float sdata[BLOCK_SIZE];
    if(i < len) {
        if(i + blockDim.x < len) {
            sdata[threadIdx.x] = input[i] + input[i + blockDim.x];
        }
        else {
            sdata[threadIdx.x] = input[i];
        }
    }
    else {
        // zero-fill so threads past the end of the input do not reduce garbage
        sdata[threadIdx.x] = 0.0f;
    }
__syncthreads();
//@@ Traverse the reduction tree
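    // Halve the number of active threads at every step; __syncthreads() is only
    // required while more than one warp participates, and the final 32 threads
    // rely on warp-synchronous execution over the volatile shared array.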
if(BLOCK_SIZE >= 1024) {
if(threadIdx.x < 512) {
sdata[threadIdx.x] += sdata[threadIdx.x + 512];
}
__syncthreads();
}
if(BLOCK_SIZE >= 512) {
if(threadIdx.x < 256) {
sdata[threadIdx.x] += sdata[threadIdx.x + 256];
}
__syncthreads();
}
if(BLOCK_SIZE >= 256) {
if(threadIdx.x < 128) {
sdata[threadIdx.x] += sdata[threadIdx.x + 128];
}
__syncthreads();
}
if(BLOCK_SIZE >= 128) {
if(threadIdx.x < 64) {
sdata[threadIdx.x] += sdata[threadIdx.x + 64];
}
__syncthreads();
}
if(threadIdx.x < 32) {
if(BLOCK_SIZE >= 64) {
sdata[threadIdx.x] += sdata[threadIdx.x + 32];
}
if(BLOCK_SIZE >= 32) {
sdata[threadIdx.x] += sdata[threadIdx.x + 16];
}
if(BLOCK_SIZE >= 16) {
sdata[threadIdx.x] += sdata[threadIdx.x + 8];
}
if(BLOCK_SIZE >= 8) {
sdata[threadIdx.x] += sdata[threadIdx.x + 4];
}
        if(BLOCK_SIZE >= 4) {
            sdata[threadIdx.x] += sdata[threadIdx.x + 2];
        }
if(BLOCK_SIZE >= 2) {
sdata[threadIdx.x] += sdata[threadIdx.x + 1];
}
}
    //@@ Write the computed sum of the block to the output vector at the
    //@@ correct index
    if (threadIdx.x == 0) {
        output[blockIdx.x] = sdata[0];
    }
}
int main(int argc, char **argv) {
int ii;
wbArg_t args;
float *hostInput; // The input 1D list
float *hostOutput; // The output list
float *deviceInput;
float *deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput =
(float *)wbImport(wbArg_getInputFile(args, 0), &numInputElements);
numOutputElements = numInputElements / (BLOCK_SIZE << 1);
if (numInputElements % (BLOCK_SIZE << 1)) {
numOutputElements++;
}
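  // each block reduces 2 * BLOCK_SIZE consecutive inputs to one partial sum,
  // so the output holds one element per block (summed on the host below)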
hostOutput = (float *)malloc(numOutputElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ",
numInputElements);
wbLog(TRACE, "The number of output elements in the input is ",
numOutputElements);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipMalloc(&deviceInput, numInputElements * sizeof(float));
hipMalloc(&deviceOutput, numOutputElements * sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 blocksPerGrid(ceil((float)numInputElements / (2.0*(float)BLOCK_SIZE)), 1, 1);
dim3 threadsPerBlock(BLOCK_SIZE, 1, 1);
wbLog(TRACE, "The dimensions of grid is ", blocksPerGrid.x, " x ", blocksPerGrid.y, " x ", blocksPerGrid.z); // Debugging
wbLog(TRACE, "The dimensions of block is ", threadsPerBlock.x, " x ", threadsPerBlock.y, " x ", threadsPerBlock.z); // Debugging
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( total), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, deviceInput, deviceOutput, numInputElements);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
/********************************************************************
* Reduce output vector on the host
* NOTE: One could also perform the reduction of the output vector
* recursively and support any size input. For simplicity, we do not
* require that for this lab.
********************************************************************/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, 1);
free(hostInput);
free(hostOutput);
return 0;
}
| 6d442b045abf7220475ddbe1d63de00943050671.cu | // Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include <wb.h>
#define BLOCK_SIZE 512 //@@ You can change this
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void total(float *input, float *output, int len) {
// Compute starting index that thread will be loading
int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
//@@ Load a segment of the input vector into shared memory
volatile __shared__ float sdata[BLOCK_SIZE];
    if(i < len) {
        if(i + blockDim.x < len) {
            sdata[threadIdx.x] = input[i] + input[i + blockDim.x];
        }
        else {
            sdata[threadIdx.x] = input[i];
        }
    }
    else {
        // zero-fill so threads past the end of the input do not reduce garbage
        sdata[threadIdx.x] = 0.0f;
    }
__syncthreads();
//@@ Traverse the reduction tree
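    // Halve the number of active threads at every step; __syncthreads() is only
    // required while more than one warp participates, and the final 32 threads
    // rely on warp-synchronous execution over the volatile shared array.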
if(BLOCK_SIZE >= 1024) {
if(threadIdx.x < 512) {
sdata[threadIdx.x] += sdata[threadIdx.x + 512];
}
__syncthreads();
}
if(BLOCK_SIZE >= 512) {
if(threadIdx.x < 256) {
sdata[threadIdx.x] += sdata[threadIdx.x + 256];
}
__syncthreads();
}
if(BLOCK_SIZE >= 256) {
if(threadIdx.x < 128) {
sdata[threadIdx.x] += sdata[threadIdx.x + 128];
}
__syncthreads();
}
if(BLOCK_SIZE >= 128) {
if(threadIdx.x < 64) {
sdata[threadIdx.x] += sdata[threadIdx.x + 64];
}
__syncthreads();
}
if(threadIdx.x < 32) {
if(BLOCK_SIZE >= 64) {
sdata[threadIdx.x] += sdata[threadIdx.x + 32];
}
if(BLOCK_SIZE >= 32) {
sdata[threadIdx.x] += sdata[threadIdx.x + 16];
}
if(BLOCK_SIZE >= 16) {
sdata[threadIdx.x] += sdata[threadIdx.x + 8];
}
if(BLOCK_SIZE >= 8) {
sdata[threadIdx.x] += sdata[threadIdx.x + 4];
}
        if(BLOCK_SIZE >= 4) {
            sdata[threadIdx.x] += sdata[threadIdx.x + 2];
        }
if(BLOCK_SIZE >= 2) {
sdata[threadIdx.x] += sdata[threadIdx.x + 1];
}
}
    //@@ Write the computed sum of the block to the output vector at the
    //@@ correct index
    if (threadIdx.x == 0) {
        output[blockIdx.x] = sdata[0];
    }
}
int main(int argc, char **argv) {
int ii;
wbArg_t args;
float *hostInput; // The input 1D list
float *hostOutput; // The output list
float *deviceInput;
float *deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput =
(float *)wbImport(wbArg_getInputFile(args, 0), &numInputElements);
numOutputElements = numInputElements / (BLOCK_SIZE << 1);
if (numInputElements % (BLOCK_SIZE << 1)) {
numOutputElements++;
}
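  // each block reduces 2 * BLOCK_SIZE consecutive inputs to one partial sum,
  // so the output holds one element per block (summed on the host below)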
hostOutput = (float *)malloc(numOutputElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ",
numInputElements);
wbLog(TRACE, "The number of output elements in the input is ",
numOutputElements);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaMalloc(&deviceInput, numInputElements * sizeof(float));
cudaMalloc(&deviceOutput, numOutputElements * sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 blocksPerGrid(ceil((float)numInputElements / (2.0*(float)BLOCK_SIZE)), 1, 1);
dim3 threadsPerBlock(BLOCK_SIZE, 1, 1);
wbLog(TRACE, "The dimensions of grid is ", blocksPerGrid.x, " x ", blocksPerGrid.y, " x ", blocksPerGrid.z); // Debugging
wbLog(TRACE, "The dimensions of block is ", threadsPerBlock.x, " x ", threadsPerBlock.y, " x ", threadsPerBlock.z); // Debugging
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
total<<<blocksPerGrid, threadsPerBlock>>>(deviceInput, deviceOutput, numInputElements);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
/********************************************************************
* Reduce output vector on the host
* NOTE: One could also perform the reduction of the output vector
* recursively and support any size input. For simplicity, we do not
* require that for this lab.
********************************************************************/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, 1);
free(hostInput);
free(hostOutput);
return 0;
}
|
f095e2df46b3841cd36902d51cccb1c85e878f27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <iostream>
using namespace std;
#define BLOCK_DIM 16
// create the CSR row-pointer array (0, 1, ..., numRows) of a diagonal matrix
__global__ void diagCsrIndsKernel(const int numRows, int *inds) {
unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx <= numRows) {
inds[idx] = idx;
}
}
void createDiagXcsrInds(int size, int **inds) {
CUDA_CHECK(hipMalloc((void**)&(*inds), sizeof(int)*(size + 1)));
hipLaunchKernelGGL(( diagCsrIndsKernel) , dim3(CAFFE_GET_BLOCKS(size + 1)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, size, *inds);
}
// wrapper for hipsparseDcsrgeam
void cusparseAddDSpMat(hipsparseHandle_t handle, const int* rowPtrsA, const int* colIndsA,
const double* valsA, const int nnzA, const int* rowPtrsB,
const int* colIndsB, const double* valsB, const int nnzB,
const int m, const int n, const double alpha, const double beta,
int** rowPtrsC, int** colIndsC, double** valsC, int* nnzC)
{
hipsparseMatDescr_t descr;
CUDA_CHECK(hipsparseCreateMatDescr(&descr));
CUDA_CHECK(hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
CUDA_CHECK(hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO));
CUDA_CHECK(hipMalloc((void**)&(*rowPtrsC), sizeof(int)*(m + 1)));
CUDA_CHECK(hipsparseXcsrgeamNnz(handle, m, n, descr, nnzA,
rowPtrsA, colIndsA, descr, nnzB, rowPtrsB, colIndsB, descr,
*rowPtrsC, nnzC));
CUDA_CHECK(hipMalloc((void**)&(*colIndsC), sizeof(int) * (*nnzC)));
CUDA_CHECK(hipMalloc((void**)&(*valsC), sizeof(double) * (*nnzC)));
CUDA_CHECK(hipsparseDcsrgeam(handle, m, n, &alpha, descr, nnzA,
valsA, rowPtrsA, colIndsA, &beta, descr, nnzB, valsB,
rowPtrsB, colIndsB, descr, *valsC, *rowPtrsC, *colIndsC));
CUDA_CHECK(hipsparseDestroyMatDescr(descr));
}
// cusparseDcoodup2coo_compress and its helper functions
// this function eliminates duplicate entries in a sparse matrix stored in
// COO format by summing the values of entries that share the same
// (row, col) index
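// example (already sorted): rows 0 0 1 1 2, cols 0 0 3 3 2, vals 1 2 5 1 4
//   maskDuplicate gives 1 0 1 0 1, inclusive scan gives 1 1 2 2 3,
//   reduceByMask emits (0,0,3) (1,3,6) (2,2,4) and compNnz = 3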
template<typename Dtype>
__global__ void mapVector(const Dtype *vals_src, const int* permut,
const int num, Dtype *vals_dst) {
unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < num) {
vals_dst[idx] = vals_src[permut[idx]];
}
}
// based on the assumption that the coo mat is sorted,
// look at the element to the left to decide whether the current entry is a duplicate
__global__ void maskDuplicate(int* rowInds, int* colInds, int num, int* mask) {
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < num && idx > 0) {
if (rowInds[idx] == rowInds[idx - 1] && colInds[idx] == colInds[idx - 1]) {
mask[idx] = 0; // mark as duplicate
}
else {
mask[idx] = 1;
}
}
else if (idx == 0) {
mask[idx] = 1;
}
}
// 1. look left, check if it's the first element
// 2. go right, add up all the duplicate elements
template<typename Dtype>
__global__ void reduceByMask(int length,
int* mask,
int* rowInds,
int* colInds,
Dtype* vals,
int* compRowInds,
int* compColInds,
Dtype* compVals)
{
unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
int compInd;
Dtype temp;
if (idx < length) {
if (idx == 0 || mask[idx - 1] != mask[idx]) { // thread hit
int offset = 0;
temp = vals[idx];
while (idx + offset + 1 < length &&\
mask[idx + offset] == mask[idx + offset + 1]) {
temp += vals[idx + offset + 1];
++offset;
}
// index in compress mode, hard code a -1 for our scenario
compInd = mask[idx] - 1;
compRowInds[compInd] = rowInds[idx];
compColInds[compInd] = colInds[idx];
compVals[compInd] = temp;
}
}
}
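// Pipeline used below: (1) coosort the copied triplets by row/column and apply
// the permutation to the values, (2) mark the first occurrence of every
// (row, col) pair and inclusive-scan the mask to obtain compressed positions,
// (3) reduce each run of duplicates into a single entry.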
void cusparseDcoodup2coo_compress(hipsparseHandle_t cusparseHandle,
int nnz, int m, int n, const double* vals, const int* rowInds, const int* colInds,
double** compVals, int** compRowInds, int** compColInds, int* compNnz)
{
size_t pBufferSizeInBytes = 0;
int *d_p; // permutation
void *pBuffer;
int *mask, *rowIndsCpy, *colIndsCpy;
double *valsCpy, *d_vals_t;
// step 0: allocation and copy
CUDA_CHECK(hipMalloc((void**)&rowIndsCpy, sizeof(int)*nnz));
CUDA_CHECK(hipMalloc((void**)&colIndsCpy, sizeof(int)*nnz));
CUDA_CHECK(hipMalloc((void**)&valsCpy, sizeof(double)*nnz));
CUDA_CHECK(hipMemcpy(rowIndsCpy, rowInds, sizeof(int)*nnz, hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(colIndsCpy, colInds, sizeof(int)*nnz, hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(valsCpy, vals, sizeof(double)*nnz, hipMemcpyDeviceToDevice));
// step 1: allocation and sort
CUDA_CHECK(hipMalloc((void**)&d_p, sizeof(int)*nnz));
CUDA_CHECK(hipsparseCreateIdentityPermutation(cusparseHandle, nnz, d_p));
CUDA_CHECK(hipsparseXcoosort_bufferSizeExt(cusparseHandle, m, n, \
nnz, rowIndsCpy, colIndsCpy, &pBufferSizeInBytes));
CUDA_CHECK(hipMalloc((void**)&pBuffer, pBufferSizeInBytes));
CUDA_CHECK(hipsparseXcoosortByRow(cusparseHandle, m, n, \
nnz, rowIndsCpy, colIndsCpy, d_p, pBuffer));
CUDA_CHECK(hipFree(pBuffer));
CUDA_CHECK(hipMalloc((void**)&d_vals_t, sizeof(double)*nnz));
CUDA_CHECK(hipMemcpy(d_vals_t, valsCpy, sizeof(double)*nnz, hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( mapVector<double>) , dim3(CAFFE_GET_BLOCKS(nnz)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
d_vals_t, d_p, nnz, valsCpy);
CUDA_CHECK(hipFree(d_vals_t));
CUDA_CHECK(hipFree(d_p));
// step 2: mask and scan(inclusive)
CUDA_CHECK(hipMalloc((void**)&mask, sizeof(int)*nnz));
hipLaunchKernelGGL(( maskDuplicate) , dim3(CAFFE_GET_BLOCKS(nnz)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
rowIndsCpy, colIndsCpy, nnz, mask);
thrust::inclusive_scan(thrust::device, mask, mask + nnz, mask);
// step 3: allocate and reduce
CUDA_CHECK(hipMemcpy(compNnz, &mask[nnz - 1], sizeof(int), hipMemcpyDeviceToHost));
CUDA_CHECK(hipMalloc((void**)&(*compRowInds), sizeof(int) * (*compNnz)));
CUDA_CHECK(hipMalloc((void**)&(*compColInds), sizeof(int) * (*compNnz)));
CUDA_CHECK(hipMalloc((void**)&(*compVals), sizeof(double) * (*compNnz)));
hipLaunchKernelGGL(( reduceByMask<double>) , dim3(CAFFE_GET_BLOCKS(nnz)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
nnz, mask, rowIndsCpy, colIndsCpy,
valsCpy, *compRowInds, *compColInds, *compVals);
CUDA_CHECK(hipFree(mask));
CUDA_CHECK(hipFree(rowIndsCpy));
CUDA_CHECK(hipFree(colIndsCpy));
CUDA_CHECK(hipFree(valsCpy));
}
//
// laplacian function and its helper functions
//
// this function extracts a small window from the original image
template<typename Dtype>
__device__ void extract_window(const uchar *src, const int idx_x,
const int idx_y, const int c, const int w_rad,
const int imcols, Dtype *window)
{
for (int iy = 0; iy < 2 * w_rad + 1; ++iy) {
for (int ix = 0; ix < 2 * w_rad + 1; ++ix) {
for (int iz = 0; iz < c; ++iz) {
window[c * (iy * (2 * w_rad + 1) + ix) + iz] = \
(Dtype)src[c*((idx_y - w_rad + iy)*imcols + (idx_x - w_rad + ix)) + iz];
}
}
}
}
// this function calculates the mean of pixels in a small window
template<typename Dtype>
__device__ void calc_mu(const int m, const int n, const Dtype *mat, Dtype *mu)
{
for(int i = 0; i < n; ++i){
mu[i] = 0;
}
for (int j = 0; j < m; ++j) {
for (int i = 0; i < n; ++i) {
mu[i] += mat[j*n + i];
}
}
for(int i = 0; i < n; ++i){
mu[i] /= m;
}
}
// this function calculates the intermediate matrix var (a regularised covariance)
// n is the remaining dimension, so the result is n x n
template<typename Dtype>
__device__ void calc_var(const Dtype *data, const Dtype *mu,
const int m, const int n, const Dtype eps, Dtype *var)
{
Dtype ele_val = 0;
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
ele_val = 0;
for (int k = 0; k < m; ++k) {
ele_val += data[k*n + i] * data[k*n + j];
}
ele_val /= m;
ele_val -= mu[i] * mu[j];
ele_val += (Dtype)(i == j) * eps / m;
var[i*n + j] = ele_val;
}
}
}
// uses the symmetry of the matrix; there seems to be no clean generic way
// to express this, so the 3x3 inverse is hard-coded
template<typename Dtype>
__device__ void calc_inverse3X3(const Dtype* mat, Dtype *inv)
{
Dtype det = mat[0] * (mat[4] * mat[8] - mat[5] * mat[5]) -
mat[1] * (mat[1] * mat[8] - mat[5] * mat[2]) +
mat[2] * (mat[1] * mat[5] - mat[4] * mat[2]);
det = Dtype(1)/det;
inv[0] = (mat[4] * mat[8] - mat[5] * mat[7]) * det;
inv[1] = (mat[2] * mat[7] - mat[1] * mat[8]) * det;
inv[2] = (mat[1] * mat[5] - mat[4] * mat[2]) * det;
inv[4] = (mat[0] * mat[8] - mat[2] * mat[6]) * det;
inv[5] = (mat[2] * mat[3] - mat[0] * mat[5]) * det;
inv[8] = (mat[0] * mat[4] - mat[1] * mat[3]) * det;
inv[3] = inv[1];
inv[6] = inv[2];
inv[7] = inv[5];
}
// m, n describe the shape of window_data
template<typename Dtype>
__device__ void calc_val(const Dtype *window_data, const Dtype *win_var_inv, const Dtype *win_mu,
const int m, const int n, Dtype *win_val)
{
for (int i = 0; i < m; ++i) {
for (int j = 0; j < m; ++j) {
Dtype ele_res = 0;
for (int k = 0; k < n; ++k) {
for (int l = 0; l < n; ++l) {
ele_res += \
win_var_inv[k*n + l] * (window_data[i*n + k] - win_mu[k])*(window_data[j*n + l] - win_mu[l]);
}
}
ele_res += 1;
ele_res /= m;
win_val[i*m + j] = (Dtype)(i==j) - ele_res;
}
}
}
__device__ void fill_inds(const int col, const int idx_x, const int idx_y,
const int w_rad, int *rowInds, int *colInds)
{
int width = 2 * w_rad + 1;
int neb_size = width * width;
int working = 0;
for (int i = 0; i < neb_size; ++i) {
for (int j = i; j < neb_size; ++j) {
rowInds[working] = (i / width + idx_y - w_rad)*col + i % 3 + idx_x - w_rad; // i-th pixel
colInds[working] = (j / width + idx_y - w_rad)*col + j % 3 + idx_x - w_rad; // j-th pixel
++working;
}
}
}
template<typename Dtype>
__device__ void fill_vals(const int neb_size, const Dtype *win_vals, Dtype *vals) {
int working = 0;
for (int i = 0; i < neb_size; ++i) {
for (int j = i; j < neb_size; ++j) {
vals[working] = win_vals[i*neb_size + j];
++working;
}
}
}
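// The window values computed below follow the closed-form (Levin-style)
// matting Laplacian:
//   L_ij = delta_ij - (1 + (I_i - mu)^T (Sigma + eps/9 I)^-1 (I_j - mu)) / 9
// for the 9 pixels of a 3x3 window; overlapping windows are summed later by
// the duplicate-compression step.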
// had to hard code for w_rad = 1, channels = 3.
template<typename Dtype>
__global__ void getCfLaplacianKernel(const Dtype eps, const int imrows,
const int imcols, const uchar* src,
int *rowInds, int *colInds, Dtype *vals)
{
int w_rad = 1;
int neb_size = (2 * w_rad + 1)*(2 * w_rad + 1);
int c = 3; // hard code the channels
// window size is the size of the laplacian of the small neighbor
int window_size = neb_size*(neb_size + 1) / 2;
// using y, x coordinate
unsigned int idx_x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int idx_y = threadIdx.y + blockIdx.y * blockDim.y;
Dtype imgW[9 * 3]; // hard coded as neb_size = 9, channels = 3
__shared__ Dtype win_mu[BLOCK_DIM*BLOCK_DIM*3];
__shared__ Dtype win_var[BLOCK_DIM*BLOCK_DIM*3*3];
__shared__ Dtype win_var_inv[BLOCK_DIM*BLOCK_DIM*3*3];
unsigned int idx_block = threadIdx.x + threadIdx.y*blockDim.x;
Dtype win_val[9 * 9]; // neb_size = 9
int pBufferOffset = ((idx_y - w_rad)*(imcols - 2 * w_rad) + idx_x - w_rad)*window_size;
if (idx_y >= w_rad && idx_y < imrows - w_rad) {
if (idx_x >= w_rad && idx_x < imcols - w_rad) {
extract_window<Dtype>(src, idx_x, idx_y, c, w_rad, imcols, imgW);
calc_mu<Dtype>(neb_size, c, imgW, win_mu+idx_block*3);
calc_var<Dtype>(imgW, win_mu+idx_block*3, neb_size, c, eps, win_var + 3*3*idx_block);
calc_inverse3X3<Dtype>(win_var + 3*3*idx_block, win_var_inv + 3*3*idx_block);
// calc_inverse<Dtype>(win_var + 3*3*idx_block, c, win_var_inv + 3*3*idx_block);
calc_val<Dtype>(imgW, win_var_inv + 3*3*idx_block, win_mu+idx_block*3,
neb_size, c, win_val);
// fill in val and inds
fill_inds(imcols, idx_x, idx_y, w_rad, rowInds + pBufferOffset, colInds + pBufferOffset);
fill_vals<Dtype>(neb_size, win_val, vals + pBufferOffset);
}
}
}
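// Driver: every interior pixel writes the upper triangle of its 9x9 window
// Laplacian (45 COO triplets) into the buffers; overlapping contributions are
// summed by cusparseDcoodup2coo_compress and the result is converted to CSR.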
void getCfLaplacianDCoo_gpu(hipsparseHandle_t cusparseHandle, const double eps,
const int w_rad, const cv::Mat src,
int **rowPtrs, int **colInds, double **vals, int *nnz)
{
// host set up
int c = src.channels();
int imrows = src.rows;
int imcols = src.cols;
int neb_size = (2 * w_rad + 1)*(2 * w_rad + 1);
int imgSize = imrows * imcols;
// window size is the size of the laplacian of the small neighbor
int window_size = neb_size * (neb_size + 1) / 2;
int num_window = (imrows - 2 * w_rad)*(imcols - 2 * w_rad);
// cuda set up
int *d_rowBuffer, *d_colBuffer, *d_rowInds;
uchar *d_src;
double *d_valBuffer;
CUDA_CHECK(hipMalloc((void**)&d_rowBuffer, sizeof(int)*window_size*num_window));
CUDA_CHECK(hipMalloc((void**)&d_colBuffer, sizeof(int)*window_size*num_window));
CUDA_CHECK(hipMalloc((void**)&d_valBuffer, sizeof(double)*window_size*num_window));
// CUDA_CHECK(hipMalloc((void**)&d_src, sizeof(double)*imrows*imcols));
CUDA_CHECK(hipMalloc((void**)&d_src, sizeof(uchar)*c*imrows*imcols));
CUDA_CHECK(hipMemcpy(d_src, src.data, sizeof(uchar)*c*imrows*imcols, hipMemcpyDefault));
hipLaunchKernelGGL(( getCfLaplacianKernel<double>)
, dim3(dim3((imcols+BLOCK_DIM-1)/BLOCK_DIM,(imrows + BLOCK_DIM-1)/BLOCK_DIM)),
dim3(dim3(BLOCK_DIM, BLOCK_DIM)) , 0, 0,
eps, imrows, imcols, d_src, d_rowBuffer, d_colBuffer, d_valBuffer);
CUDA_CHECK(hipFree(d_src));
hipDeviceSynchronize();
cusparseDcoodup2coo_compress(\
cusparseHandle, window_size * num_window, imgSize, imgSize, \
d_valBuffer, d_rowBuffer, d_colBuffer, \
vals, &d_rowInds, colInds, nnz);
CUDA_CHECK(hipFree(d_valBuffer));
CUDA_CHECK(hipFree(d_colBuffer));
CUDA_CHECK(hipFree(d_rowBuffer));
// transform into csr format
CUDA_CHECK(hipMalloc((void**)&(*rowPtrs), sizeof(int)*(imgSize + 1)));
CUDA_CHECK(hipsparseXcoo2csr(cusparseHandle, d_rowInds, *nnz, imgSize, *rowPtrs, \
HIPSPARSE_INDEX_BASE_ZERO));
CUDA_CHECK(hipFree(d_rowInds));
}
cv::Mat createIndsMat(const int imrows, const int imcols) {
cv::Mat inds(imrows, imcols, CV_32S);
int imagesize = imrows*imcols;
int *data = (int*)inds.data;
#pragma omp parallel for
for (int i = 0; i < imagesize; ++i) {
data[i] = i;
}
return inds;
}
void pardiso_solve_sym(const MKL_INT numRows, const MKL_INT lhsNnz,
const int* lhsRowPtrs, const int* lhsColInds, const double* lhsVals,
const double *rhsVals, double *res) {
// copy and store the data in MKL_INT so the code can be run under mkl_ilp64
MKL_INT* rowPtrs_t = (MKL_INT*)malloc(sizeof(MKL_INT)*(numRows + 1));
MKL_INT* colInds_t = (MKL_INT*)malloc(sizeof(MKL_INT)*lhsNnz);
for (MKL_INT i = 0; i < lhsNnz; ++i) {
colInds_t[i] = (MKL_INT)lhsColInds[i];
}
for (MKL_INT i = 0; i < numRows + 1; ++i) {
rowPtrs_t[i] = (MKL_INT)lhsRowPtrs[i];
}
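    // PARDISO phases used below: 11 = analysis/reordering, 22 = numerical
    // factorisation, 33 = solve with iterative refinement, -1 = release memory.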
MKL_INT mtype = -2; /* Real symmetric matrix */
// MKL_INT mtype = 2; /* real positive symmetric matrix */
MKL_INT nrhs = 1; /* Number of right hand sides. */
void *pt[64];
/* Pardiso control parameters. */
MKL_INT iparm[64];
MKL_INT maxfct, mnum, phase, error = 0, msglvl;
/* Auxiliary variables. */
double ddum; /* Double dummy */
MKL_INT idum; /* Integer dummy. */
// set up parameter
for (MKL_INT i = 0; i < 64; i++)
{
iparm[i] = 0;
}
iparm[0] = 1; /* No solver default */
// iparm[1] = 2; /* Fill-in reordering from METIS */
iparm[1] = 3; /* parallel version of METIS */
iparm[3] = 0; /* No iterative-direct algorithm */
iparm[4] = 0; /* No user fill-in reducing permutation */
iparm[5] = 0; /* Write solution into x */
iparm[7] = 2; /* Max numbers of iterative refinement steps */
iparm[9] = 13; /* Perturb the pivot elements with 1E-13 */
iparm[10] = 1; /* Use nonsymmetric permutation and scaling MPS */
iparm[12] = 0; /* Maximum weighted matching algorithm is switched-off (default for symmetric) */
iparm[13] = 0; /* Output: Number of perturbed pivots */
iparm[17] = -1; /* Output: Number of nonzeros in the factor LU */
iparm[18] = -1; /* Output: Mflops for LU factorization */
iparm[19] = 0; /* Output: Numbers of CG Iterations */
iparm[34] = 1; /* set to 0-based index */
maxfct = 1; /* Maximum number of numerical factorizations. */
mnum = 1; /* Which factorization to use. */
msglvl = 0; /* Don't print statistical information in file */
error = 0; /* Initialize error flag */
for (MKL_INT i = 0; i < 64; i++){
pt[i] = 0;
}
// reorder and allocate memory
phase = 11;
PARDISO(pt, &maxfct, &mnum, &mtype, &phase,
&numRows, lhsVals, rowPtrs_t, colInds_t, &idum, &nrhs, iparm, &msglvl, &ddum, &ddum, &error);
if (error != 0){
printf("\nERROR during symbolic factorization: %d", error);
exit(1);
}
// factorization
phase = 22;
PARDISO(pt, &maxfct, &mnum, &mtype, &phase,
&numRows, lhsVals, rowPtrs_t, colInds_t, &idum, &nrhs, iparm, &msglvl, &ddum, &ddum, &error);
if (error != 0){
printf("\nERROR during numerical factorization: %d", error);
exit(2);
}
// Back substitution and iterative refinement.
phase = 33;
iparm[7] = 2; // Max numbers of iterative refinement steps.
PARDISO(pt, &maxfct, &mnum, &mtype, &phase,
&numRows, lhsVals, rowPtrs_t, colInds_t, &idum, &nrhs, iparm, &msglvl, (void*)rhsVals, res, &error);
if (error != 0){
printf("\nERROR during solution: %d", error);
exit(3);
}
// Termination and release of memory.
phase = -1; /* Release internal memory. */
PARDISO(pt, &maxfct, &mnum, &mtype, &phase,
&numRows, &ddum, rowPtrs_t, colInds_t, &idum, &nrhs,
iparm, &msglvl, &ddum, &ddum, &error);
free(rowPtrs_t);
free(colInds_t);
}
void setTRI(cv::String tri_path, const cv::Mat src, cv::Mat &all, cv::Mat &fore) {
// read the image and perform some sanity check
cv::Mat trimap = cv::imread(tri_path, CV_LOAD_IMAGE_COLOR);
if (src.rows != trimap.rows || src.cols != trimap.cols) {
std::cout << "Dimension Not Match" << std::endl;
exit(EXIT_FAILURE);
}
cv::Mat channels[3];
cv::Mat src_tmp;
src.convertTo(src_tmp, CV_64FC3);
trimap.convertTo(trimap, CV_64FC3);
cv::split(src_tmp, channels);
src_tmp = (channels[0] + channels[1] + channels[2]) / 3.0;
cv::split(trimap, channels);
trimap = (channels[0] + channels[1] + channels[2]) / 3.0;
trimap = trimap - src_tmp;
fore = trimap > 0.02;
all = trimap < -0.02 | trimap > 0.02;
fore.convertTo(fore, CV_64FC1);
all.convertTo(all, CV_64FC1);
}
| f095e2df46b3841cd36902d51cccb1c85e878f27.cu | #include "utils.h"
#include <iostream>
using namespace std;
#define BLOCK_DIM 16
// create the CSR row-pointer array (0, 1, ..., numRows) of a diagonal matrix
__global__ void diagCsrIndsKernel(const int numRows, int *inds) {
unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx <= numRows) {
inds[idx] = idx;
}
}
void createDiagXcsrInds(int size, int **inds) {
CUDA_CHECK(cudaMalloc((void**)&(*inds), sizeof(int)*(size + 1)));
diagCsrIndsKernel <<<CAFFE_GET_BLOCKS(size + 1), CAFFE_CUDA_NUM_THREADS >>>(size, *inds);
}
// wrapper for cusparseDcsrgeam
void cusparseAddDSpMat(cusparseHandle_t handle, const int* rowPtrsA, const int* colIndsA,
const double* valsA, const int nnzA, const int* rowPtrsB,
const int* colIndsB, const double* valsB, const int nnzB,
const int m, const int n, const double alpha, const double beta,
int** rowPtrsC, int** colIndsC, double** valsC, int* nnzC)
{
cusparseMatDescr_t descr;
CUDA_CHECK(cusparseCreateMatDescr(&descr));
CUDA_CHECK(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL));
CUDA_CHECK(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO));
CUDA_CHECK(cudaMalloc((void**)&(*rowPtrsC), sizeof(int)*(m + 1)));
CUDA_CHECK(cusparseXcsrgeamNnz(handle, m, n, descr, nnzA,
rowPtrsA, colIndsA, descr, nnzB, rowPtrsB, colIndsB, descr,
*rowPtrsC, nnzC));
CUDA_CHECK(cudaMalloc((void**)&(*colIndsC), sizeof(int) * (*nnzC)));
CUDA_CHECK(cudaMalloc((void**)&(*valsC), sizeof(double) * (*nnzC)));
CUDA_CHECK(cusparseDcsrgeam(handle, m, n, &alpha, descr, nnzA,
valsA, rowPtrsA, colIndsA, &beta, descr, nnzB, valsB,
rowPtrsB, colIndsB, descr, *valsC, *rowPtrsC, *colIndsC));
CUDA_CHECK(cusparseDestroyMatDescr(descr));
}
// cusparseDcoodup2coo_compress and its helper functions
// this function eliminates duplicate entries in a sparse matrix stored in
// COO format by summing the values of entries that share the same
// (row, col) index
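// example (already sorted): rows 0 0 1 1 2, cols 0 0 3 3 2, vals 1 2 5 1 4
//   maskDuplicate gives 1 0 1 0 1, inclusive scan gives 1 1 2 2 3,
//   reduceByMask emits (0,0,3) (1,3,6) (2,2,4) and compNnz = 3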
template<typename Dtype>
__global__ void mapVector(const Dtype *vals_src, const int* permut,
const int num, Dtype *vals_dst) {
unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < num) {
vals_dst[idx] = vals_src[permut[idx]];
}
}
// based on the assumption that the coo mat is sorted,
// look at the element to the left to decide whether the current entry is a duplicate
__global__ void maskDuplicate(int* rowInds, int* colInds, int num, int* mask) {
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < num && idx > 0) {
if (rowInds[idx] == rowInds[idx - 1] && colInds[idx] == colInds[idx - 1]) {
mask[idx] = 0; // mark as duplicate
}
else {
mask[idx] = 1;
}
}
else if (idx == 0) {
mask[idx] = 1;
}
}
// 1. look left, check if it's the first element
// 2. go right, add up all the duplicate elements
template<typename Dtype>
__global__ void reduceByMask(int length,
int* mask,
int* rowInds,
int* colInds,
Dtype* vals,
int* compRowInds,
int* compColInds,
Dtype* compVals)
{
unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
int compInd;
Dtype temp;
if (idx < length) {
if (idx == 0 || mask[idx - 1] != mask[idx]) { // thread hit
int offset = 0;
temp = vals[idx];
while (idx + offset + 1 < length &&\
mask[idx + offset] == mask[idx + offset + 1]) {
temp += vals[idx + offset + 1];
++offset;
}
// index in compress mode, hard code a -1 for our scenario
compInd = mask[idx] - 1;
compRowInds[compInd] = rowInds[idx];
compColInds[compInd] = colInds[idx];
compVals[compInd] = temp;
}
}
}
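// Pipeline used below: (1) coosort the copied triplets by row/column and apply
// the permutation to the values, (2) mark the first occurrence of every
// (row, col) pair and inclusive-scan the mask to obtain compressed positions,
// (3) reduce each run of duplicates into a single entry.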
void cusparseDcoodup2coo_compress(cusparseHandle_t cusparseHandle,
int nnz, int m, int n, const double* vals, const int* rowInds, const int* colInds,
double** compVals, int** compRowInds, int** compColInds, int* compNnz)
{
size_t pBufferSizeInBytes = 0;
int *d_p; // permutation
void *pBuffer;
int *mask, *rowIndsCpy, *colIndsCpy;
double *valsCpy, *d_vals_t;
// step 0: allocation and copy
CUDA_CHECK(cudaMalloc((void**)&rowIndsCpy, sizeof(int)*nnz));
CUDA_CHECK(cudaMalloc((void**)&colIndsCpy, sizeof(int)*nnz));
CUDA_CHECK(cudaMalloc((void**)&valsCpy, sizeof(double)*nnz));
CUDA_CHECK(cudaMemcpy(rowIndsCpy, rowInds, sizeof(int)*nnz, cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(colIndsCpy, colInds, sizeof(int)*nnz, cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(valsCpy, vals, sizeof(double)*nnz, cudaMemcpyDeviceToDevice));
// step 1: allocation and sort
CUDA_CHECK(cudaMalloc((void**)&d_p, sizeof(int)*nnz));
CUDA_CHECK(cusparseCreateIdentityPermutation(cusparseHandle, nnz, d_p));
CUDA_CHECK(cusparseXcoosort_bufferSizeExt(cusparseHandle, m, n, \
nnz, rowIndsCpy, colIndsCpy, &pBufferSizeInBytes));
CUDA_CHECK(cudaMalloc((void**)&pBuffer, pBufferSizeInBytes));
CUDA_CHECK(cusparseXcoosortByRow(cusparseHandle, m, n, \
nnz, rowIndsCpy, colIndsCpy, d_p, pBuffer));
CUDA_CHECK(cudaFree(pBuffer));
CUDA_CHECK(cudaMalloc((void**)&d_vals_t, sizeof(double)*nnz));
CUDA_CHECK(cudaMemcpy(d_vals_t, valsCpy, sizeof(double)*nnz, cudaMemcpyDeviceToDevice));
mapVector<double> <<<CAFFE_GET_BLOCKS(nnz), CAFFE_CUDA_NUM_THREADS >>>
(d_vals_t, d_p, nnz, valsCpy);
CUDA_CHECK(cudaFree(d_vals_t));
CUDA_CHECK(cudaFree(d_p));
// step 2: mask and scan(inclusive)
CUDA_CHECK(cudaMalloc((void**)&mask, sizeof(int)*nnz));
maskDuplicate <<<CAFFE_GET_BLOCKS(nnz), CAFFE_CUDA_NUM_THREADS >>>
(rowIndsCpy, colIndsCpy, nnz, mask);
thrust::inclusive_scan(thrust::device, mask, mask + nnz, mask);
// step 3: allocate and reduce
CUDA_CHECK(cudaMemcpy(compNnz, &mask[nnz - 1], sizeof(int), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMalloc((void**)&(*compRowInds), sizeof(int) * (*compNnz)));
CUDA_CHECK(cudaMalloc((void**)&(*compColInds), sizeof(int) * (*compNnz)));
CUDA_CHECK(cudaMalloc((void**)&(*compVals), sizeof(double) * (*compNnz)));
reduceByMask<double> <<<CAFFE_GET_BLOCKS(nnz), CAFFE_CUDA_NUM_THREADS >>>
(nnz, mask, rowIndsCpy, colIndsCpy,
valsCpy, *compRowInds, *compColInds, *compVals);
CUDA_CHECK(cudaFree(mask));
CUDA_CHECK(cudaFree(rowIndsCpy));
CUDA_CHECK(cudaFree(colIndsCpy));
CUDA_CHECK(cudaFree(valsCpy));
}
//
// laplacian function and its helper functions
//
// this function extracts a small window from the original image
template<typename Dtype>
__device__ void extract_window(const uchar *src, const int idx_x,
const int idx_y, const int c, const int w_rad,
const int imcols, Dtype *window)
{
for (int iy = 0; iy < 2 * w_rad + 1; ++iy) {
for (int ix = 0; ix < 2 * w_rad + 1; ++ix) {
for (int iz = 0; iz < c; ++iz) {
window[c * (iy * (2 * w_rad + 1) + ix) + iz] = \
(Dtype)src[c*((idx_y - w_rad + iy)*imcols + (idx_x - w_rad + ix)) + iz];
}
}
}
}
// this function calculates the mean of pixels in a small window
template<typename Dtype>
__device__ void calc_mu(const int m, const int n, const Dtype *mat, Dtype *mu)
{
for(int i = 0; i < n; ++i){
mu[i] = 0;
}
for (int j = 0; j < m; ++j) {
for (int i = 0; i < n; ++i) {
mu[i] += mat[j*n + i];
}
}
for(int i = 0; i < n; ++i){
mu[i] /= m;
}
}
// this function calculates the intermediate matrix var (a regularised covariance)
// n is the remaining dimension, so the result is n x n
template<typename Dtype>
__device__ void calc_var(const Dtype *data, const Dtype *mu,
const int m, const int n, const Dtype eps, Dtype *var)
{
Dtype ele_val = 0;
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
ele_val = 0;
for (int k = 0; k < m; ++k) {
ele_val += data[k*n + i] * data[k*n + j];
}
ele_val /= m;
ele_val -= mu[i] * mu[j];
ele_val += (Dtype)(i == j) * eps / m;
var[i*n + j] = ele_val;
}
}
}
// uses the symmetry of the matrix; there seems to be no clean generic way
// to express this, so the 3x3 inverse is hard-coded
template<typename Dtype>
__device__ void calc_inverse3X3(const Dtype* mat, Dtype *inv)
{
Dtype det = mat[0] * (mat[4] * mat[8] - mat[5] * mat[5]) -
mat[1] * (mat[1] * mat[8] - mat[5] * mat[2]) +
mat[2] * (mat[1] * mat[5] - mat[4] * mat[2]);
det = Dtype(1)/det;
inv[0] = (mat[4] * mat[8] - mat[5] * mat[7]) * det;
inv[1] = (mat[2] * mat[7] - mat[1] * mat[8]) * det;
inv[2] = (mat[1] * mat[5] - mat[4] * mat[2]) * det;
inv[4] = (mat[0] * mat[8] - mat[2] * mat[6]) * det;
inv[5] = (mat[2] * mat[3] - mat[0] * mat[5]) * det;
inv[8] = (mat[0] * mat[4] - mat[1] * mat[3]) * det;
inv[3] = inv[1];
inv[6] = inv[2];
inv[7] = inv[5];
}
// m, n describe the shape of window_data
template<typename Dtype>
__device__ void calc_val(const Dtype *window_data, const Dtype *win_var_inv, const Dtype *win_mu,
const int m, const int n, Dtype *win_val)
{
for (int i = 0; i < m; ++i) {
for (int j = 0; j < m; ++j) {
Dtype ele_res = 0;
for (int k = 0; k < n; ++k) {
for (int l = 0; l < n; ++l) {
ele_res += \
win_var_inv[k*n + l] * (window_data[i*n + k] - win_mu[k])*(window_data[j*n + l] - win_mu[l]);
}
}
ele_res += 1;
ele_res /= m;
win_val[i*m + j] = (Dtype)(i==j) - ele_res;
}
}
}
__device__ void fill_inds(const int col, const int idx_x, const int idx_y,
const int w_rad, int *rowInds, int *colInds)
{
int width = 2 * w_rad + 1;
int neb_size = width * width;
int working = 0;
for (int i = 0; i < neb_size; ++i) {
for (int j = i; j < neb_size; ++j) {
      rowInds[working] = (i / width + idx_y - w_rad)*col + i % width + idx_x - w_rad; // i-th pixel
      colInds[working] = (j / width + idx_y - w_rad)*col + j % width + idx_x - w_rad; // j-th pixel
++working;
}
}
}
template<typename Dtype>
__device__ void fill_vals(const int neb_size, const Dtype *win_vals, Dtype *vals) {
int working = 0;
for (int i = 0; i < neb_size; ++i) {
for (int j = i; j < neb_size; ++j) {
vals[working] = win_vals[i*neb_size + j];
++working;
}
}
}
// had to hard code for w_rad = 1, channels = 3.
template<typename Dtype>
__global__ void getCfLaplacianKernel(const Dtype eps, const int imrows,
const int imcols, const uchar* src,
int *rowInds, int *colInds, Dtype *vals)
{
int w_rad = 1;
int neb_size = (2 * w_rad + 1)*(2 * w_rad + 1);
int c = 3; // hard code the channels
  // window_size is the number of upper-triangular entries of the per-window laplacian
int window_size = neb_size*(neb_size + 1) / 2;
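  // each thread handles one interior pixel (idx_x, idx_y) and emits the upper-triangular
  // (j >= i) entries of its window laplacian into the COO buffers at pBufferOffset;
  // overlapping windows produce duplicate (row, col) pairs that are summed later by
  // cusparseDcoodup2coo_compress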
// using y, x coordinate
unsigned int idx_x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int idx_y = threadIdx.y + blockIdx.y * blockDim.y;
Dtype imgW[9 * 3]; // hard coded as neb_size = 9, channels = 3
__shared__ Dtype win_mu[BLOCK_DIM*BLOCK_DIM*3];
__shared__ Dtype win_var[BLOCK_DIM*BLOCK_DIM*3*3];
__shared__ Dtype win_var_inv[BLOCK_DIM*BLOCK_DIM*3*3];
unsigned int idx_block = threadIdx.x + threadIdx.y*blockDim.x;
Dtype win_val[9 * 9]; // neb_size = 9
int pBufferOffset = ((idx_y - w_rad)*(imcols - 2 * w_rad) + idx_x - w_rad)*window_size;
if (idx_y >= w_rad && idx_y < imrows - w_rad) {
if (idx_x >= w_rad && idx_x < imcols - w_rad) {
extract_window<Dtype>(src, idx_x, idx_y, c, w_rad, imcols, imgW);
calc_mu<Dtype>(neb_size, c, imgW, win_mu+idx_block*3);
calc_var<Dtype>(imgW, win_mu+idx_block*3, neb_size, c, eps, win_var + 3*3*idx_block);
calc_inverse3X3<Dtype>(win_var + 3*3*idx_block, win_var_inv + 3*3*idx_block);
// calc_inverse<Dtype>(win_var + 3*3*idx_block, c, win_var_inv + 3*3*idx_block);
calc_val<Dtype>(imgW, win_var_inv + 3*3*idx_block, win_mu+idx_block*3,
neb_size, c, win_val);
// fill in val and inds
fill_inds(imcols, idx_x, idx_y, w_rad, rowInds + pBufferOffset, colInds + pBufferOffset);
fill_vals<Dtype>(neb_size, win_val, vals + pBufferOffset);
}
}
}
void getCfLaplacianDCoo_gpu(cusparseHandle_t cusparseHandle, const double eps,
const int w_rad, const cv::Mat src,
int **rowPtrs, int **colInds, double **vals, int *nnz)
{
// host set up
int c = src.channels();
int imrows = src.rows;
int imcols = src.cols;
int neb_size = (2 * w_rad + 1)*(2 * w_rad + 1);
int imgSize = imrows * imcols;
  // window_size is the number of upper-triangular entries of the per-window laplacian
int window_size = neb_size * (neb_size + 1) / 2;
int num_window = (imrows - 2 * w_rad)*(imcols - 2 * w_rad);
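  // one window per interior pixel, window_size COO entries per window; the buffers below
  // are filled by the kernel, duplicates are then summed by cusparseDcoodup2coo_compress
  // and the result is converted to CSR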
// cuda set up
int *d_rowBuffer, *d_colBuffer, *d_rowInds;
uchar *d_src;
double *d_valBuffer;
CUDA_CHECK(cudaMalloc((void**)&d_rowBuffer, sizeof(int)*window_size*num_window));
CUDA_CHECK(cudaMalloc((void**)&d_colBuffer, sizeof(int)*window_size*num_window));
CUDA_CHECK(cudaMalloc((void**)&d_valBuffer, sizeof(double)*window_size*num_window));
// CUDA_CHECK(cudaMalloc((void**)&d_src, sizeof(double)*imrows*imcols));
CUDA_CHECK(cudaMalloc((void**)&d_src, sizeof(uchar)*c*imrows*imcols));
CUDA_CHECK(cudaMemcpy(d_src, src.data, sizeof(uchar)*c*imrows*imcols, cudaMemcpyDefault));
getCfLaplacianKernel<double>
<<<dim3((imcols+BLOCK_DIM-1)/BLOCK_DIM,(imrows + BLOCK_DIM-1)/BLOCK_DIM),
dim3(BLOCK_DIM, BLOCK_DIM) >>>
(eps, imrows, imcols, d_src, d_rowBuffer, d_colBuffer, d_valBuffer);
CUDA_CHECK(cudaFree(d_src));
cudaDeviceSynchronize();
cusparseDcoodup2coo_compress(\
cusparseHandle, window_size * num_window, imgSize, imgSize, \
d_valBuffer, d_rowBuffer, d_colBuffer, \
vals, &d_rowInds, colInds, nnz);
CUDA_CHECK(cudaFree(d_valBuffer));
CUDA_CHECK(cudaFree(d_colBuffer));
CUDA_CHECK(cudaFree(d_rowBuffer));
// transform into csr format
CUDA_CHECK(cudaMalloc((void**)&(*rowPtrs), sizeof(int)*(imgSize + 1)));
CUDA_CHECK(cusparseXcoo2csr(cusparseHandle, d_rowInds, *nnz, imgSize, *rowPtrs, \
CUSPARSE_INDEX_BASE_ZERO));
CUDA_CHECK(cudaFree(d_rowInds));
}
cv::Mat createIndsMat(const int imrows, const int imcols) {
cv::Mat inds(imrows, imcols, CV_32S);
int imagesize = imrows*imcols;
int *data = (int*)inds.data;
#pragma omp parallel for
for (int i = 0; i < imagesize; ++i) {
data[i] = i;
}
return inds;
}
void pardiso_solve_sym(const MKL_INT numRows, const MKL_INT lhsNnz,
const int* lhsRowPtrs, const int* lhsColInds, const double* lhsVals,
const double *rhsVals, double *res) {
// copy and store the data in MKL_INT so the code can be run under mkl_ilp64
MKL_INT* rowPtrs_t = (MKL_INT*)malloc(sizeof(MKL_INT)*(numRows + 1));
MKL_INT* colInds_t = (MKL_INT*)malloc(sizeof(MKL_INT)*lhsNnz);
for (MKL_INT i = 0; i < lhsNnz; ++i) {
colInds_t[i] = (MKL_INT)lhsColInds[i];
}
for (MKL_INT i = 0; i < numRows + 1; ++i) {
rowPtrs_t[i] = (MKL_INT)lhsRowPtrs[i];
}
MKL_INT mtype = -2; /* Real symmetric matrix */
// MKL_INT mtype = 2; /* real positive symmetric matrix */
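  // note: with a symmetric mtype PARDISO expects only the upper triangle of the matrix
  // in CSR, which matches the j >= i entries emitted by getCfLaplacianDCoo_gpu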
MKL_INT nrhs = 1; /* Number of right hand sides. */
void *pt[64];
/* Pardiso control parameters. */
MKL_INT iparm[64];
MKL_INT maxfct, mnum, phase, error = 0, msglvl;
/* Auxiliary variables. */
double ddum; /* Double dummy */
MKL_INT idum; /* Integer dummy. */
// set up parameter
for (MKL_INT i = 0; i < 64; i++)
{
iparm[i] = 0;
}
iparm[0] = 1; /* No solver default */
// iparm[1] = 2; /* Fill-in reordering from METIS */
iparm[1] = 3; /* parallel version of METIS */
iparm[3] = 0; /* No iterative-direct algorithm */
iparm[4] = 0; /* No user fill-in reducing permutation */
iparm[5] = 0; /* Write solution into x */
iparm[7] = 2; /* Max numbers of iterative refinement steps */
iparm[9] = 13; /* Perturb the pivot elements with 1E-13 */
iparm[10] = 1; /* Use nonsymmetric permutation and scaling MPS */
iparm[12] = 0; /* Maximum weighted matching algorithm is switched-off (default for symmetric) */
iparm[13] = 0; /* Output: Number of perturbed pivots */
iparm[17] = -1; /* Output: Number of nonzeros in the factor LU */
iparm[18] = -1; /* Output: Mflops for LU factorization */
iparm[19] = 0; /* Output: Numbers of CG Iterations */
iparm[34] = 1; /* set to 0-based index */
maxfct = 1; /* Maximum number of numerical factorizations. */
mnum = 1; /* Which factorization to use. */
msglvl = 0; /* Don't print statistical information in file */
error = 0; /* Initialize error flag */
for (MKL_INT i = 0; i < 64; i++){
pt[i] = 0;
}
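  // PARDISO phases used below: 11 = analysis/reordering, 22 = numerical factorization,
  // 33 = solve with iterative refinement, -1 = release internal memory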
// reorder and allocate memory
phase = 11;
PARDISO(pt, &maxfct, &mnum, &mtype, &phase,
&numRows, lhsVals, rowPtrs_t, colInds_t, &idum, &nrhs, iparm, &msglvl, &ddum, &ddum, &error);
if (error != 0){
printf("\nERROR during symbolic factorization: %d", error);
exit(1);
}
// factorization
phase = 22;
PARDISO(pt, &maxfct, &mnum, &mtype, &phase,
&numRows, lhsVals, rowPtrs_t, colInds_t, &idum, &nrhs, iparm, &msglvl, &ddum, &ddum, &error);
if (error != 0){
printf("\nERROR during numerical factorization: %d", error);
exit(2);
}
// Back substitution and iterative refinement.
phase = 33;
iparm[7] = 2; // Max numbers of iterative refinement steps.
PARDISO(pt, &maxfct, &mnum, &mtype, &phase,
&numRows, lhsVals, rowPtrs_t, colInds_t, &idum, &nrhs, iparm, &msglvl, (void*)rhsVals, res, &error);
if (error != 0){
printf("\nERROR during solution: %d", error);
exit(3);
}
// Termination and release of memory.
phase = -1; /* Release internal memory. */
PARDISO(pt, &maxfct, &mnum, &mtype, &phase,
&numRows, &ddum, rowPtrs_t, colInds_t, &idum, &nrhs,
iparm, &msglvl, &ddum, &ddum, &error);
free(rowPtrs_t);
free(colInds_t);
}
void setTRI(cv::String tri_path, const cv::Mat src, cv::Mat &all, cv::Mat &fore) {
  // read the image and perform some sanity checks
cv::Mat trimap = cv::imread(tri_path, CV_LOAD_IMAGE_COLOR);
if (src.rows != trimap.rows || src.cols != trimap.cols) {
std::cout << "Dimension Not Match" << std::endl;
exit(EXIT_FAILURE);
}
cv::Mat channels[3];
cv::Mat src_tmp;
src.convertTo(src_tmp, CV_64FC3);
trimap.convertTo(trimap, CV_64FC3);
cv::split(src_tmp, channels);
src_tmp = (channels[0] + channels[1] + channels[2]) / 3.0;
cv::split(trimap, channels);
trimap = (channels[0] + channels[1] + channels[2]) / 3.0;
trimap = trimap - src_tmp;
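  // pixels where the trimap is brighter than the source image are treated as foreground
  // scribbles; 'all' marks every annotated pixel (brighter or darker than the source)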
fore = trimap > 0.02;
all = trimap < -0.02 | trimap > 0.02;
fore.convertTo(fore, CV_64FC1);
all.convertTo(all, CV_64FC1);
}
|
ff37fba184216fe59dac676ccd1b8beeab19ac42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fstream>
#include <stdio.h>
#include <iostream>
#define H2D hipMemcpyHostToDevice
#define D2H hipMemcpyDeviceToHost
#define OK hipSuccess
using namespace std;
typedef uint32_t uint;
typedef unsigned char BYTE;
//CPU
uint i = 0, ind = 0;
const uint N = 8E3;
//GPU
__global__ void emptyKernel( BYTE *d_in )
{ printf( "gpu read file:[" );
for ( uint i = 0; i < 4; i++ )
printf( "%X", d_in[ i ] );
printf( "...]\n" );
};
int main( void )
{
// write to file
BYTE outputToFile[ N ]; memset( &outputToFile, 0xAB, N );
uint fileSize = N * sizeof( BYTE );
char fileName[] = "tmpBinaryFile.txt";
ofstream outFile( fileName, ofstream::binary ); //|ofstream::app
outFile.write( ( char* )&outputToFile[ 0 ], fileSize );
outFile.close();
printf( "fileSize : %i\n", fileSize );
printf( "saved HDD file:[" );
for ( i = 0; i < 4; i++ )
printf( "%X", outputToFile[ i ] );
printf( "...]\n" );
// read from file
ifstream inFile( fileName, ifstream::binary );
inFile.seekg( 0, inFile.end ); uint fileSizeRead = inFile.tellg();
printf( "fileSizeRead : %i\n", fileSizeRead );
inFile.seekg( 0, inFile.beg );
BYTE *inputFromFile = ( BYTE* )malloc( fileSizeRead );
inFile.read( ( char* )inputFromFile, fileSizeRead );
inFile.close();
printf( "read HDD file:[" );
for ( i = 0; i < 4; i++ )
printf( "%X", inputFromFile[ i ] );
printf( "...]\n" );
// copy file to GPU
BYTE* d_fromFile[ 1 ];
if ( hipMalloc( ( void** )&d_fromFile[ 0 ], fileSize ) != hipSuccess ) { printf( "hipMalloc err!\n" ); return -1; };
hipMemcpyAsync( d_fromFile[ 0 ], inputFromFile, fileSizeRead, H2D );
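 // the async copy above and the kernel launch below are issued on the same default
 // stream, so the kernel only reads d_fromFile after the copy has completed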
hipLaunchKernelGGL(( emptyKernel), dim3(1), dim3(1) , 0, 0, d_fromFile[ 0 ] );
 free( inputFromFile ); // allocated with malloc
hipFree( d_fromFile[ 0 ] );
return 0;
}; //end of main()
| ff37fba184216fe59dac676ccd1b8beeab19ac42.cu | #include <fstream>
#include <stdio.h>
#include <iostream>
#define H2D cudaMemcpyHostToDevice
#define D2H cudaMemcpyDeviceToHost
#define OK cudaSuccess
using namespace std;
typedef uint32_t uint;
typedef unsigned char BYTE;
//CPU
uint i = 0, ind = 0;
const uint N = 8E3;
//GPU
__global__ void emptyKernel( BYTE *d_in )
{ printf( "gpu read file:[" );
for ( uint i = 0; i < 4; i++ )
printf( "%X", d_in[ i ] );
printf( "...]\n" );
};
int main( void )
{
// write to file
BYTE outputToFile[ N ]; memset( &outputToFile, 0xAB, N );
uint fileSize = N * sizeof( BYTE );
char fileName[] = "tmpBinaryFile.txt";
ofstream outFile( fileName, ofstream::binary ); //|ofstream::app
outFile.write( ( char* )&outputToFile[ 0 ], fileSize );
outFile.close();
printf( "fileSize : %i\n", fileSize );
printf( "saved HDD file:[" );
for ( i = 0; i < 4; i++ )
printf( "%X", outputToFile[ i ] );
printf( "...]\n" );
// read from file
ifstream inFile( fileName, ifstream::binary );
inFile.seekg( 0, inFile.end ); uint fileSizeRead = inFile.tellg();
printf( "fileSizeRead : %i\n", fileSizeRead );
inFile.seekg( 0, inFile.beg );
BYTE *inputFromFile = ( BYTE* )malloc( fileSizeRead );
inFile.read( ( char* )inputFromFile, fileSizeRead );
inFile.close();
printf( "read HDD file:[" );
for ( i = 0; i < 4; i++ )
printf( "%X", inputFromFile[ i ] );
printf( "...]\n" );
// copy file to GPU
BYTE* d_fromFile[ 1 ];
if ( cudaMalloc( ( void** )&d_fromFile[ 0 ], fileSize ) != cudaSuccess ) { printf( "cudaMalloc err!\n" ); return -1; };
cudaMemcpyAsync( d_fromFile[ 0 ], inputFromFile, fileSizeRead, H2D );
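 // the async copy above and the kernel launch below are issued on the same default
 // stream, so the kernel only reads d_fromFile after the copy has completed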
emptyKernel<<< 1, 1 >>>( d_fromFile[ 0 ] );
 free( inputFromFile ); // allocated with malloc
cudaFree( d_fromFile[ 0 ] );
return 0;
}; //end of main()
|
37b95292f2aeb0f4f0d4432d7e8f702ad1fb8640.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cmath>
#include <dlib/image_transforms.h>
#include "textify.h"
#include "cuda_helpers.h"
static __global__ void gpu_gaussian_blur_h(
textify::gpu_image src,
textify::gpu_image dst,
textify::gpu_gaussian_filter gpu_filter
) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
size_t width = src.width, height = src.height;
if (x >= width || y >= height) {
return;
}
auto *filter = gpu_filter.ptr;
size_t filter_sz = gpu_filter.size;
dlib::rgb_pixel p = src.pixels[y * width + x];
unsigned int r = 0, g = 0, b = 0;
    // filter_sz is unsigned, so cast before negating to start at the left edge of the filter
    for (long k = -(long)(filter_sz / 2); k <= (long)(filter_sz / 2); ++k) {
if (k + x >= width || k + x < 0) {
continue;
}
dlib::rgb_pixel t = src.pixels[y * width + x + k];
r += t.red * filter[k + filter_sz / 2];
g += t.green * filter[k + filter_sz / 2];
b += t.blue * filter[k + filter_sz / 2];
}
p.red = r / 1024;
p.green = g / 1024;
p.blue = b / 1024;
dst.pixels[y * width + x] = p;
}
static __global__ void gpu_gaussian_blur_v(
textify::gpu_image src,
textify::gpu_image dst,
textify::gpu_gaussian_filter gpu_filter
) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
size_t width = src.width, height = src.height;
if (x >= width || y >= height) {
return;
}
auto *filter = gpu_filter.ptr;
size_t filter_sz = gpu_filter.size;
dlib::rgb_pixel p = src.pixels[y * width + x];
unsigned int r = 0, g = 0, b = 0;
    // filter_sz is unsigned, so cast before negating to start at the top edge of the filter
    for (long k = -(long)(filter_sz / 2); k <= (long)(filter_sz / 2); ++k) {
if (k + y >= height || k + y < 0) {
continue;
}
dlib::rgb_pixel t = src.pixels[(y + k) * width + x];
r += t.red * filter[k + filter_sz / 2];
g += t.green * filter[k + filter_sz / 2];
b += t.blue * filter[k + filter_sz / 2];
}
p.red = r / 1024;
p.green = g / 1024;
p.blue = b / 1024;
dst.pixels[y * width + x] = p;
}
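// "divide" blend: layer2 = min(255, layer1 * 256 / (layer2 + 1)); dividing an image by
// its blurred copy removes the smooth background and keeps high-frequency detail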
static __global__ void gpu_divide(
textify::gpu_image layer1,
textify::gpu_image layer2
) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
size_t width = layer1.width, height = layer1.height;
if (x >= width || y >= height) {
return;
}
dlib::rgb_pixel p1 = layer1.pixels[y * width + x];
dlib::rgb_pixel p2 = layer2.pixels[y * width + x];
unsigned int r, g, b;
r = p1.red * 256 / (p2.red + 1);
g = p1.green * 256 / (p2.green + 1);
b = p1.blue * 256 / (p2.blue + 1);
p2.red = r > 255 ? 255 : r;
p2.green = g > 255 ? 255 : g;
p2.blue = b > 255 ? 255 : b;
layer2.pixels[y * width + x] = p2;
}
namespace textify {
void gaussian_blur(const gpu_image& src, gpu_image& dst, gpu_gaussian_filter filter) {
hipStream_t stream;
hipStreamCreate(&stream);
gpu_image tmp{};
hipMalloc((void **) &(tmp.pixels), src.width * src.height * sizeof(dlib::rgb_pixel));
tmp.width = src.width;
tmp.height = src.height;
dim3 thr_per_block(16, 16);
dim3 blocks_count((src.width + 16) / thr_per_block.x, (src.height + 16) / thr_per_block.y);
hipLaunchKernelGGL(( gpu_gaussian_blur_h) , dim3(blocks_count), dim3(thr_per_block), 0, stream , src, tmp, filter);
hipLaunchKernelGGL(( gpu_gaussian_blur_v) , dim3(blocks_count), dim3(thr_per_block), 0, stream , tmp, dst, filter);
hipStreamSynchronize(stream);
hipFree(tmp.pixels);
hipStreamDestroy(stream);
}
void divide(const gpu_image& layer1, gpu_image& layer2) {
hipStream_t stream;
hipStreamCreate(&stream);
dim3 thr_per_block(16, 16);
dim3 blocks_count((layer1.width + 16) / thr_per_block.x, (layer1.height + 16) / thr_per_block.y);
hipLaunchKernelGGL(( gpu_divide) , dim3(blocks_count), dim3(thr_per_block), 0, stream , layer1, layer2);
hipStreamSynchronize(stream);
hipStreamDestroy(stream);
}
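// full pipeline: blur the source with a large gaussian (create_gpu_gaussian_filter(40)),
// then divide the source by the blurred copy so the flat background goes to white while
// dark text and edges are preserved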
void textify(const gpu_image& src, gpu_image& dst) {
hipStream_t stream;
hipStreamCreate(&stream);
gpu_image tmp{};
hipMalloc((void **) &(tmp.pixels), src.width * src.height * sizeof(dlib::rgb_pixel));
tmp.width = src.width;
tmp.height = src.height;
textify::gpu_gaussian_filter filter = textify::create_gpu_gaussian_filter(40);
dim3 thr_per_block(16, 16);
dim3 blocks_count((src.width + 16) / thr_per_block.x, (src.height + 16) / thr_per_block.y);
hipLaunchKernelGGL(( gpu_gaussian_blur_h) , dim3(blocks_count), dim3(thr_per_block), 0, stream , src, tmp, filter);
hipLaunchKernelGGL(( gpu_gaussian_blur_v) , dim3(blocks_count), dim3(thr_per_block), 0, stream , tmp, dst, filter);
hipLaunchKernelGGL(( gpu_divide) , dim3(blocks_count), dim3(thr_per_block), 0, stream , src, dst);
hipStreamSynchronize(stream);
hipFree(tmp.pixels);
hipFree(filter.ptr);
hipStreamDestroy(stream);
}
} | 37b95292f2aeb0f4f0d4432d7e8f702ad1fb8640.cu | #include <cstdlib>
#include <cmath>
#include <dlib/image_transforms.h>
#include "textify.h"
#include "cuda_helpers.h"
static __global__ void gpu_gaussian_blur_h(
textify::gpu_image src,
textify::gpu_image dst,
textify::gpu_gaussian_filter gpu_filter
) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
size_t width = src.width, height = src.height;
if (x >= width || y >= height) {
return;
}
auto *filter = gpu_filter.ptr;
size_t filter_sz = gpu_filter.size;
dlib::rgb_pixel p = src.pixels[y * width + x];
unsigned int r = 0, g = 0, b = 0;
    // filter_sz is unsigned, so cast before negating to start at the left edge of the filter
    for (long k = -(long)(filter_sz / 2); k <= (long)(filter_sz / 2); ++k) {
if (k + x >= width || k + x < 0) {
continue;
}
dlib::rgb_pixel t = src.pixels[y * width + x + k];
r += t.red * filter[k + filter_sz / 2];
g += t.green * filter[k + filter_sz / 2];
b += t.blue * filter[k + filter_sz / 2];
}
p.red = r / 1024;
p.green = g / 1024;
p.blue = b / 1024;
dst.pixels[y * width + x] = p;
}
static __global__ void gpu_gaussian_blur_v(
textify::gpu_image src,
textify::gpu_image dst,
textify::gpu_gaussian_filter gpu_filter
) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
size_t width = src.width, height = src.height;
if (x >= width || y >= height) {
return;
}
auto *filter = gpu_filter.ptr;
size_t filter_sz = gpu_filter.size;
dlib::rgb_pixel p = src.pixels[y * width + x];
unsigned int r = 0, g = 0, b = 0;
    // filter_sz is unsigned, so cast before negating to start at the top edge of the filter
    for (long k = -(long)(filter_sz / 2); k <= (long)(filter_sz / 2); ++k) {
if (k + y >= height || k + y < 0) {
continue;
}
dlib::rgb_pixel t = src.pixels[(y + k) * width + x];
r += t.red * filter[k + filter_sz / 2];
g += t.green * filter[k + filter_sz / 2];
b += t.blue * filter[k + filter_sz / 2];
}
p.red = r / 1024;
p.green = g / 1024;
p.blue = b / 1024;
dst.pixels[y * width + x] = p;
}
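// "divide" blend: layer2 = min(255, layer1 * 256 / (layer2 + 1)); dividing an image by
// its blurred copy removes the smooth background and keeps high-frequency detail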
static __global__ void gpu_divide(
textify::gpu_image layer1,
textify::gpu_image layer2
) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
size_t width = layer1.width, height = layer1.height;
if (x >= width || y >= height) {
return;
}
dlib::rgb_pixel p1 = layer1.pixels[y * width + x];
dlib::rgb_pixel p2 = layer2.pixels[y * width + x];
unsigned int r, g, b;
r = p1.red * 256 / (p2.red + 1);
g = p1.green * 256 / (p2.green + 1);
b = p1.blue * 256 / (p2.blue + 1);
p2.red = r > 255 ? 255 : r;
p2.green = g > 255 ? 255 : g;
p2.blue = b > 255 ? 255 : b;
layer2.pixels[y * width + x] = p2;
}
namespace textify {
void gaussian_blur(const gpu_image& src, gpu_image& dst, gpu_gaussian_filter filter) {
cudaStream_t stream;
cudaStreamCreate(&stream);
gpu_image tmp{};
cudaMalloc((void **) &(tmp.pixels), src.width * src.height * sizeof(dlib::rgb_pixel));
tmp.width = src.width;
tmp.height = src.height;
dim3 thr_per_block(16, 16);
dim3 blocks_count((src.width + 16) / thr_per_block.x, (src.height + 16) / thr_per_block.y);
gpu_gaussian_blur_h <<< blocks_count, thr_per_block, 0, stream >>> (src, tmp, filter);
gpu_gaussian_blur_v <<< blocks_count, thr_per_block, 0, stream >>> (tmp, dst, filter);
cudaStreamSynchronize(stream);
cudaFree(tmp.pixels);
cudaStreamDestroy(stream);
}
void divide(const gpu_image& layer1, gpu_image& layer2) {
cudaStream_t stream;
cudaStreamCreate(&stream);
dim3 thr_per_block(16, 16);
dim3 blocks_count((layer1.width + 16) / thr_per_block.x, (layer1.height + 16) / thr_per_block.y);
gpu_divide <<< blocks_count, thr_per_block, 0, stream >>> (layer1, layer2);
cudaStreamSynchronize(stream);
cudaStreamDestroy(stream);
}
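// full pipeline: blur the source with a large gaussian (create_gpu_gaussian_filter(40)),
// then divide the source by the blurred copy so the flat background goes to white while
// dark text and edges are preserved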
void textify(const gpu_image& src, gpu_image& dst) {
cudaStream_t stream;
cudaStreamCreate(&stream);
gpu_image tmp{};
cudaMalloc((void **) &(tmp.pixels), src.width * src.height * sizeof(dlib::rgb_pixel));
tmp.width = src.width;
tmp.height = src.height;
textify::gpu_gaussian_filter filter = textify::create_gpu_gaussian_filter(40);
dim3 thr_per_block(16, 16);
dim3 blocks_count((src.width + 16) / thr_per_block.x, (src.height + 16) / thr_per_block.y);
gpu_gaussian_blur_h <<< blocks_count, thr_per_block, 0, stream >>> (src, tmp, filter);
gpu_gaussian_blur_v <<< blocks_count, thr_per_block, 0, stream >>> (tmp, dst, filter);
gpu_divide <<< blocks_count, thr_per_block, 0, stream >>> (src, dst);
cudaStreamSynchronize(stream);
cudaFree(tmp.pixels);
cudaFree(filter.ptr);
cudaStreamDestroy(stream);
}
} |
62ee075a6bd0d273059da9028f64d07be6da4bb8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_right;
int xdim0_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_right;
int ydim0_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_right;
int xdim1_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_right;
int ydim1_update_halo_kernel5_plus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_4_right*(y)+xdim0_update_halo_kernel5_plus_4_right*ydim0_update_halo_kernel5_plus_4_right*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_4_right*(y)+xdim1_update_halo_kernel5_plus_4_right*ydim1_update_halo_kernel5_plus_4_right*(z))
//user function
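// right-boundary halo update: for each field enabled in 'fields', the halo cell is
// assigned the value at offset (-4,0,0), i.e. four cells inward along the first dimension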
__device__
inline void update_halo_kernel5_plus_4_right_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = (vol_flux_z[OPS_ACC0(-4,0,0)]);
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = (mass_flux_z[OPS_ACC1(-4,0,0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_plus_4_right + idx_z * 1*1 * xdim0_update_halo_kernel5_plus_4_right * ydim0_update_halo_kernel5_plus_4_right;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_plus_4_right + idx_z * 1*1 * xdim1_update_halo_kernel5_plus_4_right * ydim1_update_halo_kernel5_plus_4_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_4_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,90)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(90,"update_halo_kernel5_plus_4_right");
OPS_kernels[90].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_right_h || ydim0 != ydim0_update_halo_kernel5_plus_4_right_h || xdim1 != xdim1_update_halo_kernel5_plus_4_right_h || ydim1 != ydim1_update_halo_kernel5_plus_4_right_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel5_plus_4_right, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_4_right_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel5_plus_4_right, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_4_right_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel5_plus_4_right, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_4_right_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel5_plus_4_right, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[90].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_4_right), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[90].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[90].mpi_time += t2-t1;
OPS_kernels[90].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[90].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 90;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 90;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(90,"update_halo_kernel5_plus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
| 62ee075a6bd0d273059da9028f64d07be6da4bb8.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_right;
int xdim0_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_right;
int ydim0_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_right;
int xdim1_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_right;
int ydim1_update_halo_kernel5_plus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_4_right*(y)+xdim0_update_halo_kernel5_plus_4_right*ydim0_update_halo_kernel5_plus_4_right*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_4_right*(y)+xdim1_update_halo_kernel5_plus_4_right*ydim1_update_halo_kernel5_plus_4_right*(z))
//user function
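// right-boundary halo update: for each field enabled in 'fields', the halo cell is
// assigned the value at offset (-4,0,0), i.e. four cells inward along the first dimension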
__device__
inline void update_halo_kernel5_plus_4_right_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = (vol_flux_z[OPS_ACC0(-4,0,0)]);
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = (mass_flux_z[OPS_ACC1(-4,0,0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_plus_4_right + idx_z * 1*1 * xdim0_update_halo_kernel5_plus_4_right * ydim0_update_halo_kernel5_plus_4_right;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_plus_4_right + idx_z * 1*1 * xdim1_update_halo_kernel5_plus_4_right * ydim1_update_halo_kernel5_plus_4_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_4_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,90)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(90,"update_halo_kernel5_plus_4_right");
OPS_kernels[90].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_right_h || ydim0 != ydim0_update_halo_kernel5_plus_4_right_h || xdim1 != xdim1_update_halo_kernel5_plus_4_right_h || ydim1 != ydim1_update_halo_kernel5_plus_4_right_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel5_plus_4_right, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_4_right_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel5_plus_4_right, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_4_right_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel5_plus_4_right, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_4_right_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel5_plus_4_right, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[90].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel5_plus_4_right<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[90].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[90].mpi_time += t2-t1;
OPS_kernels[90].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[90].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 90;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 90;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(90,"update_halo_kernel5_plus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
|
698281fce426be7608004cb8b0e2d22f818f3689.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset.
*/
__global__
void zgeadd_full(
int m, int n,
magmaDoubleComplex alpha,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
}
}
}
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two matrices, dB = alpha*dA + dB.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha COMPLEX_16
The scalar alpha.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in,out]
dB COMPLEX_16 array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
extern "C" void
magmablas_zgeadd(
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 )
return;
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
hipLaunchKernelGGL(( zgeadd_full), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, dA, ldda, dB, lddb );
}
| 698281fce426be7608004cb8b0e2d22f818f3689.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset.
*/
__global__
void zgeadd_full(
int m, int n,
magmaDoubleComplex alpha,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + dB[j*lddb];
}
}
}
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two matrices, dB = alpha*dA + dB.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha COMPLEX_16
The scalar alpha.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in,out]
dB COMPLEX_16 array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
extern "C" void
magmablas_zgeadd(
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 )
return;
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
zgeadd_full<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, alpha, dA, ldda, dB, lddb );
}
|
3675da7d335c08c2e4ba569e5408720c11dfd79c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::batch_norm(const Tensor& input,
bool relu)
{
assert(input.numDim == 4); //Only support 4D BN for now
BatchNorm *bn = new BatchNorm(*this, input, relu);
layers.push_back(bn);
return bn->outputs[0];
}
/*
locals[0] = scale
locals[1] = bias
*/
BatchNorm::BatchNorm(FFModel& model,
const Tensor& _input,
bool _relu)
: Op(model, OP_BATCHNORM, "BatchNorm", _input), relu(_relu), profiling(model.config.profiling)
{
Context ctx = model.config.lg_ctx;
HighLevelRuntime* runtime = model.config.lg_hlr;
Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is);
num_replica = part_rect.volume();
// Create output tensor
int output_w = _input.adim[0];
int output_h = _input.adim[1];
int output_c = _input.adim[2];
int output_n = _input.adim[3];
int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1;
int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1;
int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1;
int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1;
FieldSpace fs = model.config.field_space;
Rect<4> output_rect(Point<4>(0, 0, 0, 0),
Point<4>(output_w-1, output_h-1, output_c-1, output_n-1));
IndexSpaceT<4> output_is = runtime->create_index_space(ctx, output_rect);
LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs);
LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs);
int extent_w = (output_w + num_par_w - 1) / num_par_w;
int extent_h = (output_h + num_par_h - 1) / num_par_h;
int extent_c = output_c / num_par_c;
int extent_n = output_n / num_par_n;
assert(output_c % num_par_c == 0);
assert(output_n % num_par_n == 0);
Rect<4> ext(Point<4>(0, 0, 0, 0),
Point<4>(extent_w-1, extent_h-1, extent_c-1, extent_n-1));
Transform<4, 4, coord_t> trans;
for (int i = 0; i < 4; i++)
for (int j = 0; j < 4; j++)
trans[i][j] = 0;
trans[0][0] = extent_w;
trans[1][1] = extent_h;
trans[2][2] = extent_c;
trans[3][3] = extent_n;
IndexPartition output_ip =
runtime->create_partition_by_restriction(ctx, output_is, task_is, trans, ext);
assert(runtime->is_index_partition_disjoint(ctx, output_ip));
assert(runtime->is_index_partition_complete(ctx, output_ip));
LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip);
LogicalPartition output_grad_lp =
runtime->get_logical_partition(ctx, output_grad_lr, output_ip);
int bias_nc = num_replica * _input.adim[2]; /*input_channels*/
Rect<1, coord_t> bias_grad_rect(0, bias_nc - 1);
Rect<1, coord_t> bias_rect(0, _input.adim[2] - 1);
IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect);
IndexSpaceT<1> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect);
LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs);
LogicalRegion scale_lr = runtime->create_logical_region(ctx, bias_is, fs);
LogicalRegion bias_grad_lr =
runtime->create_logical_region(ctx, bias_grad_is, fs);
LogicalRegion scale_grad_lr =
runtime->create_logical_region(ctx, bias_grad_is, fs);
IndexPartition bias_grad_ip =
runtime->create_equal_partition(ctx, bias_grad_is, task_is);
LogicalPartition bias_grad_lp =
runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip);
LogicalPartition scale_grad_lp =
runtime->get_logical_partition(ctx, scale_grad_lr, bias_grad_ip);
Parameter scale_tensor, bias_tensor;
scale_tensor.region = scale_lr;
scale_tensor.region_grad = scale_grad_lr;
scale_tensor.part = LogicalPartition::NO_PART;
scale_tensor.part_grad = scale_grad_lp;
weights[0] = scale_tensor;
bias_tensor.region = bias_lr;
bias_tensor.region_grad = bias_grad_lr;
bias_tensor.part = LogicalPartition::NO_PART;
bias_tensor.part_grad = bias_grad_lp;
weights[1] = bias_tensor;
numWeights = 2;
outputs[0] = _input;
outputs[0].region = output_lr;
outputs[0].part = output_lp;
outputs[0].region_grad = output_grad_lr;
outputs[0].part_grad = output_grad_lp;
printf("Create bn layer: output(%d %d %d %d)\n",
outputs[0].adim[3], outputs[0].adim[2], outputs[0].adim[1], outputs[0].adim[0]);
input_lps[0] = _input.part;
}
void BatchNorm::create_weights(FFModel& model)
{
// TODO
assert(false);
}
void BatchNorm::create_output_and_partition(FFModel& model)
{
// TODO
assert(false);
}
/*
regions[0]: input
regions[1]: output
regions[2](I): scale
regions[3](I): bias
*/
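// sets up cuDNN: NCHW tensor descriptors for input/output, a per-channel (1,C,1,1)
// descriptor for scale/bias, per-channel buffers for the running and saved batch
// statistics, and an optional fused ReLU activation descriptor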
__host__
OpMeta* BatchNorm::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
const BatchNorm* bm = (BatchNorm*) task->args;
FFHandler handle = *((const FFHandler*) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_scale(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
BatchNormMeta* m = new BatchNormMeta(handle);
m->relu = bm->relu;
m->mode = CUDNN_BATCHNORM_SPATIAL;
#if CUDNN_VERSION >= 7000
m->mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#endif
checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&m->biasTensor));
assert(acc_input.rect == acc_output.rect);
int input_w = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int input_h = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1;
int input_c = acc_input.rect.hi[2] - acc_input.rect.lo[2] + 1;
int input_n = acc_input.rect.hi[3] - acc_input.rect.lo[3] + 1;
int output_w = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
int output_h = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1;
int output_c = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1;
int output_n = acc_output.rect.hi[3] - acc_output.rect.lo[3] + 1;
checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
input_n, input_c,
input_h, input_w));
checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
output_n, output_c,
output_h, output_w));
checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1, output_c, 1, 1));
//float *runningMean, *runningVar, *saveMean, *saveVar;
checkCUDA(hipMalloc(&m->runningMean, sizeof(float) * output_c));
checkCUDA(hipMalloc(&m->runningVar, sizeof(float) * output_c));
checkCUDA(hipMalloc(&m->saveMean, sizeof(float) * output_c));
checkCUDA(hipMalloc(&m->saveVar, sizeof(float) * output_c));
if (m->relu) {
checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc));
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU,
CUDNN_PROPAGATE_NAN, 0.0));
}
return m;
}
/*
  regions[0](O): scale, initialized to ones
  regions[1](O): bias, initialized to zeros
*/
__host__
void BatchNorm::init_para_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
//const BatchNorm* bm = (BatchNorm*) task->args;
const AccessorWO<float, 1> acc_scale(regions[0], FID_DATA);
const AccessorWO<float, 1> acc_bias(regions[1], FID_DATA);
Rect<1> rect_scale, rect_bias;
rect_scale = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_bias = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
assert(acc_scale.accessor.is_dense_arbitrary(rect_scale));
assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
float *scale_ptr = acc_scale.ptr(rect_scale.lo);
float *bias_ptr = acc_bias.ptr(rect_bias.lo);
// init kernel and bias
#ifdef PARAMETER_ALL_ONES
hipLaunchKernelGGL(( ones_kernel), dim3(GET_BLOCKS(rect_scale.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
scale_ptr, rect_scale.volume());
hipLaunchKernelGGL(( ones_kernel), dim3(GET_BLOCKS(rect_bias.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
bias_ptr, rect_bias.volume());
#else
//hipStream_t stream;
//checkCUDA(hipStreamCreate(&stream));
//hiprandGenerator_t genGPU;
//hiprandCreateGenerator(&genGPU, HIPRAND_RNG_PSEUDO_DEFAULT);
//hiprandSetStream(genGPU, stream);
//hiprandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL);
//hiprandGenerateUniform(genGPU, scale_ptr, rect_scale.volume());
hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(rect_scale.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
scale_ptr, rect_scale.volume(), 1.0f);
hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(rect_bias.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
bias_ptr, rect_bias.volume(), 0.0f);
//hiprandDestroyGenerator(genGPU);
#endif
}
__host__
void BatchNorm::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
// First we initialize the scale and bias parameters
{
TaskLauncher para_launcher(BATCHNORM_INIT_PARA_TASK_ID, TaskArgument(NULL, 0));
para_launcher.add_region_requirement(
RegionRequirement(weights[0].region, WRITE_DISCARD, EXCLUSIVE, weights[0].region));
para_launcher.add_field(0, FID_DATA);
para_launcher.add_region_requirement(
RegionRequirement(weights[1].region, WRITE_DISCARD, EXCLUSIVE, weights[1].region));
para_launcher.add_field(1, FID_DATA);
runtime->execute_task(ctx, para_launcher);
}
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
IndexLauncher init_launcher(BATCHNORM_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(BatchNorm)), argmap);
init_launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
init_launcher.add_field(0, FID_DATA);
init_launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
init_launcher.add_field(1, FID_DATA);
init_launcher.add_region_requirement(
RegionRequirement(weights[0].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
init_launcher.add_field(2, FID_DATA);
init_launcher.add_region_requirement(
RegionRequirement(weights[1].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
init_launcher.add_field(3, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, init_launcher);
fm.wait_all_results();
idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
/*
regions[0](I): input
  regions[1](O): output
regions[2](I): scale
regions[3](I): bias
*/
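// zeroes the running statistics, then calls cudnnBatchNormalizationForwardTraining with
// exponentialAverageFactor = 1.0, so the "running" mean/variance are simply the current
// batch statistics; saveMean/saveVar cache the per-batch statistics for the backward pass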
__host__
void BatchNorm::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
float alpha = 1.0f, beta = 0.0f;
const BatchNorm* bm = (BatchNorm*) task->args;
const BatchNormMeta* m = *((BatchNormMeta**) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_scale(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
hipEvent_t t_start, t_end;
if (bm->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
coord_t numChannels = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1;
hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(numChannels)), dim3(CUDA_NUM_THREADS), 0, 0, m->runningMean, numChannels, 0.0f);
hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(numChannels)), dim3(CUDA_NUM_THREADS), 0, 0, m->runningVar, numChannels, 0.0f);
checkCUDNN(cudnnBatchNormalizationForwardTraining(
m->handle.dnn, m->mode, &alpha, &beta, m->inputTensor, acc_input.ptr,
m->outputTensor, acc_output.ptr, m->biasTensor, acc_scale.ptr, acc_bias.ptr,
1.0, m->runningMean, m->runningVar, CUDNN_BN_MIN_EPSILON,
m->saveMean, m->saveVar));
if (bm->profiling) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("BatchNorm forward time (BF) = %.2fms\n", elapsed);
}
}
__host__
void BatchNorm::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(BATCHNORM_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(BatchNorm)), argmap);
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(3, FID_DATA);
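  // Fire-and-forget launch: the result is not waited on, and Legion orders this
  // launch against later ones through the region requirements declared above.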
runtime->execute_index_space(ctx, launcher);
}
/*
regions[0](I): input
regions[1](I/O): input_grad
regions[2](I): output
regions[3](I/O): output_grad
regions[4](I): scale
regions[5](I/O): scale_grad
regions[6](I/O): bias_grad
*/
__host__
void BatchNorm::backward_task(const Task *task,
                              const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime *runtime)
{
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 7);
assert(task->regions.size() == 7);
float alpha = 1.0f;
//float beta = 0.0f;
const BatchNorm* bm = (BatchNorm*) task->args;
const BatchNormMeta* m = *((BatchNormMeta**) task->local_args);
const AccessorRO<float, 4> acc_input(regions[0], FID_DATA);
const AccessorRW<float, 4> acc_input_grad(regions[1], FID_DATA);
const AccessorRO<float, 4> acc_output(regions[2], FID_DATA);
const AccessorRW<float, 4> acc_output_grad(regions[3], FID_DATA);
const AccessorRO<float, 1> acc_scale(regions[4], FID_DATA);
const AccessorRW<float, 1> acc_scale_grad(regions[5], FID_DATA);
const AccessorRW<float, 1> acc_bias_grad(regions[6], FID_DATA);
Rect<4> rect_input, rect_input_grad, rect_output, rect_output_grad;
Rect<1> rect_scale, rect_scale_grad, rect_bias_grad;
rect_input =
runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_input_grad =
runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
rect_output =
runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
rect_output_grad =
runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
rect_scale =
runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space());
rect_scale_grad =
runtime->get_index_space_domain(ctx, task->regions[5].region.get_index_space());
rect_bias_grad =
runtime->get_index_space_domain(ctx, task->regions[6].region.get_index_space());
// make sure all regions are dense
assert(acc_input.accessor.is_dense_arbitrary(rect_input));
assert(acc_input_grad.accessor.is_dense_arbitrary(rect_input_grad));
assert(acc_output.accessor.is_dense_arbitrary(rect_output));
assert(acc_output_grad.accessor.is_dense_arbitrary(rect_output_grad));
assert(acc_scale.accessor.is_dense_arbitrary(rect_scale));
assert(acc_scale_grad.accessor.is_dense_arbitrary(rect_scale_grad));
assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad));
const float *input_ptr = acc_input.ptr(rect_input.lo);
float *input_grad_ptr = acc_input_grad.ptr(rect_input_grad.lo);
const float *output_ptr = acc_output.ptr(rect_output.lo);
float *output_grad_ptr = acc_output_grad.ptr(rect_output_grad.lo);
const float *scale_ptr = acc_scale.ptr(rect_scale.lo);
float *scale_grad_ptr = acc_scale_grad.ptr(rect_scale_grad.lo);
float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo);
hipEvent_t t_start, t_end;
if (bm->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
if (m->relu) {
int n = rect_output.volume();
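    // Fused ReLU backward: zero the output gradient in place wherever the
    // forward output was non-positive, before running the cuDNN BN backward.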
hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, 0, output_grad_ptr, output_ptr, n);
}
checkCUDNN(cudnnBatchNormalizationBackward(
m->handle.dnn, m->mode, &alpha, &alpha, &alpha, &alpha,
m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr,
m->inputTensor, input_grad_ptr, m->biasTensor, scale_ptr,
scale_grad_ptr, bias_grad_ptr, CUDNN_BN_MIN_EPSILON,
m->saveMean, m->saveVar));
if (bm->profiling) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("BatchNorm backward time = %.2fms\n", elapsed);
}
#endif
}
__host__
void BatchNorm::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(BATCHNORM_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(BatchNorm)), argmap);
// regions[0](I): input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1](I/O): input_grad (we only need grad tensors)
launcher.add_region_requirement(
RegionRequirement(inputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(1, FID_DATA);
// regions[2](I): output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
// regions[3](I/O): output_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(3, FID_DATA);
  // regions[4](I): scale
launcher.add_region_requirement(
RegionRequirement(weights[0].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(4, FID_DATA);
  // regions[5](I/O): scale_grad
launcher.add_region_requirement(
RegionRequirement(weights[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[0].region_grad));
launcher.add_field(5, FID_DATA);
// regions[6](I/O): bias_grad
launcher.add_region_requirement(
RegionRequirement(weights[1].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[1].region_grad));
launcher.add_field(6, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
}
bool BatchNorm::measure_compute_time(Simulator* sim,
const ParallelConfig& pc,
float& forward_time,
float& backward_time)
{
//TODO: implement measure_forward
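  // Cost measurement is not implemented yet, so report failure to the simulator.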
return false;
}
| 3675da7d335c08c2e4ba569e5408720c11dfd79c.cu | /* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::batch_norm(const Tensor& input,
bool relu)
{
assert(input.numDim == 4); //Only support 4D BN for now
BatchNorm *bn = new BatchNorm(*this, input, relu);
layers.push_back(bn);
return bn->outputs[0];
}
/*
locals[0] = scale
locals[1] = bias
*/
BatchNorm::BatchNorm(FFModel& model,
const Tensor& _input,
bool _relu)
: Op(model, OP_BATCHNORM, "BatchNorm", _input), relu(_relu), profiling(model.config.profiling)
{
Context ctx = model.config.lg_ctx;
HighLevelRuntime* runtime = model.config.lg_hlr;
Rect<4> part_rect = runtime->get_index_space_domain(ctx, task_is);
num_replica = part_rect.volume();
// Create output tensor
int output_w = _input.adim[0];
int output_h = _input.adim[1];
int output_c = _input.adim[2];
int output_n = _input.adim[3];
int num_par_w = part_rect.hi[0] - part_rect.lo[0] + 1;
int num_par_h = part_rect.hi[1] - part_rect.lo[1] + 1;
int num_par_c = part_rect.hi[2] - part_rect.lo[2] + 1;
int num_par_n = part_rect.hi[3] - part_rect.lo[3] + 1;
FieldSpace fs = model.config.field_space;
Rect<4> output_rect(Point<4>(0, 0, 0, 0),
Point<4>(output_w-1, output_h-1, output_c-1, output_n-1));
IndexSpaceT<4> output_is = runtime->create_index_space(ctx, output_rect);
LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs);
LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs);
int extent_w = (output_w + num_par_w - 1) / num_par_w;
int extent_h = (output_h + num_par_h - 1) / num_par_h;
int extent_c = output_c / num_par_c;
int extent_n = output_n / num_par_n;
assert(output_c % num_par_c == 0);
assert(output_n % num_par_n == 0);
Rect<4> ext(Point<4>(0, 0, 0, 0),
Point<4>(extent_w-1, extent_h-1, extent_c-1, extent_n-1));
Transform<4, 4, coord_t> trans;
for (int i = 0; i < 4; i++)
for (int j = 0; j < 4; j++)
trans[i][j] = 0;
trans[0][0] = extent_w;
trans[1][1] = extent_h;
trans[2][2] = extent_c;
trans[3][3] = extent_n;
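  // The diagonal transform block-partitions the output: each point of task_is
  // owns a disjoint extent_w x extent_h x extent_c x extent_n tile.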
IndexPartition output_ip =
runtime->create_partition_by_restriction(ctx, output_is, task_is, trans, ext);
assert(runtime->is_index_partition_disjoint(ctx, output_ip));
assert(runtime->is_index_partition_complete(ctx, output_ip));
LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip);
LogicalPartition output_grad_lp =
runtime->get_logical_partition(ctx, output_grad_lr, output_ip);
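  // Scale/bias gradients are replicated: the gradient regions hold one
  // per-channel slice per replica and are partitioned equally across task_is,
  // so each task instance writes its own slice.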
int bias_nc = num_replica * _input.adim[2]; /*input_channels*/
Rect<1, coord_t> bias_grad_rect(0, bias_nc - 1);
Rect<1, coord_t> bias_rect(0, _input.adim[2] - 1);
IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect);
IndexSpaceT<1> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect);
LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs);
LogicalRegion scale_lr = runtime->create_logical_region(ctx, bias_is, fs);
LogicalRegion bias_grad_lr =
runtime->create_logical_region(ctx, bias_grad_is, fs);
LogicalRegion scale_grad_lr =
runtime->create_logical_region(ctx, bias_grad_is, fs);
IndexPartition bias_grad_ip =
runtime->create_equal_partition(ctx, bias_grad_is, task_is);
LogicalPartition bias_grad_lp =
runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip);
LogicalPartition scale_grad_lp =
runtime->get_logical_partition(ctx, scale_grad_lr, bias_grad_ip);
Parameter scale_tensor, bias_tensor;
scale_tensor.region = scale_lr;
scale_tensor.region_grad = scale_grad_lr;
scale_tensor.part = LogicalPartition::NO_PART;
scale_tensor.part_grad = scale_grad_lp;
weights[0] = scale_tensor;
bias_tensor.region = bias_lr;
bias_tensor.region_grad = bias_grad_lr;
bias_tensor.part = LogicalPartition::NO_PART;
bias_tensor.part_grad = bias_grad_lp;
weights[1] = bias_tensor;
numWeights = 2;
outputs[0] = _input;
outputs[0].region = output_lr;
outputs[0].part = output_lp;
outputs[0].region_grad = output_grad_lr;
outputs[0].part_grad = output_grad_lp;
printf("Create bn layer: output(%d %d %d %d)\n",
outputs[0].adim[3], outputs[0].adim[2], outputs[0].adim[1], outputs[0].adim[0]);
input_lps[0] = _input.part;
}
void BatchNorm::create_weights(FFModel& model)
{
// TODO
assert(false);
}
void BatchNorm::create_output_and_partition(FFModel& model)
{
// TODO
assert(false);
}
/*
regions[0]: input
regions[1]: output
regions[2](I): scale
regions[3](I): bias
*/
__host__
OpMeta* BatchNorm::init_task(const Task *task,
                             const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
const BatchNorm* bm = (BatchNorm*) task->args;
FFHandler handle = *((const FFHandler*) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_scale(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
BatchNormMeta* m = new BatchNormMeta(handle);
m->relu = bm->relu;
m->mode = CUDNN_BATCHNORM_SPATIAL;
#if CUDNN_VERSION >= 7000
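  // cuDNN 7+ provides a persistent spatial mode that can select a faster
  // single-pass batch-norm kernel for supported configurations.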
m->mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#endif
checkCUDNN(cudnnCreateTensorDescriptor(&m->inputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
checkCUDNN(cudnnCreateTensorDescriptor(&m->biasTensor));
assert(acc_input.rect == acc_output.rect);
int input_w = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1;
int input_h = acc_input.rect.hi[1] - acc_input.rect.lo[1] + 1;
int input_c = acc_input.rect.hi[2] - acc_input.rect.lo[2] + 1;
int input_n = acc_input.rect.hi[3] - acc_input.rect.lo[3] + 1;
int output_w = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1;
int output_h = acc_output.rect.hi[1] - acc_output.rect.lo[1] + 1;
int output_c = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1;
int output_n = acc_output.rect.hi[3] - acc_output.rect.lo[3] + 1;
checkCUDNN(cudnnSetTensor4dDescriptor(m->inputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
input_n, input_c,
input_h, input_w));
checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
output_n, output_c,
output_h, output_w));
checkCUDNN(cudnnSetTensor4dDescriptor(m->biasTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1, output_c, 1, 1));
//float *runningMean, *runningVar, *saveMean, *saveVar;
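  // Per-channel device buffers: running statistics for inference plus the saved
  // batch statistics that the backward pass reuses.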
checkCUDA(cudaMalloc(&m->runningMean, sizeof(float) * output_c));
checkCUDA(cudaMalloc(&m->runningVar, sizeof(float) * output_c));
checkCUDA(cudaMalloc(&m->saveMean, sizeof(float) * output_c));
checkCUDA(cudaMalloc(&m->saveVar, sizeof(float) * output_c));
if (m->relu) {
checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc));
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU,
CUDNN_PROPAGATE_NAN, 0.0));
}
return m;
}
/*
  regions[0](O): scale, initialized to ones
  regions[1](O): bias, initialized to zeros
*/
__host__
void BatchNorm::init_para_task(const Task *task,
                               const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
//const BatchNorm* bm = (BatchNorm*) task->args;
const AccessorWO<float, 1> acc_scale(regions[0], FID_DATA);
const AccessorWO<float, 1> acc_bias(regions[1], FID_DATA);
Rect<1> rect_scale, rect_bias;
rect_scale = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_bias = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
assert(acc_scale.accessor.is_dense_arbitrary(rect_scale));
assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
float *scale_ptr = acc_scale.ptr(rect_scale.lo);
float *bias_ptr = acc_bias.ptr(rect_bias.lo);
// init kernel and bias
#ifdef PARAMETER_ALL_ONES
ones_kernel<<<GET_BLOCKS(rect_scale.volume()), CUDA_NUM_THREADS>>>(
scale_ptr, rect_scale.volume());
ones_kernel<<<GET_BLOCKS(rect_bias.volume()), CUDA_NUM_THREADS>>>(
bias_ptr, rect_bias.volume());
#else
//cudaStream_t stream;
//checkCUDA(cudaStreamCreate(&stream));
//curandGenerator_t genGPU;
//curandCreateGenerator(&genGPU, CURAND_RNG_PSEUDO_DEFAULT);
//curandSetStream(genGPU, stream);
//curandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL);
//curandGenerateUniform(genGPU, scale_ptr, rect_scale.volume());
assign_kernel<<<GET_BLOCKS(rect_scale.volume()), CUDA_NUM_THREADS>>>(
scale_ptr, rect_scale.volume(), 1.0f);
assign_kernel<<<GET_BLOCKS(rect_bias.volume()), CUDA_NUM_THREADS>>>(
bias_ptr, rect_bias.volume(), 0.0f);
//curandDestroyGenerator(genGPU);
#endif
}
__host__
void BatchNorm::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
// First we initialize the scale and bias parameters
{
TaskLauncher para_launcher(BATCHNORM_INIT_PARA_TASK_ID, TaskArgument(NULL, 0));
para_launcher.add_region_requirement(
RegionRequirement(weights[0].region, WRITE_DISCARD, EXCLUSIVE, weights[0].region));
para_launcher.add_field(0, FID_DATA);
para_launcher.add_region_requirement(
RegionRequirement(weights[1].region, WRITE_DISCARD, EXCLUSIVE, weights[1].region));
para_launcher.add_field(1, FID_DATA);
runtime->execute_task(ctx, para_launcher);
}
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
IndexLauncher init_launcher(BATCHNORM_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(BatchNorm)), argmap);
init_launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
init_launcher.add_field(0, FID_DATA);
init_launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
init_launcher.add_field(1, FID_DATA);
init_launcher.add_region_requirement(
RegionRequirement(weights[0].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
init_launcher.add_field(2, FID_DATA);
init_launcher.add_region_requirement(
RegionRequirement(weights[1].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
init_launcher.add_field(3, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, init_launcher);
fm.wait_all_results();
idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
/*
regions[0](I): input
  regions[1](O): output
regions[2](I): scale
regions[3](I): bias
*/
__host__
void BatchNorm::forward_task(const Task *task,
                             const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(task->regions.size() == 4);
float alpha = 1.0f, beta = 0.0f;
const BatchNorm* bm = (BatchNorm*) task->args;
const BatchNormMeta* m = *((BatchNormMeta**) task->local_args);
TensorAccessorR<float, 4> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 4> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_scale(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
TensorAccessorR<float, 1> acc_bias(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
cudaEvent_t t_start, t_end;
if (bm->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
coord_t numChannels = acc_output.rect.hi[2] - acc_output.rect.lo[2] + 1;
assign_kernel<<<GET_BLOCKS(numChannels), CUDA_NUM_THREADS>>>(m->runningMean, numChannels, 0.0f);
assign_kernel<<<GET_BLOCKS(numChannels), CUDA_NUM_THREADS>>>(m->runningVar, numChannels, 0.0f);
checkCUDNN(cudnnBatchNormalizationForwardTraining(
m->handle.dnn, m->mode, &alpha, &beta, m->inputTensor, acc_input.ptr,
m->outputTensor, acc_output.ptr, m->biasTensor, acc_scale.ptr, acc_bias.ptr,
1.0, m->runningMean, m->runningVar, CUDNN_BN_MIN_EPSILON,
m->saveMean, m->saveVar));
if (bm->profiling) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("BatchNorm forward time (BF) = %.2fms\n", elapsed);
}
}
__host__
void BatchNorm::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(BATCHNORM_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(BatchNorm)), argmap);
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[0].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(2, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(weights[1].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[1].region));
launcher.add_field(3, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
/*
regions[0](I): input
regions[1](I/O): input_grad
regions[2](I): output
regions[3](I/O): output_grad
regions[4](I): scale
regions[5](I/O): scale_grad
regions[6](I/O): bias_grad
*/
__host__
void BatchNorm::backward_task(const Task *task,
                              const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime *runtime)
{
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 7);
assert(task->regions.size() == 7);
float alpha = 1.0f;
//float beta = 0.0f;
const BatchNorm* bm = (BatchNorm*) task->args;
const BatchNormMeta* m = *((BatchNormMeta**) task->local_args);
const AccessorRO<float, 4> acc_input(regions[0], FID_DATA);
const AccessorRW<float, 4> acc_input_grad(regions[1], FID_DATA);
const AccessorRO<float, 4> acc_output(regions[2], FID_DATA);
const AccessorRW<float, 4> acc_output_grad(regions[3], FID_DATA);
const AccessorRO<float, 1> acc_scale(regions[4], FID_DATA);
const AccessorRW<float, 1> acc_scale_grad(regions[5], FID_DATA);
const AccessorRW<float, 1> acc_bias_grad(regions[6], FID_DATA);
Rect<4> rect_input, rect_input_grad, rect_output, rect_output_grad;
Rect<1> rect_scale, rect_scale_grad, rect_bias_grad;
rect_input =
runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_input_grad =
runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
rect_output =
runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
rect_output_grad =
runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
rect_scale =
runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space());
rect_scale_grad =
runtime->get_index_space_domain(ctx, task->regions[5].region.get_index_space());
rect_bias_grad =
runtime->get_index_space_domain(ctx, task->regions[6].region.get_index_space());
// make sure all regions are dense
assert(acc_input.accessor.is_dense_arbitrary(rect_input));
assert(acc_input_grad.accessor.is_dense_arbitrary(rect_input_grad));
assert(acc_output.accessor.is_dense_arbitrary(rect_output));
assert(acc_output_grad.accessor.is_dense_arbitrary(rect_output_grad));
assert(acc_scale.accessor.is_dense_arbitrary(rect_scale));
assert(acc_scale_grad.accessor.is_dense_arbitrary(rect_scale_grad));
assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad));
const float *input_ptr = acc_input.ptr(rect_input.lo);
float *input_grad_ptr = acc_input_grad.ptr(rect_input_grad.lo);
const float *output_ptr = acc_output.ptr(rect_output.lo);
float *output_grad_ptr = acc_output_grad.ptr(rect_output_grad.lo);
const float *scale_ptr = acc_scale.ptr(rect_scale.lo);
float *scale_grad_ptr = acc_scale_grad.ptr(rect_scale_grad.lo);
float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo);
cudaEvent_t t_start, t_end;
if (bm->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
if (m->relu) {
int n = rect_output.volume();
reluBackward<<<GET_BLOCKS(n), CUDA_NUM_THREADS>>>(output_grad_ptr, output_ptr, n);
}
checkCUDNN(cudnnBatchNormalizationBackward(
m->handle.dnn, m->mode, &alpha, &alpha, &alpha, &alpha,
m->inputTensor, input_ptr, m->outputTensor, output_grad_ptr,
m->inputTensor, input_grad_ptr, m->biasTensor, scale_ptr,
scale_grad_ptr, bias_grad_ptr, CUDNN_BN_MIN_EPSILON,
m->saveMean, m->saveVar));
if (bm->profiling) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("BatchNorm backward time = %.2fms\n", elapsed);
}
#endif
}
__host__
void BatchNorm::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<4> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<4> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
IndexLauncher launcher(BATCHNORM_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(BatchNorm)), argmap);
// regions[0](I): input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1](I/O): input_grad (we only need grad tensors)
launcher.add_region_requirement(
RegionRequirement(inputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(1, FID_DATA);
// regions[2](I): output
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
// regions[3](I/O): output_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(3, FID_DATA);
  // regions[4](I): scale
launcher.add_region_requirement(
RegionRequirement(weights[0].region, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, weights[0].region));
launcher.add_field(4, FID_DATA);
  // regions[5](I/O): scale_grad
launcher.add_region_requirement(
RegionRequirement(weights[0].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[0].region_grad));
launcher.add_field(5, FID_DATA);
// regions[6](I/O): bias_grad
launcher.add_region_requirement(
RegionRequirement(weights[1].part_grad, 0/*projection id*/,
READ_WRITE, EXCLUSIVE, weights[1].region_grad));
launcher.add_field(6, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
}
bool BatchNorm::measure_compute_time(Simulator* sim,
const ParallelConfig& pc,
float& forward_time,
float& backward_time)
{
//TODO: implement measure_forward
return false;
}
|