hip_filename (string, length 5 to 84) | hip_content (string, length 79 to 9.69M) | cuda_filename (string, length 4 to 83) | cuda_content (string, length 19 to 9.69M)
---|---|---|---|
6eac8a90300de3c45c3c5863757d292fd7405aed.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
int TILE_WIDTH = 32;
__shared__ float Mds[32][32];
__shared__ float Nds[32][32];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0.0;
for (int k = 0; k < (TILE_WIDTH + M.width - 1) / TILE_WIDTH; k++)
{
if (Row < M.height && k*TILE_WIDTH+tx < M.width )
Mds[ty][tx] = M.elements[Row*M.width + k*TILE_WIDTH+tx];
else
Mds[ty][tx] = 0.0;
if (Col < N.width && k*TILE_WIDTH+ty < N.height)
Nds[ty][tx] = N.elements[(k*TILE_WIDTH+ty)*N.width + Col];
else
Nds[ty][tx] = 0.0;
__syncthreads();
for (int i = 0; i < TILE_WIDTH; i++)
Pvalue += Mds[ty][i] * Nds[i][tx];
__syncthreads();
}
if (Row < P.height && Col < P.width)
P.elements[Row*P.width+Col] = Pvalue;
}
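// Host-side launch sketch (illustrative assumption, not part of the original file;
// the Matrix fields are presumed to hold device pointers):
//   dim3 block(32, 32);                                   // one thread per tile element
//   dim3 grid((P.width + 31) / 32, (P.height + 31) / 32); // cover P with 32x32 tiles
//   hipLaunchKernelGGL(MatrixMulKernel, grid, block, 0, 0, M, N, P);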
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
6eac8a90300de3c45c3c5863757d292fd7405aed.cu
|
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
int TILE_WIDTH = 32;
__shared__ float Mds[32][32];
__shared__ float Nds[32][32];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0.0;
for (int k = 0; k < (TILE_WIDTH + M.width - 1) / TILE_WIDTH; k++)
{
if (Row < M.height && k*TILE_WIDTH+tx < M.width )
Mds[ty][tx] = M.elements[Row*M.width + k*TILE_WIDTH+tx];
else
Mds[ty][tx] = 0.0;
if (Col < N.width && k*TILE_WIDTH+ty < N.height)
Nds[ty][tx] = N.elements[(k*TILE_WIDTH+ty)*N.width + Col];
else
Nds[ty][tx] = 0.0;
__syncthreads();
for (int i = 0; i < TILE_WIDTH; i++)
Pvalue += Mds[ty][i] * Nds[i][tx];
__syncthreads();
}
if (Row < P.height && Col < P.width)
P.elements[Row*P.width+Col] = Pvalue;
}
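// Host-side launch sketch (illustrative assumption, not part of the original file;
// the Matrix fields are presumed to hold device pointers):
//   dim3 block(32, 32);
//   dim3 grid((P.width + 31) / 32, (P.height + 31) / 32);
//   MatrixMulKernel<<<grid, block>>>(M, N, P);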
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
32a839382340c4103dda6e872157388b9a47d792.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
constexpr unsigned int init_threads_per_group = 32;
constexpr unsigned int init_groups_per_block = 4;
constexpr unsigned int init_threads_per_block = init_threads_per_group * init_groups_per_block;
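// Launch layout (see orc_init_statistics_groups below): blockIdx.y selects the column,
// blockIdx.x covers row groups in batches of init_groups_per_block, threadIdx.y picks
// the group within the block, and only lane 0 (threadIdx.x == 0) writes its descriptor.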
__global__ void __launch_bounds__(init_threads_per_block)
gpu_init_statistics_groups(statistics_group* groups,
const stats_column_desc* cols,
device_2dspan<rowgroup_rows const> rowgroup_bounds)
{
__shared__ __align__(4) statistics_group group_g[init_groups_per_block];
uint32_t const col_id = blockIdx.y;
uint32_t const chunk_id = (blockIdx.x * init_groups_per_block) + threadIdx.y;
uint32_t const t = threadIdx.x;
auto const num_rowgroups = rowgroup_bounds.size().first;
statistics_group* group = &group_g[threadIdx.y];
if (chunk_id < num_rowgroups and t == 0) {
group->col = &cols[col_id];
group->start_row = rowgroup_bounds[chunk_id][col_id].begin;
group->num_rows = rowgroup_bounds[chunk_id][col_id].size();
groups[col_id * num_rowgroups + chunk_id] = *group;
}
}
/**
* @brief Get the buffer size and offsets of encoded statistics
*
* @param[in,out] groups Statistics merge groups
* @param[in] statistics_count Number of statistics buffers
*/
constexpr unsigned int buffersize_reduction_dim = 32;
constexpr unsigned int block_size = buffersize_reduction_dim * buffersize_reduction_dim;
constexpr unsigned int pb_fld_hdrlen = 1;
constexpr unsigned int pb_fld_hdrlen16 = 2; // > 127-byte length
constexpr unsigned int pb_fld_hdrlen32 = 5; // > 16KB length
constexpr unsigned int pb_fldlen_int64 = 10;
constexpr unsigned int pb_fldlen_float64 = 8;
constexpr unsigned int pb_fldlen_decimal = 40; // Assume decimal2string fits in 40 characters
constexpr unsigned int pb_fldlen_bucket1 = 1 + pb_fldlen_int64;
constexpr unsigned int pb_fldlen_common = 2 * pb_fld_hdrlen + pb_fldlen_int64;
template <unsigned int block_size>
__global__ void __launch_bounds__(block_size, 1)
gpu_init_statistics_buffersize(statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count)
{
using block_scan = hipcub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>;
__shared__ typename block_scan::TempStorage temp_storage;
volatile uint32_t stats_size = 0;
uint32_t t = threadIdx.x;
__syncthreads();
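// Entries are processed in batches of block_size: an exclusive block-wide prefix sum
// over the per-entry encoded lengths gives each entry its byte offset (start_chunk)
// and the batch total, which is accumulated into stats_size so offsets stay global.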
for (uint32_t start = 0; start < statistics_count; start += block_size) {
uint32_t stats_len = 0, stats_pos;
uint32_t idx = start + t;
if (idx < statistics_count) {
const stats_column_desc* col = groups[idx].col;
statistics_dtype dtype = col->stats_dtype;
switch (dtype) {
case dtype_bool: stats_len = pb_fldlen_common + pb_fld_hdrlen + pb_fldlen_bucket1; break;
case dtype_int8:
case dtype_int16:
case dtype_int32:
case dtype_date32:
case dtype_int64:
case dtype_timestamp64:
stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_int64);
break;
case dtype_float32:
case dtype_float64:
stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_float64);
break;
case dtype_decimal64:
case dtype_decimal128:
stats_len = pb_fldlen_common + pb_fld_hdrlen16 + 3 * (pb_fld_hdrlen + pb_fldlen_decimal);
break;
case dtype_string:
stats_len = pb_fldlen_common + pb_fld_hdrlen32 + 3 * (pb_fld_hdrlen + pb_fldlen_int64) +
chunks[idx].min_value.str_val.length + chunks[idx].max_value.str_val.length;
break;
case dtype_none: stats_len = pb_fldlen_common;
default: break;
}
}
uint32_t tmp_stats_size;
block_scan(temp_storage).ExclusiveSum(stats_len, stats_pos, tmp_stats_size);
stats_pos += stats_size;
stats_size += tmp_stats_size;
if (idx < statistics_count) {
groups[idx].start_chunk = stats_pos;
groups[idx].num_chunks = stats_len;
}
__syncthreads();
}
}
struct stats_state_s {
uint8_t* base; ///< Output buffer start
uint8_t* end; ///< Output buffer end
statistics_chunk chunk;
statistics_merge_group group;
stats_column_desc col;
// ORC stats
uint64_t numberOfValues;
uint8_t hasNull;
};
/*
* Protobuf encoding - see
* https://developers.google.com/protocol-buffers/docs/encoding
*/
// Protobuf varint encoding for unsigned int
__device__ inline uint8_t* pb_encode_uint(uint8_t* p, uint64_t v)
{
while (v > 0x7f) {
*p++ = ((uint32_t)v | 0x80);
v >>= 7;
}
*p++ = v;
return p;
}
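// Worked example (illustration only): pb_encode_uint(p, 300) emits 0xAC 0x02 --
// 300 = 0b1'0010'1100; the low 7 bits (0101100) with the continuation bit set give 0xAC,
// and the remaining bits (10) give 0x02.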
// Protobuf field encoding for unsigned int
__device__ inline uint8_t* pb_put_uint(uint8_t* p, uint32_t id, uint64_t v)
{
p[0] = id * 8 + static_cast<ProtofType>(ProtofType::VARINT); // NOTE: Assumes id < 16
return pb_encode_uint(p + 1, v);
}
// Protobuf field encoding for signed int
__device__ inline uint8_t* pb_put_int(uint8_t* p, uint32_t id, int64_t v)
{
int64_t s = (v < 0);
return pb_put_uint(p, id, (v ^ -s) * 2 + s);
}
// Protobuf field encoding for 'packed' unsigned int (single value)
__device__ inline uint8_t* pb_put_packed_uint(uint8_t* p, uint32_t id, uint64_t v)
{
uint8_t* p2 = pb_encode_uint(p + 2, v);
p[0] = id * 8 + ProtofType::FIXEDLEN;
p[1] = static_cast<uint8_t>(p2 - (p + 2));
return p2;
}
// Protobuf field encoding for binary/string
__device__ inline uint8_t* pb_put_binary(uint8_t* p, uint32_t id, const void* bytes, uint32_t len)
{
p[0] = id * 8 + ProtofType::FIXEDLEN;
p = pb_encode_uint(p + 1, len);
memcpy(p, bytes, len);
return p + len;
}
// Protobuf field encoding for 64-bit raw encoding (double)
__device__ inline uint8_t* pb_put_fixed64(uint8_t* p, uint32_t id, const void* raw64)
{
p[0] = id * 8 + ProtofType::FIXED64;
memcpy(p + 1, raw64, 8);
return p + 9;
}
/**
* @brief Encode statistics in ORC protobuf format
*
* @param[in,out] groups Statistics merge groups
* @param[in,out] chunks Statistics data
* @param[in] statistics_count Number of statistics buffers
*
* ORC statistics format from https://orc.apache.org/specification/ORCv1/
*
* message ColumnStatistics {
* // the number of values
* optional uint64 numberOfValues = 1;
* // At most one of these has a value for any column
* optional IntegerStatistics intStatistics = 2;
* optional DoubleStatistics doubleStatistics = 3;
* optional StringStatistics stringStatistics = 4;
* optional BucketStatistics bucketStatistics = 5;
* optional DecimalStatistics decimalStatistics = 6;
* optional DateStatistics dateStatistics = 7;
* optional BinaryStatistics binaryStatistics = 8;
* optional TimestampStatistics timestampStatistics = 9;
* optional bool hasNull = 10;
* }
*/
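// Worked example (illustrative assumption): an integer column chunk with non_nulls = 3,
// minimum = -1, maximum = 5 and sum = 7 would be emitted by gpu_encode_statistics below as:
//   08 03              field 1 (numberOfValues), varint 3
//   12 06              field 2 (intStatistics), length-delimited, 6 bytes
//   08 01 10 0A 18 0E  nested sint64 fields: zigzag(-1) = 1, zigzag(5) = 10, zigzag(7) = 14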
constexpr unsigned int encode_threads_per_chunk = 32;
constexpr unsigned int encode_chunks_per_block = 4;
constexpr unsigned int encode_threads_per_block =
encode_threads_per_chunk * encode_chunks_per_block;
__global__ void __launch_bounds__(encode_threads_per_block)
gpu_encode_statistics(uint8_t* blob_bfr,
statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count)
{
__shared__ __align__(8) stats_state_s state_g[encode_chunks_per_block];
uint32_t t = threadIdx.x;
uint32_t idx = blockIdx.x * encode_chunks_per_block + threadIdx.y;
stats_state_s* const s = &state_g[threadIdx.y];
// Encode and update actual bfr size
if (idx < statistics_count && t == 0) {
s->chunk = chunks[idx];
s->group = groups[idx];
s->col = *(s->group.col);
s->base = blob_bfr + s->group.start_chunk;
s->end = blob_bfr + s->group.start_chunk + s->group.num_chunks;
uint8_t* cur = pb_put_uint(s->base, 1, s->chunk.non_nulls);
uint8_t* fld_start = cur;
switch (s->col.stats_dtype) {
case dtype_int8:
case dtype_int16:
case dtype_int32:
case dtype_int64:
// intStatistics = 2
// message IntegerStatistics {
// optional sint64 minimum = 1;
// optional sint64 maximum = 2;
// optional sint64 sum = 3;
// }
if (s->chunk.has_minmax || s->chunk.has_sum) {
*cur = 2 * 8 + ProtofType::FIXEDLEN;
cur += 2;
if (s->chunk.has_minmax) {
cur = pb_put_int(cur, 1, s->chunk.min_value.i_val);
cur = pb_put_int(cur, 2, s->chunk.max_value.i_val);
}
if (s->chunk.has_sum) { cur = pb_put_int(cur, 3, s->chunk.sum.i_val); }
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_float32:
case dtype_float64:
// doubleStatistics = 3
// message DoubleStatistics {
// optional double minimum = 1;
// optional double maximum = 2;
// optional double sum = 3;
// }
if (s->chunk.has_minmax) {
*cur = 3 * 8 + ProtofType::FIXEDLEN;
cur += 2;
cur = pb_put_fixed64(cur, 1, &s->chunk.min_value.fp_val);
cur = pb_put_fixed64(cur, 2, &s->chunk.max_value.fp_val);
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_string:
// stringStatistics = 4
// message StringStatistics {
// optional string minimum = 1;
// optional string maximum = 2;
// optional sint64 sum = 3; // sum will store the total length of all strings
// }
if (s->chunk.has_minmax && s->chunk.has_sum) {
uint32_t sz = (pb_put_uint(cur, 3, s->chunk.sum.i_val) - cur) +
(pb_put_uint(cur, 1, s->chunk.min_value.str_val.length) - cur) +
(pb_put_uint(cur, 2, s->chunk.max_value.str_val.length) - cur) +
s->chunk.min_value.str_val.length + s->chunk.max_value.str_val.length;
cur[0] = 4 * 8 + ProtofType::FIXEDLEN;
cur = pb_encode_uint(cur + 1, sz);
cur = pb_put_binary(
cur, 1, s->chunk.min_value.str_val.ptr, s->chunk.min_value.str_val.length);
cur = pb_put_binary(
cur, 2, s->chunk.max_value.str_val.ptr, s->chunk.max_value.str_val.length);
cur = pb_put_uint(cur, 3, s->chunk.sum.i_val);
}
break;
case dtype_bool:
// bucketStatistics = 5
// message BucketStatistics {
// repeated uint64 count = 1 [packed=true];
// }
if (s->chunk.has_sum) { // Sum is equal to the number of 'true' values
cur[0] = 5 * 8 + ProtofType::FIXEDLEN;
cur = pb_put_packed_uint(cur + 2, 1, s->chunk.sum.u_val);
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_decimal64:
case dtype_decimal128:
// decimalStatistics = 6
// message DecimalStatistics {
// optional string minimum = 1;
// optional string maximum = 2;
// optional string sum = 3;
// }
if (s->chunk.has_minmax) {
// TODO: Decimal support (decimal min/max stored as strings)
}
break;
case dtype_date32:
// dateStatistics = 7
// message DateStatistics { // min,max values saved as days since epoch
// optional sint32 minimum = 1;
// optional sint32 maximum = 2;
// }
if (s->chunk.has_minmax) {
cur[0] = 7 * 8 + ProtofType::FIXEDLEN;
cur += 2;
cur = pb_put_int(cur, 1, s->chunk.min_value.i_val);
cur = pb_put_int(cur, 2, s->chunk.max_value.i_val);
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_timestamp64:
// timestampStatistics = 9
// message TimestampStatistics {
// optional sint64 minimum = 1; // min,max values saved as milliseconds since epoch
// optional sint64 maximum = 2;
// optional sint64 minimumUtc = 3; // min,max values saved as milliseconds since UNIX epoch
// optional sint64 maximumUtc = 4;
// }
if (s->chunk.has_minmax) {
cur[0] = 9 * 8 + ProtofType::FIXEDLEN;
cur += 2;
cur = pb_put_int(cur, 3, s->chunk.min_value.i_val); // minimumUtc
cur = pb_put_int(cur, 4, s->chunk.max_value.i_val); // maximumUtc
fld_start[1] = cur - (fld_start + 2);
}
break;
default: break;
}
groups[idx].num_chunks = static_cast<uint32_t>(cur - s->base);
}
}
void orc_init_statistics_groups(statistics_group* groups,
const stats_column_desc* cols,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
rmm::cuda_stream_view stream)
{
dim3 dim_grid((rowgroup_bounds.size().first + init_groups_per_block - 1) / init_groups_per_block,
rowgroup_bounds.size().second);
dim3 dim_block(init_threads_per_group, init_groups_per_block);
hipLaunchKernelGGL(( gpu_init_statistics_groups), dim3(dim_grid), dim3(dim_block), 0, stream.value(),
groups, cols, rowgroup_bounds);
}
/**
* @brief Launches kernels to return statistics buffer offsets and sizes
*
* @param[in,out] groups Statistics merge groups
* @param[in] chunks Statistics chunks
* @param[in] statistics_count Number of statistics buffers to encode
* @param[in] stream CUDA stream used for device memory operations and kernel launches
*/
void orc_init_statistics_buffersize(statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count,
rmm::cuda_stream_view stream)
{
hipLaunchKernelGGL(( gpu_init_statistics_buffersize<block_size>)
, dim3(1), dim3(block_size), 0, stream.value(), groups, chunks, statistics_count);
}
/**
* @brief Launches kernel to encode statistics in ORC protobuf format
*
* @param[out] blob_bfr Output buffer for statistics blobs
* @param[in,out] groups Statistics merge groups
* @param[in,out] chunks Statistics data
* @param[in] statistics_count Number of statistics buffers
*/
void orc_encode_statistics(uint8_t* blob_bfr,
statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count,
rmm::cuda_stream_view stream)
{
unsigned int num_blocks =
(statistics_count + encode_chunks_per_block - 1) / encode_chunks_per_block;
dim3 dim_block(encode_threads_per_chunk, encode_chunks_per_block);
hipLaunchKernelGGL(( gpu_encode_statistics), dim3(num_blocks), dim3(dim_block), 0, stream.value(),
blob_bfr, groups, chunks, statistics_count);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
32a839382340c4103dda6e872157388b9a47d792.cu
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
constexpr unsigned int init_threads_per_group = 32;
constexpr unsigned int init_groups_per_block = 4;
constexpr unsigned int init_threads_per_block = init_threads_per_group * init_groups_per_block;
__global__ void __launch_bounds__(init_threads_per_block)
gpu_init_statistics_groups(statistics_group* groups,
const stats_column_desc* cols,
device_2dspan<rowgroup_rows const> rowgroup_bounds)
{
__shared__ __align__(4) statistics_group group_g[init_groups_per_block];
uint32_t const col_id = blockIdx.y;
uint32_t const chunk_id = (blockIdx.x * init_groups_per_block) + threadIdx.y;
uint32_t const t = threadIdx.x;
auto const num_rowgroups = rowgroup_bounds.size().first;
statistics_group* group = &group_g[threadIdx.y];
if (chunk_id < num_rowgroups and t == 0) {
group->col = &cols[col_id];
group->start_row = rowgroup_bounds[chunk_id][col_id].begin;
group->num_rows = rowgroup_bounds[chunk_id][col_id].size();
groups[col_id * num_rowgroups + chunk_id] = *group;
}
}
/**
* @brief Get the buffer size and offsets of encoded statistics
*
* @param[in,out] groups Statistics merge groups
* @param[in] statistics_count Number of statistics buffers
*/
constexpr unsigned int buffersize_reduction_dim = 32;
constexpr unsigned int block_size = buffersize_reduction_dim * buffersize_reduction_dim;
constexpr unsigned int pb_fld_hdrlen = 1;
constexpr unsigned int pb_fld_hdrlen16 = 2; // > 127-byte length
constexpr unsigned int pb_fld_hdrlen32 = 5; // > 16KB length
constexpr unsigned int pb_fldlen_int64 = 10;
constexpr unsigned int pb_fldlen_float64 = 8;
constexpr unsigned int pb_fldlen_decimal = 40; // Assume decimal2string fits in 40 characters
constexpr unsigned int pb_fldlen_bucket1 = 1 + pb_fldlen_int64;
constexpr unsigned int pb_fldlen_common = 2 * pb_fld_hdrlen + pb_fldlen_int64;
template <unsigned int block_size>
__global__ void __launch_bounds__(block_size, 1)
gpu_init_statistics_buffersize(statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count)
{
using block_scan = cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>;
__shared__ typename block_scan::TempStorage temp_storage;
volatile uint32_t stats_size = 0;
uint32_t t = threadIdx.x;
__syncthreads();
for (uint32_t start = 0; start < statistics_count; start += block_size) {
uint32_t stats_len = 0, stats_pos;
uint32_t idx = start + t;
if (idx < statistics_count) {
const stats_column_desc* col = groups[idx].col;
statistics_dtype dtype = col->stats_dtype;
switch (dtype) {
case dtype_bool: stats_len = pb_fldlen_common + pb_fld_hdrlen + pb_fldlen_bucket1; break;
case dtype_int8:
case dtype_int16:
case dtype_int32:
case dtype_date32:
case dtype_int64:
case dtype_timestamp64:
stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_int64);
break;
case dtype_float32:
case dtype_float64:
stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_float64);
break;
case dtype_decimal64:
case dtype_decimal128:
stats_len = pb_fldlen_common + pb_fld_hdrlen16 + 3 * (pb_fld_hdrlen + pb_fldlen_decimal);
break;
case dtype_string:
stats_len = pb_fldlen_common + pb_fld_hdrlen32 + 3 * (pb_fld_hdrlen + pb_fldlen_int64) +
chunks[idx].min_value.str_val.length + chunks[idx].max_value.str_val.length;
break;
case dtype_none: stats_len = pb_fldlen_common;
default: break;
}
}
uint32_t tmp_stats_size;
block_scan(temp_storage).ExclusiveSum(stats_len, stats_pos, tmp_stats_size);
stats_pos += stats_size;
stats_size += tmp_stats_size;
if (idx < statistics_count) {
groups[idx].start_chunk = stats_pos;
groups[idx].num_chunks = stats_len;
}
__syncthreads();
}
}
struct stats_state_s {
uint8_t* base; ///< Output buffer start
uint8_t* end; ///< Output buffer end
statistics_chunk chunk;
statistics_merge_group group;
stats_column_desc col;
// ORC stats
uint64_t numberOfValues;
uint8_t hasNull;
};
/*
* Protobuf encoding - see
* https://developers.google.com/protocol-buffers/docs/encoding
*/
// Protobuf varint encoding for unsigned int
__device__ inline uint8_t* pb_encode_uint(uint8_t* p, uint64_t v)
{
while (v > 0x7f) {
*p++ = ((uint32_t)v | 0x80);
v >>= 7;
}
*p++ = v;
return p;
}
// Protobuf field encoding for unsigned int
__device__ inline uint8_t* pb_put_uint(uint8_t* p, uint32_t id, uint64_t v)
{
p[0] = id * 8 + static_cast<ProtofType>(ProtofType::VARINT); // NOTE: Assumes id < 16
return pb_encode_uint(p + 1, v);
}
// Protobuf field encoding for signed int
__device__ inline uint8_t* pb_put_int(uint8_t* p, uint32_t id, int64_t v)
{
int64_t s = (v < 0);
return pb_put_uint(p, id, (v ^ -s) * 2 + s);
}
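// Worked example (illustration only): (v ^ -s) * 2 + s is protobuf zigzag encoding,
// mapping signed values onto unsigned varints: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4,
// so small-magnitude negatives stay short when varint-encoded.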
// Protobuf field encoding for 'packed' unsigned int (single value)
__device__ inline uint8_t* pb_put_packed_uint(uint8_t* p, uint32_t id, uint64_t v)
{
uint8_t* p2 = pb_encode_uint(p + 2, v);
p[0] = id * 8 + ProtofType::FIXEDLEN;
p[1] = static_cast<uint8_t>(p2 - (p + 2));
return p2;
}
// Protobuf field encoding for binary/string
__device__ inline uint8_t* pb_put_binary(uint8_t* p, uint32_t id, const void* bytes, uint32_t len)
{
p[0] = id * 8 + ProtofType::FIXEDLEN;
p = pb_encode_uint(p + 1, len);
memcpy(p, bytes, len);
return p + len;
}
// Protobuf field encoding for 64-bit raw encoding (double)
__device__ inline uint8_t* pb_put_fixed64(uint8_t* p, uint32_t id, const void* raw64)
{
p[0] = id * 8 + ProtofType::FIXED64;
memcpy(p + 1, raw64, 8);
return p + 9;
}
/**
* @brief Encode statistics in ORC protobuf format
*
* @param[in,out] groups Statistics merge groups
* @param[in,out] chunks Statistics data
* @param[in] statistics_count Number of statistics buffers
*
* ORC statistics format from https://orc.apache.org/specification/ORCv1/
*
* message ColumnStatistics {
* // the number of values
* optional uint64 numberOfValues = 1;
* // At most one of these has a value for any column
* optional IntegerStatistics intStatistics = 2;
* optional DoubleStatistics doubleStatistics = 3;
* optional StringStatistics stringStatistics = 4;
* optional BucketStatistics bucketStatistics = 5;
* optional DecimalStatistics decimalStatistics = 6;
* optional DateStatistics dateStatistics = 7;
* optional BinaryStatistics binaryStatistics = 8;
* optional TimestampStatistics timestampStatistics = 9;
* optional bool hasNull = 10;
* }
*/
constexpr unsigned int encode_threads_per_chunk = 32;
constexpr unsigned int encode_chunks_per_block = 4;
constexpr unsigned int encode_threads_per_block =
encode_threads_per_chunk * encode_chunks_per_block;
__global__ void __launch_bounds__(encode_threads_per_block)
gpu_encode_statistics(uint8_t* blob_bfr,
statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count)
{
__shared__ __align__(8) stats_state_s state_g[encode_chunks_per_block];
uint32_t t = threadIdx.x;
uint32_t idx = blockIdx.x * encode_chunks_per_block + threadIdx.y;
stats_state_s* const s = &state_g[threadIdx.y];
// Encode and update actual bfr size
if (idx < statistics_count && t == 0) {
s->chunk = chunks[idx];
s->group = groups[idx];
s->col = *(s->group.col);
s->base = blob_bfr + s->group.start_chunk;
s->end = blob_bfr + s->group.start_chunk + s->group.num_chunks;
uint8_t* cur = pb_put_uint(s->base, 1, s->chunk.non_nulls);
uint8_t* fld_start = cur;
switch (s->col.stats_dtype) {
case dtype_int8:
case dtype_int16:
case dtype_int32:
case dtype_int64:
// intStatistics = 2
// message IntegerStatistics {
// optional sint64 minimum = 1;
// optional sint64 maximum = 2;
// optional sint64 sum = 3;
// }
if (s->chunk.has_minmax || s->chunk.has_sum) {
*cur = 2 * 8 + ProtofType::FIXEDLEN;
cur += 2;
if (s->chunk.has_minmax) {
cur = pb_put_int(cur, 1, s->chunk.min_value.i_val);
cur = pb_put_int(cur, 2, s->chunk.max_value.i_val);
}
if (s->chunk.has_sum) { cur = pb_put_int(cur, 3, s->chunk.sum.i_val); }
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_float32:
case dtype_float64:
// doubleStatistics = 3
// message DoubleStatistics {
// optional double minimum = 1;
// optional double maximum = 2;
// optional double sum = 3;
// }
if (s->chunk.has_minmax) {
*cur = 3 * 8 + ProtofType::FIXEDLEN;
cur += 2;
cur = pb_put_fixed64(cur, 1, &s->chunk.min_value.fp_val);
cur = pb_put_fixed64(cur, 2, &s->chunk.max_value.fp_val);
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_string:
// stringStatistics = 4
// message StringStatistics {
// optional string minimum = 1;
// optional string maximum = 2;
// optional sint64 sum = 3; // sum will store the total length of all strings
// }
if (s->chunk.has_minmax && s->chunk.has_sum) {
uint32_t sz = (pb_put_uint(cur, 3, s->chunk.sum.i_val) - cur) +
(pb_put_uint(cur, 1, s->chunk.min_value.str_val.length) - cur) +
(pb_put_uint(cur, 2, s->chunk.max_value.str_val.length) - cur) +
s->chunk.min_value.str_val.length + s->chunk.max_value.str_val.length;
cur[0] = 4 * 8 + ProtofType::FIXEDLEN;
cur = pb_encode_uint(cur + 1, sz);
cur = pb_put_binary(
cur, 1, s->chunk.min_value.str_val.ptr, s->chunk.min_value.str_val.length);
cur = pb_put_binary(
cur, 2, s->chunk.max_value.str_val.ptr, s->chunk.max_value.str_val.length);
cur = pb_put_uint(cur, 3, s->chunk.sum.i_val);
}
break;
case dtype_bool:
// bucketStatistics = 5
// message BucketStatistics {
// repeated uint64 count = 1 [packed=true];
// }
if (s->chunk.has_sum) { // Sum is equal to the number of 'true' values
cur[0] = 5 * 8 + ProtofType::FIXEDLEN;
cur = pb_put_packed_uint(cur + 2, 1, s->chunk.sum.u_val);
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_decimal64:
case dtype_decimal128:
// decimalStatistics = 6
// message DecimalStatistics {
// optional string minimum = 1;
// optional string maximum = 2;
// optional string sum = 3;
// }
if (s->chunk.has_minmax) {
// TODO: Decimal support (decimal min/max stored as strings)
}
break;
case dtype_date32:
// dateStatistics = 7
// message DateStatistics { // min,max values saved as days since epoch
// optional sint32 minimum = 1;
// optional sint32 maximum = 2;
// }
if (s->chunk.has_minmax) {
cur[0] = 7 * 8 + ProtofType::FIXEDLEN;
cur += 2;
cur = pb_put_int(cur, 1, s->chunk.min_value.i_val);
cur = pb_put_int(cur, 2, s->chunk.max_value.i_val);
fld_start[1] = cur - (fld_start + 2);
}
break;
case dtype_timestamp64:
// timestampStatistics = 9
// message TimestampStatistics {
// optional sint64 minimum = 1; // min,max values saved as milliseconds since epoch
// optional sint64 maximum = 2;
// optional sint64 minimumUtc = 3; // min,max values saved as milliseconds since UNIX epoch
// optional sint64 maximumUtc = 4;
// }
if (s->chunk.has_minmax) {
cur[0] = 9 * 8 + ProtofType::FIXEDLEN;
cur += 2;
cur = pb_put_int(cur, 3, s->chunk.min_value.i_val); // minimumUtc
cur = pb_put_int(cur, 4, s->chunk.max_value.i_val); // maximumUtc
fld_start[1] = cur - (fld_start + 2);
}
break;
default: break;
}
groups[idx].num_chunks = static_cast<uint32_t>(cur - s->base);
}
}
void orc_init_statistics_groups(statistics_group* groups,
const stats_column_desc* cols,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
rmm::cuda_stream_view stream)
{
dim3 dim_grid((rowgroup_bounds.size().first + init_groups_per_block - 1) / init_groups_per_block,
rowgroup_bounds.size().second);
dim3 dim_block(init_threads_per_group, init_groups_per_block);
gpu_init_statistics_groups<<<dim_grid, dim_block, 0, stream.value()>>>(
groups, cols, rowgroup_bounds);
}
/**
* @brief Launches kernels to return statistics buffer offsets and sizes
*
* @param[in,out] groups Statistics merge groups
* @param[in] chunks Statistics chunks
* @param[in] statistics_count Number of statistics buffers to encode
* @param[in] stream CUDA stream used for device memory operations and kernel launches
*/
void orc_init_statistics_buffersize(statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count,
rmm::cuda_stream_view stream)
{
gpu_init_statistics_buffersize<block_size>
<<<1, block_size, 0, stream.value()>>>(groups, chunks, statistics_count);
}
/**
* @brief Launches kernel to encode statistics in ORC protobuf format
*
* @param[out] blob_bfr Output buffer for statistics blobs
* @param[in,out] groups Statistics merge groups
* @param[in,out] chunks Statistics data
* @param[in] statistics_count Number of statistics buffers
*/
void orc_encode_statistics(uint8_t* blob_bfr,
statistics_merge_group* groups,
const statistics_chunk* chunks,
uint32_t statistics_count,
rmm::cuda_stream_view stream)
{
unsigned int num_blocks =
(statistics_count + encode_chunks_per_block - 1) / encode_chunks_per_block;
dim3 dim_block(encode_threads_per_chunk, encode_chunks_per_block);
gpu_encode_statistics<<<num_blocks, dim_block, 0, stream.value()>>>(
blob_bfr, groups, chunks, statistics_count);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
0e0475809d6ed09d5f02b0e1feedfbf8384ff0ee.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hnn_cubits.h"
#define BLOCK_SIZE 256
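// Each wrapper below launches ceil(size / BLOCK_SIZE) one-dimensional blocks of
// BLOCK_SIZE threads; the `i < size` guard in every kernel handles the final partial
// block. All pointer arguments are assumed to be device pointers.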
__global__ void mul_kernel(float *a1, float *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = a1[i] * a2[i];
}
__global__ void mulDouble_kernel(double *a1, double *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = a1[i] * a2[i];
}
extern "C"
void mul(float *a1, float *a2, size_t size) {
hipLaunchKernelGGL(( mul_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a1, a2, size);
}
extern "C"
void mulDouble(double *a1, double *a2, size_t size) {
hipLaunchKernelGGL(( mulDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a1, a2, size);
}
__global__ void add_kernel(float *a1, float *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = a1[i] + a2[i];
}
__global__ void addDouble_kernel(double *a1, double *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = a1[i] + a2[i];
}
extern "C"
void add(float *a1, float *a2, size_t size) {
hipLaunchKernelGGL(( add_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a1, a2, size);
}
extern "C"
void addDouble(double *a1, double *a2, size_t size) {
hipLaunchKernelGGL(( addDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a1, a2, size);
}
__global__ void abs_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = fabsf(a[i]);
}
__global__ void absDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = fabs(a[i]);
}
extern "C"
void tabs(float *a, size_t size) {
hipLaunchKernelGGL(( abs_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
extern "C"
void tabsDouble(double *a, size_t size) {
hipLaunchKernelGGL(( absDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void signum_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = signbit(a[i]) ? -1 : 1;
}
__global__ void signumDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = signbit(a[i]) ? -1 : 1;
}
extern "C"
void signum(float *a, size_t size) {
hipLaunchKernelGGL(( signum_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
extern "C"
void signumDouble(double *a, size_t size) {
hipLaunchKernelGGL(( signumDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void subtract_kernel(float *a1, float *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = a1[i] - a2[i];
}
__global__ void subtractDouble_kernel(double *a1, double *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = a1[i] - a2[i];
}
extern "C"
void subtract(float *a1, float *a2, size_t size) {
hipLaunchKernelGGL(( subtract_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a1, a2, size);
}
extern "C"
void subtractDouble(double *a1, double *a2, size_t size) {
hipLaunchKernelGGL(( subtractDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a1, a2, size);
}
__global__ void negate_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = -a[i];
}
__global__ void negateDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = -a[i];
}
extern "C"
void negate(float *a, size_t size) {
hipLaunchKernelGGL(( negate_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
extern "C"
void negateDouble(double *a, size_t size) {
hipLaunchKernelGGL(( negateDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void scale_kernel(float s, float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] *= s;
}
__global__ void scaleDouble_kernel(double s, double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] *= s;
}
extern "C"
void scale(float s, float *a, size_t size) {
hipLaunchKernelGGL(( scale_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, s, a, size);
}
extern "C"
void scaleDouble(double s, double *a, size_t size) {
hipLaunchKernelGGL(( scaleDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, s, a, size);
}
__global__ void log_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = log(a[i]);
}
__global__ void logDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = log(a[i]);
}
extern "C"
void logFloat(float *a, size_t size) {
hipLaunchKernelGGL(( log_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
extern "C"
void logDouble(double *a, size_t size) {
hipLaunchKernelGGL(( logDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void inv_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = 1. / a[i];
}
__global__ void invDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = 1. / a[i];
}
extern "C"
void inv(float *a, size_t size) {
hipLaunchKernelGGL(( inv_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE),dim3(BLOCK_SIZE), 0, 0, a, size);
}
extern "C"
void invDouble(double *a, size_t size) {
hipLaunchKernelGGL(( invDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE),dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void exp_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = exp(a[i]);
}
__global__ void expDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = exp(a[i]);
}
extern "C"
void texp(float *a, size_t size) {
hipLaunchKernelGGL(( exp_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void texpDouble(double *a, size_t size) {
hipLaunchKernelGGL(( expDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void sqrt_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = sqrt(a[i]);
}
__global__ void sqrtDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = sqrt(a[i]);
}
extern "C"
void tsqrt(float *a, size_t size) {
hipLaunchKernelGGL(( sqrt_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void tsqrtDouble(double *a, size_t size) {
hipLaunchKernelGGL(( sqrtDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void cos_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = cos(a[i]);
}
__global__ void cosDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = cos(a[i]);
}
extern "C"
void tcos(float *a, size_t size) {
hipLaunchKernelGGL(( cos_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void tcosDouble(double *a, size_t size) {
hipLaunchKernelGGL(( cosDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void sin_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = sin(a[i]);
}
__global__ void sinDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = sin(a[i]);
}
extern "C"
void tsin(float *a, size_t size) {
hipLaunchKernelGGL(( sin_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void tsinDouble(double *a, size_t size) {
hipLaunchKernelGGL(( sinDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void tan_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = tan(a[i]);
}
__global__ void tanDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = tan(a[i]);
}
extern "C"
void ttan(float *a, size_t size) {
hipLaunchKernelGGL(( tan_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void ttanDouble(double *a, size_t size) {
hipLaunchKernelGGL(( tanDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void asin_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = asin(a[i]);
}
__global__ void asinDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = asin(a[i]);
}
extern "C"
void tasin(float *a, size_t size) {
hipLaunchKernelGGL(( asin_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void tasinDouble(double *a, size_t size) {
hipLaunchKernelGGL(( asinDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void acos_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = acos(a[i]);
}
__global__ void acosDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = acos(a[i]);
}
extern "C"
void tacos(float *a, size_t size) {
hipLaunchKernelGGL(( acos_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void tacosDouble(double *a, size_t size) {
hipLaunchKernelGGL(( acosDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void atan_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = atan(a[i]);
}
__global__ void atanDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = atan(a[i]);
}
extern "C"
void tatan(float *a, size_t size) {
hipLaunchKernelGGL(( atan_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void tatanDouble(double *a, size_t size) {
hipLaunchKernelGGL(( atanDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void sinh_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = sinh(a[i]);
}
__global__ void sinhDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = sinh(a[i]);
}
extern "C"
void tsinh(float *a, size_t size) {
hipLaunchKernelGGL(( sinh_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void tsinhDouble(double *a, size_t size) {
hipLaunchKernelGGL(( sinhDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void cosh_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = cosh(a[i]);
}
__global__ void coshDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = cosh(a[i]);
}
extern "C"
void tcosh(float *a, size_t size) {
hipLaunchKernelGGL(( cosh_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void tcoshDouble(double *a, size_t size) {
hipLaunchKernelGGL(( coshDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void tanh_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = tanh(a[i]);
}
__global__ void tanhDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = tanh(a[i]);
}
extern "C"
void ttanh(float *a, size_t size) {
hipLaunchKernelGGL(( tanh_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void ttanhDouble(double *a, size_t size) {
hipLaunchKernelGGL(( tanhDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void asinh_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = asinh(a[i]);
}
__global__ void asinhDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = asinh(a[i]);
}
extern "C"
void tasinh(float *a, size_t size) {
hipLaunchKernelGGL(( asinh_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void tasinhDouble(double *a, size_t size) {
hipLaunchKernelGGL(( asinhDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void acosh_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = acosh(a[i]);
}
__global__ void acoshDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = acosh(a[i]);
}
extern "C"
void tacosh(float *a, size_t size) {
hipLaunchKernelGGL(( acosh_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void tacoshDouble(double *a, size_t size) {
hipLaunchKernelGGL(( acoshDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void atanh_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = atanh(a[i]);
}
__global__ void atanhDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = atanh(a[i]);
}
extern "C"
void tatanh(float *a, size_t size) {
hipLaunchKernelGGL(( atanh_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a,size);
}
extern "C"
void tatanhDouble(double *a, size_t size) {
hipLaunchKernelGGL(( atanhDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a, size);
}
__global__ void pow_kernel(float *a1, float *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = pow(a1[i], a2[i]);
}
__global__ void powDouble_kernel(double *a1, double *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = pow(a1[i], a2[i]);
}
extern "C"
void tpow(float *a1, float *a2, size_t size) {
hipLaunchKernelGGL(( pow_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a1,a2,size);
}
extern "C"
void tpowDouble(double *a1, double *a2, size_t size) {
hipLaunchKernelGGL(( powDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a1,a2,size);
}
__global__ void max_kernel(float *a1, float *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = max(a1[i], a2[i]);
}
__global__ void maxDouble_kernel(double *a1, double *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = max(a1[i], a2[i]);
}
extern "C"
void tmax(float *a1, float *a2, size_t size) {
hipLaunchKernelGGL(( max_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a1,a2,size);
}
extern "C"
void tmaxDouble(double *a1, double *a2, size_t size) {
hipLaunchKernelGGL(( maxDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, a1,a2,size);
}
__global__ void broadcast_copy_kernel(int nbdim, size_t size, float *inp, int *inp_shape, float *out, int *out_shape) {
// i is the index in the output, assumed to be in row-major order.
int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
int acc = out_idx;
int *out_indices;
int inp_idx;
if (out_idx < size) {
out_indices = new int[nbdim];
for (int i = nbdim-1; i >= 0; i--) {
out_indices[i] = acc % inp_shape[i];
acc = acc / out_shape[i];
}
inp_idx = out_indices[0];
for (int i = 1; i < nbdim; i++) {
inp_idx = out_indices[i] + inp_shape[i] * inp_idx;
}
delete[] out_indices;
out[out_idx] = inp[inp_idx];
}
}
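// Worked example (illustration only): with nbdim = 2, out_shape = {2, 3},
// inp_shape = {1, 3} and out_idx = 4, the loop yields out_indices = {0, 1}
// (dim 1: 4 % 3 = 1; dim 0: (4 / 3) % 1 = 0 -- broadcast dims of size 1 collapse to 0),
// so inp_idx = 0 * 3 + 1 = 1 and out[4] = inp[1].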
__global__ void broadcast_copyDouble_kernel(int nbdim, size_t size, double *inp, int *inp_shape, double *out, int *out_shape) {
// i is the index in the output, assumed to be in row-major order.
int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
int acc = out_idx;
int *out_indices;
int inp_idx;
if (out_idx < size) {
out_indices = new int[nbdim];
for (int i = nbdim-1; i >= 0; i--) {
out_indices[i] = acc % inp_shape[i];
acc = acc / out_shape[i];
}
inp_idx = out_indices[0];
for (int i = 1; i < nbdim; i++) {
inp_idx = out_indices[i] + inp_shape[i] * inp_idx;
}
delete[] out_indices;
out[out_idx] = inp[inp_idx];
}
}
__global__ void broadcast_backward_kernel(int nbdim, size_t inp_size, double *inp, int *inp_shape, double *out, int *out_shape) {
// i is the index in the input, assumed to be in row-major order.
int inp_idx = blockIdx.x * blockDim.x + threadIdx.x;
int acc = inp_idx;
int *inp_indices;
int out_idx;
if (inp_idx < inp_size) {
inp_indices = new int[nbdim];
for (int i = nbdim-1; i >= 0; i--) {
inp_indices[i] = acc % inp_shape[i];
acc = acc / inp_shape[i];
}
for (int i = 0; i < nbdim; i++) {
}
delete[] inp_indices;
}
}
extern "C"
void broadcast_copy(int nbdim, size_t size, float *inp, int *inp_shape, float *out, int *out_shape) {
hipLaunchKernelGGL(( broadcast_copy_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, nbdim, size, inp, inp_shape, out, out_shape);
}
extern "C"
void broadcast_copyDouble(int nbdim, size_t size, double *inp, int *inp_shape, double * out, int *out_shape) {
hipLaunchKernelGGL(( broadcast_copyDouble_kernel), dim3((size + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, nbdim, size, inp, inp_shape, out, out_shape);
}
extern "C"
void freeDevicePtr(void *ptr) {
hipFree(ptr);
}
extern "C"
void freeCuRANDGenerator(hiprandGenerator_t gen) {
hiprandDestroyGenerator(gen);
}
|
0e0475809d6ed09d5f02b0e1feedfbf8384ff0ee.cu
|
#include "hnn_cubits.h"
#define BLOCK_SIZE 256
__global__ void mul_kernel(float *a1, float *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = a1[i] * a2[i];
}
__global__ void mulDouble_kernel(double *a1, double *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = a1[i] * a2[i];
}
extern "C"
void mul(float *a1, float *a2, size_t size) {
mul_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a1, a2, size);
}
extern "C"
void mulDouble(double *a1, double *a2, size_t size) {
mulDouble_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a1, a2, size);
}
__global__ void add_kernel(float *a1, float *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = a1[i] + a2[i];
}
__global__ void addDouble_kernel(double *a1, double *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = a1[i] + a2[i];
}
extern "C"
void add(float *a1, float *a2, size_t size) {
add_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a1, a2, size);
}
extern "C"
void addDouble(double *a1, double *a2, size_t size) {
addDouble_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a1, a2, size);
}
__global__ void abs_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = fabsf(a[i]);
}
__global__ void absDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = fabs(a[i]);
}
extern "C"
void tabs(float *a, size_t size) {
abs_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
extern "C"
void tabsDouble(double *a, size_t size) {
absDouble_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void signum_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = signbit(a[i]) ? -1 : 1;
}
__global__ void signumDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = signbit(a[i]) ? -1 : 1;
}
extern "C"
void signum(float *a, size_t size) {
signum_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
extern "C"
void signumDouble(double *a, size_t size) {
signumDouble_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void subtract_kernel(float *a1, float *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = a1[i] - a2[i];
}
__global__ void subtractDouble_kernel(double *a1, double *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = a1[i] - a2[i];
}
extern "C"
void subtract(float *a1, float *a2, size_t size) {
subtract_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a1, a2, size);
}
extern "C"
void subtractDouble(double *a1, double *a2, size_t size) {
subtractDouble_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a1, a2, size);
}
__global__ void negate_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = -a[i];
}
__global__ void negateDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = -a[i];
}
extern "C"
void negate(float *a, size_t size) {
negate_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
extern "C"
void negateDouble(double *a, size_t size) {
negateDouble_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void scale_kernel(float s, float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] *= s;
}
__global__ void scaleDouble_kernel(double s, double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] *= s;
}
extern "C"
void scale(float s, float *a, size_t size) {
scale_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(s, a, size);
}
extern "C"
void scaleDouble(double s, double *a, size_t size) {
scaleDouble_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(s, a, size);
}
__global__ void log_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = log(a[i]);
}
__global__ void logDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = log(a[i]);
}
extern "C"
void logFloat(float *a, size_t size) {
log_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
extern "C"
void logDouble(double *a, size_t size) {
logDouble_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void inv_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = 1. / a[i];
}
__global__ void invDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = 1. / a[i];
}
extern "C"
void inv(float *a, size_t size) {
inv_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE,BLOCK_SIZE>>>(a, size);
}
extern "C"
void invDouble(double *a, size_t size) {
invDouble_kernel<<< (size + BLOCK_SIZE - 1) / BLOCK_SIZE,BLOCK_SIZE>>>(a, size);
}
__global__ void exp_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = exp(a[i]);
}
__global__ void expDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = exp(a[i]);
}
extern "C"
void texp(float *a, size_t size) {
exp_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void texpDouble(double *a, size_t size) {
expDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void sqrt_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = sqrt(a[i]);
}
__global__ void sqrtDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = sqrt(a[i]);
}
extern "C"
void tsqrt(float *a, size_t size) {
sqrt_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void tsqrtDouble(double *a, size_t size) {
sqrtDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void cos_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = cos(a[i]);
}
__global__ void cosDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = cos(a[i]);
}
extern "C"
void tcos(float *a, size_t size) {
cos_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void tcosDouble(double *a, size_t size) {
cosDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void sin_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = sin(a[i]);
}
__global__ void sinDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = sin(a[i]);
}
extern "C"
void tsin(float *a, size_t size) {
sin_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void tsinDouble(double *a, size_t size) {
sinDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void tan_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = tan(a[i]);
}
__global__ void tanDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = tan(a[i]);
}
extern "C"
void ttan(float *a, size_t size) {
tan_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void ttanDouble(double *a, size_t size) {
tanDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void asin_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = asin(a[i]);
}
__global__ void asinDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = asin(a[i]);
}
extern "C"
void tasin(float *a, size_t size) {
asin_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void tasinDouble(double *a, size_t size) {
asinDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void acos_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = acos(a[i]);
}
__global__ void acosDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = acos(a[i]);
}
extern "C"
void tacos(float *a, size_t size) {
acos_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void tacosDouble(double *a, size_t size) {
acosDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void atan_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = atan(a[i]);
}
__global__ void atanDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = atan(a[i]);
}
extern "C"
void tatan(float *a, size_t size) {
atan_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void tatanDouble(double *a, size_t size) {
atanDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void sinh_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = sinh(a[i]);
}
__global__ void sinhDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = sinh(a[i]);
}
extern "C"
void tsinh(float *a, size_t size) {
sinh_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void tsinhDouble(double *a, size_t size) {
sinhDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void cosh_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = cosh(a[i]);
}
__global__ void coshDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = cosh(a[i]);
}
extern "C"
void tcosh(float *a, size_t size) {
cosh_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void tcoshDouble(double *a, size_t size) {
coshDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void tanh_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = tanh(a[i]);
}
__global__ void tanhDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = tanh(a[i]);
}
extern "C"
void ttanh(float *a, size_t size) {
tanh_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void ttanhDouble(double *a, size_t size) {
tanhDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void asinh_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = asinh(a[i]);
}
__global__ void asinhDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = asinh(a[i]);
}
extern "C"
void tasinh(float *a, size_t size) {
asinh_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void tasinhDouble(double *a, size_t size) {
asinhDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void acosh_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = acosh(a[i]);
}
__global__ void acoshDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = acosh(a[i]);
}
extern "C"
void tacosh(float *a, size_t size) {
acosh_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void tacoshDouble(double *a, size_t size) {
acoshDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
__global__ void atanh_kernel(float *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = atanh(a[i]);
}
__global__ void atanhDouble_kernel(double *a, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a[i] = atanh(a[i]);
}
extern "C"
void tatanh(float *a, size_t size) {
atanh_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a,size);
}
extern "C"
void tatanhDouble(double *a, size_t size) {
atanhDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a, size);
}
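// The remaining binary element-wise ops (pow, max) follow the same convention as subtract:
// the result overwrites the second operand a2.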
__global__ void pow_kernel(float *a1, float *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = pow(a1[i], a2[i]);
}
__global__ void powDouble_kernel(double *a1, double *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = pow(a1[i], a2[i]);
}
extern "C"
void tpow(float *a1, float *a2, size_t size) {
pow_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a1,a2,size);
}
extern "C"
void tpowDouble(double *a1, double *a2, size_t size) {
powDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a1,a2,size);
}
__global__ void max_kernel(float *a1, float *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = max(a1[i], a2[i]);
}
__global__ void maxDouble_kernel(double *a1, double *a2, size_t size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
a2[i] = max(a1[i], a2[i]);
}
extern "C"
void tmax(float *a1, float *a2, size_t size) {
max_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a1,a2,size);
}
extern "C"
void tmaxDouble(double *a1, double *a2, size_t size) {
maxDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(a1,a2,size);
}
__global__ void broadcast_copy_kernel(int nbdim, size_t size, float *inp, int *inp_shape, float *out, int *out_shape) {
// i is the index in the output, assumed to be in row-major order.
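// out_idx is decomposed into one index per dimension; taking the remainder against
// inp_shape (instead of out_shape) collapses broadcast dimensions of input extent 1
// to index 0. This relies on every input extent dividing the corresponding output
// extent, which holds for the usual broadcasting rules (extent 1 or equal extents).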
int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
int acc = out_idx;
int *out_indices;
int inp_idx;
if (out_idx < size) {
out_indices = new int[nbdim];
for (int i = nbdim-1; i >= 0; i--) {
out_indices[i] = acc % inp_shape[i];
acc = acc / out_shape[i];
}
inp_idx = out_indices[0];
for (int i = 1; i < nbdim; i++) {
inp_idx = out_indices[i] + inp_shape[i] * inp_idx;
}
delete[] out_indices;
out[out_idx] = inp[inp_idx];
}
}
__global__ void broadcast_copyDouble_kernel(int nbdim, size_t size, double *inp, int *inp_shape, double *out, int *out_shape) {
// i is the index in the output, assumed to be in row-major order.
int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
int acc = out_idx;
int *out_indices;
int inp_idx;
if (out_idx < size) {
out_indices = new int[nbdim];
for (int i = nbdim-1; i >= 0; i--) {
out_indices[i] = acc % inp_shape[i];
acc = acc / out_shape[i];
}
inp_idx = out_indices[0];
for (int i = 1; i < nbdim; i++) {
inp_idx = out_indices[i] + inp_shape[i] * inp_idx;
}
delete[] out_indices;
out[out_idx] = inp[inp_idx];
}
}
__global__ void broadcast_backward_kernel(int nbdim, size_t inp_size, double *inp, int *inp_shape, double *out, int *out_shape) {
// i is the index in the input, assumed to be in row-major order.
int inp_idx = blockIdx.x * blockDim.x + threadIdx.x;
int acc = inp_idx;
int *inp_indices;
int out_idx;
if (inp_idx < inp_size) {
inp_indices = new int[nbdim];
for (int i = nbdim-1; i >= 0; i--) {
inp_indices[i] = acc % inp_shape[i];
acc = acc / inp_shape[i];
}
// NOTE: the loop body below is empty, so this kernel only computes the per-dimension
// indices and never accumulates anything into `out`; the backward (reduction) step is
// not implemented.
for (int i = 0; i < nbdim; i++) {
}
delete[] inp_indices;
}
}
extern "C"
void broadcast_copy(int nbdim, size_t size, float *inp, int *inp_shape, float *out, int *out_shape) {
broadcast_copy_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(nbdim, size, inp, inp_shape, out, out_shape);
}
extern "C"
void broadcast_copyDouble(int nbdim, size_t size, double *inp, int *inp_shape, double * out, int *out_shape) {
broadcast_copyDouble_kernel<<<(size + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(nbdim, size, inp, inp_shape, out, out_shape);
}
extern "C"
void freeDevicePtr(void *ptr) {
cudaFree(ptr);
}
extern "C"
void freeCuRANDGenerator(curandGenerator_t gen) {
curandDestroyGenerator(gen);
}
|
3512d675e999b251a02f00c23acecc903ad52a10.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__
void kernel(float* dA, float* dB, float* dC, int dim)
{
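// One thread per matrix element: element-wise addition of two dim x dim matrices.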
unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int idy = threadIdx.y + blockIdx.y*blockDim.y;
if(idx<dim && idy<dim)
dC[idx+idy*dim]=dA[idx+idy*dim]+dB[idx+idy*dim];
}
|
3512d675e999b251a02f00c23acecc903ad52a10.cu
|
__global__
void kernel(float* dA, float* dB, float* dC, int dim)
{
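// One thread per matrix element: element-wise addition of two dim x dim matrices.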
unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int idy = threadIdx.y + blockIdx.y*blockDim.y;
if(idx<dim && idy<dim)
dC[idx+idy*dim]=dA[idx+idy*dim]+dB[idx+idy*dim];
}
|
2cd16ebc0f3c60862140762054e0f905a46d74b4.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* GridTools
*
* Copyright (c) 2014-2023, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gridtools/common/cuda_is_ptr.hpp>
#include <utility>
#include <gtest/gtest.h>
#include <gridtools/common/cuda_util.hpp>
using gridtools::is_gpu_ptr;
using gridtools::cuda_util::cuda_malloc;
TEST(test_is_gpu_ptr, host_ptr_is_no_cuda_ptr) {
auto testee = std::unique_ptr<double>(new double);
EXPECT_FALSE(is_gpu_ptr(testee.get()));
EXPECT_EQ(hipSuccess, hipGetLastError());
}
TEST(test_is_gpu_ptr, cuda_ptr_is_cuda_ptr) {
auto testee = cuda_malloc<double>();
EXPECT_TRUE(is_gpu_ptr(testee.get()));
EXPECT_EQ(hipSuccess, hipGetLastError());
}
TEST(test_is_gpu_ptr, cuda_ptr_inner_region_are_cuda_ptr) {
auto testee = cuda_malloc<double[]>(2);
EXPECT_TRUE(is_gpu_ptr(testee.get() + 1));
EXPECT_EQ(hipSuccess, hipGetLastError());
}
|
2cd16ebc0f3c60862140762054e0f905a46d74b4.cu
|
/*
* GridTools
*
* Copyright (c) 2014-2023, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gridtools/common/cuda_is_ptr.hpp>
#include <utility>
#include <gtest/gtest.h>
#include <gridtools/common/cuda_util.hpp>
using gridtools::is_gpu_ptr;
using gridtools::cuda_util::cuda_malloc;
TEST(test_is_gpu_ptr, host_ptr_is_no_cuda_ptr) {
auto testee = std::unique_ptr<double>(new double);
EXPECT_FALSE(is_gpu_ptr(testee.get()));
EXPECT_EQ(cudaSuccess, cudaGetLastError());
}
TEST(test_is_gpu_ptr, cuda_ptr_is_cuda_ptr) {
auto testee = cuda_malloc<double>();
EXPECT_TRUE(is_gpu_ptr(testee.get()));
EXPECT_EQ(cudaSuccess, cudaGetLastError());
}
TEST(test_is_gpu_ptr, cuda_ptr_inner_region_are_cuda_ptr) {
auto testee = cuda_malloc<double[]>(2);
EXPECT_TRUE(is_gpu_ptr(testee.get() + 1));
EXPECT_EQ(cudaSuccess, cudaGetLastError());
}
|
9f171ae92a035f9320cafc4e53b85a01a54efe0c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "cuda_auxiliary.h"
/*
* compile: nvcc specstp.cu -lcublas -o specstp
*/
int main(int argc, char **argv)
{
hipblasHandle_t cublas_handle;
FILE *fp_A = NULL;
FILE *fp_x = NULL;
double *hst_A = NULL;
double *hst_x = NULL;
double *dev_A = NULL;
double *dev_x = NULL;
double *dev_y = NULL;
double norm;
double eigval;
const double ONE = 1.0;
const double ZERO = 0.0;
double alpha;
int dim;
int steps;
if (argc != 5) {
fprintf(stderr, "usage: %s N A.dat x0.dat steps\n", argv[0]);
exit(EXIT_FAILURE);
}
dim = atoi(argv[1]);
steps = atoi(argv[4]);
open_file(fp_A, argv[2], "r");
host_alloc(hst_A, double, dim * dim);
open_file(fp_x, argv[3], "r");
host_alloc(hst_x, double, dim);
read_file(hst_A, sizeof(double), dim * dim, fp_A);
read_file(hst_x, sizeof(double), dim, fp_x);
cuda_exec(hipMalloc(&dev_A, dim * dim * sizeof(double)));
cuda_exec(hipMalloc(&dev_x, dim * sizeof(double)));
cuda_exec(hipMalloc(&dev_y, dim * sizeof(double)));
cublas_exec(hipblasCreate(&cublas_handle));
cublas_exec(hipblasSetPointerMode(cublas_handle, HIPBLAS_POINTER_MODE_HOST));
cublas_exec(hipblasSetMatrix(dim, dim, sizeof(double), hst_A, dim, dev_A, dim));
cublas_exec(hipblasSetVector(dim, sizeof(double), hst_x, 1, dev_x, 1));
cublas_exec(hipblasSetVector(dim, sizeof(double), hst_x, 1, dev_y, 1));
int i;
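// Power iteration: normalise the current vector y, keep the normalised copy in x,
// then set y = A^T * x. After `steps` rounds the Rayleigh quotient x . y (computed
// below with hipblasDdot) approximates the dominant eigenvalue.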
for (i = 0; i < steps; ++i){
cublas_exec(hipblasDnrm2(cublas_handle, dim, dev_y, 1, &norm));
alpha = 1.0/norm;
cublas_exec(hipblasDscal(cublas_handle, dim, &alpha, dev_y, 1));
cublas_exec(hipblasDcopy(cublas_handle, dim, dev_y, 1, dev_x, 1));
cublas_exec(hipblasDgemv(cublas_handle, HIPBLAS_OP_T, dim, dim, &ONE, dev_A, dim, dev_x, 1, &ZERO, dev_y, 1));
}
cublas_exec(hipblasDdot(cublas_handle, dim, dev_x, 1, dev_y, 1, &eigval));
printf("\nSpectrum: %#.16lg\n", eigval);
cublas_exec(hipblasDestroy(cublas_handle));
hipFree(dev_A);
hipFree(dev_x);
hipFree(dev_y);
host_free(hst_A);
host_free(hst_x);
return 0;
}
|
9f171ae92a035f9320cafc4e53b85a01a54efe0c.cu
|
#include <stdio.h>
#include "cuda_auxiliary.h"
/*
* compile: nvcc specstp.cu -lcublas -o specstp
*/
int main(int argc, char **argv)
{
cublasHandle_t cublas_handle;
FILE *fp_A = NULL;
FILE *fp_x = NULL;
double *hst_A = NULL;
double *hst_x = NULL;
double *dev_A = NULL;
double *dev_x = NULL;
double *dev_y = NULL;
double norm;
double eigval;
const double ONE = 1.0;
const double ZERO = 0.0;
double alpha;
int dim;
int steps;
if (argc != 5) {
fprintf(stderr, "usage: %s N A.dat x0.dat steps\n", argv[0]);
exit(EXIT_FAILURE);
}
dim = atoi(argv[1]);
steps = atoi(argv[4]);
open_file(fp_A, argv[2], "r");
host_alloc(hst_A, double, dim * dim);
open_file(fp_x, argv[3], "r");
host_alloc(hst_x, double, dim);
read_file(hst_A, sizeof(double), dim * dim, fp_A);
read_file(hst_x, sizeof(double), dim, fp_x);
cuda_exec(cudaMalloc(&dev_A, dim * dim * sizeof(double)));
cuda_exec(cudaMalloc(&dev_x, dim * sizeof(double)));
cuda_exec(cudaMalloc(&dev_y, dim * sizeof(double)));
cublas_exec(cublasCreate(&cublas_handle));
cublas_exec(cublasSetPointerMode(cublas_handle, CUBLAS_POINTER_MODE_HOST));
cublas_exec(cublasSetMatrix(dim, dim, sizeof(double), hst_A, dim, dev_A, dim));
cublas_exec(cublasSetVector(dim, sizeof(double), hst_x, 1, dev_x, 1));
cublas_exec(cublasSetVector(dim, sizeof(double), hst_x, 1, dev_y, 1));
int i;
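// Power iteration: normalise the current vector y, keep the normalised copy in x,
// then set y = A^T * x. After `steps` rounds the Rayleigh quotient x . y (computed
// below with cublasDdot) approximates the dominant eigenvalue.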
for (i = 0; i < steps; ++i){
cublas_exec(cublasDnrm2(cublas_handle, dim, dev_y, 1, &norm));
alpha = 1.0/norm;
cublas_exec(cublasDscal(cublas_handle, dim, &alpha, dev_y, 1));
cublas_exec(cublasDcopy(cublas_handle, dim, dev_y, 1, dev_x, 1));
cublas_exec(cublasDgemv(cublas_handle, CUBLAS_OP_T, dim, dim, &ONE, dev_A, dim, dev_x, 1, &ZERO, dev_y, 1));
}
cublas_exec(cublasDdot(cublas_handle, dim, dev_x, 1, dev_y, 1, &eigval));
printf("\nSpectrum: %#.16lg\n", eigval);
cublas_exec(cublasDestroy(cublas_handle));
cudaFree(dev_A);
cudaFree(dev_x);
cudaFree(dev_y);
host_free(hst_A);
host_free(hst_x);
return 0;
}
|
5afb09a7439172b0f6429d33e9f059cfa17bf851.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "../kernels.hpp"
#include "sph.cuh"
#include "utils.cuh"
namespace sphexa
{
namespace sph
{
namespace cuda
{
namespace kernels
{
const double gradh_i = 1.0;
const double gradh_j = 1.0;
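// For every particle i listed in clist, this kernel accumulates over its neighbours the
// pressure-gradient terms (stored in grad_P_x/y/z) and the internal-energy rate du,
// combining the IAD gradient coefficients c11..c33 with an artificial-viscosity term.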
template <typename T>
__global__ void computeMomentumAndEnergyIAD(const int n, const T sincIndex, const T K, const int ngmax, const BBox<T> *bbox,
const int *clist, const int *neighbors, const int *neighborsCount, const T *x, const T *y,
const T *z, const T *vx, const T *vy, const T *vz, const T *h, const T *m, const T *ro,
const T *p, const T *c, const T *c11, const T *c12, const T *c13, const T *c22, const T *c23,
const T *c33, T *grad_P_x, T *grad_P_y, T *grad_P_z, T *du)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n) return;
const int i = clist[tid];
const int nn = neighborsCount[tid];
T momentum_x = 0.0, momentum_y = 0.0, momentum_z = 0.0, energy = 0.0, energyAV = 0.0;
for (int pj = 0; pj < nn; ++pj)
{
const int j = neighbors[tid * ngmax + pj];
T r_ijx = (x[i] - x[j]);
T r_ijy = (y[i] - y[j]);
T r_ijz = (z[i] - z[j]);
T r_jix = (x[j] - x[i]);
T r_jiy = (y[j] - y[i]);
T r_jiz = (z[j] - z[i]);
applyPBC(*bbox, 2.0 * h[i], r_ijx, r_ijy, r_ijz);
applyPBC(*bbox, 2.0 * h[i], r_jix, r_jiy, r_jiz);
const T dist = std::sqrt(r_ijx * r_ijx + r_ijy * r_ijy + r_ijz * r_ijz);
const T v_ijx = (vx[i] - vx[j]);
const T v_ijy = (vy[i] - vy[j]);
const T v_ijz = (vz[i] - vz[j]);
const T v1 = dist / h[i];
const T v2 = dist / h[j];
const T rv = r_ijx * v_ijx + r_ijy * v_ijy + r_ijz * v_ijz;
const T w1 = K * math_namespace::pow(wharmonic(v1), (int)sincIndex);
const T w2 = K * math_namespace::pow(wharmonic(v2), (int)sincIndex);
const T W1 = w1 / (h[i] * h[i] * h[i]);
const T W2 = w2 / (h[j] * h[j] * h[j]);
const T kern11_i = c11[i] * r_jix;
const T kern12_i = c12[i] * r_jiy;
const T kern13_i = c13[i] * r_jiz;
const T kern21_i = c12[i] * r_jix;
const T kern22_i = c22[i] * r_jiy;
const T kern23_i = c23[i] * r_jiz;
const T kern31_i = c13[i] * r_jix;
const T kern32_i = c23[i] * r_jiy;
const T kern33_i = c33[i] * r_jiz;
const T kern11_j = c11[j] * r_jix;
const T kern12_j = c12[j] * r_jiy;
const T kern13_j = c13[j] * r_jiz;
const T kern21_j = c12[j] * r_jix;
const T kern22_j = c22[j] * r_jiy;
const T kern23_j = c23[j] * r_jiz;
const T kern31_j = c13[j] * r_jix;
const T kern32_j = c23[j] * r_jiy;
const T kern33_j = c33[j] * r_jiz;
const T termA1_i = (kern11_i + kern12_i + kern13_i) * W1;
const T termA2_i = (kern21_i + kern22_i + kern23_i) * W1;
const T termA3_i = (kern31_i + kern32_i + kern33_i) * W1;
const T termA1_j = (kern11_j + kern12_j + kern13_j) * W2;
const T termA2_j = (kern21_j + kern22_j + kern23_j) * W2;
const T termA3_j = (kern31_j + kern32_j + kern33_j) * W2;
const T pro_i = p[i] / (gradh_i * ro[i] * ro[i]);
const T pro_j = p[j] / (gradh_j * ro[j] * ro[j]);
const T r_square = dist * dist;
const T viscosity_ij = artificial_viscosity(ro[i], ro[j], h[i], h[j], c[i], c[j], rv, r_square);
const T grad_Px_AV = 0.5 * (m[i] / ro[i] * viscosity_ij * termA1_i + m[j] / ro[j] * viscosity_ij * termA1_j);
const T grad_Py_AV = 0.5 * (m[i] / ro[i] * viscosity_ij * termA2_i + m[j] / ro[j] * viscosity_ij * termA2_j);
const T grad_Pz_AV = 0.5 * (m[i] / ro[i] * viscosity_ij * termA3_i + m[j] / ro[j] * viscosity_ij * termA3_j);
momentum_x += m[j] * (pro_i * termA1_i + pro_j * termA1_j) + grad_Px_AV;
momentum_y += m[j] * (pro_i * termA2_i + pro_j * termA2_j) + grad_Py_AV;
momentum_z += m[j] * (pro_i * termA3_i + pro_j * termA3_j) + grad_Pz_AV;
energy += m[j] * 2.0 * pro_i * (v_ijx * termA1_i + v_ijy * termA2_i + v_ijz * termA3_i);
energyAV += grad_Px_AV * v_ijx + grad_Py_AV * v_ijy + grad_Pz_AV * v_ijz;
}
du[tid] = 0.5 * (energy + energyAV);
grad_P_x[tid] = momentum_x;
grad_P_y[tid] = momentum_y;
grad_P_z[tid] = momentum_z;
}
} // namespace kernels
template void computeMomentumAndEnergyIAD<double, SqPatch<double>>(const std::vector<int> &clist, SqPatch<double> &d);
template <typename T, class Dataset>
void computeMomentumAndEnergyIAD(const std::vector<int> &clist, Dataset &d)
{
const size_t n = clist.size();
const size_t np = d.x.size();
const size_t allNeighbors = n * d.ngmax;
const size_t size_bbox = sizeof(BBox<T>);
const size_t size_np_T = np * sizeof(T);
const size_t size_n_int = n * sizeof(int);
const size_t size_n_T = n * sizeof(T);
const size_t size_allNeighbors = allNeighbors * sizeof(int);
int *d_clist, *d_neighbors, *d_neighborsCount;
T *d_x, *d_y, *d_z, *d_vx, *d_vy, *d_vz, *d_m, *d_h, *d_ro, *d_p, *d_c, *d_c11, *d_c12, *d_c13, *d_c22, *d_c23, *d_c33;
BBox<T> *d_bbox;
T *d_grad_P_x, *d_grad_P_y, *d_grad_P_z, *d_du;
// input data
CHECK_CUDA_ERR(utils::hipMalloc(size_n_int, d_clist, d_neighborsCount));
CHECK_CUDA_ERR(utils::hipMalloc(size_allNeighbors, d_neighbors));
CHECK_CUDA_ERR(utils::hipMalloc(size_bbox, d_bbox));
CHECK_CUDA_ERR(
utils::hipMalloc(size_np_T, d_x, d_y, d_z, d_vx, d_vy, d_vz, d_h, d_m, d_ro, d_p, d_c, d_c11, d_c12, d_c13, d_c22, d_c23, d_c33));
// output data
CHECK_CUDA_ERR(utils::hipMalloc(size_n_T, d_grad_P_x, d_grad_P_y, d_grad_P_z, d_du));
CHECK_CUDA_ERR(hipMemcpy(d_x, d.x.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_y, d.y.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_z, d.z.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_vx, d.vx.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_vy, d.vy.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_vz, d.vz.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_h, d.h.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_m, d.m.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_ro, d.ro.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_p, d.p.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_c, d.c.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_c11, d.c11.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_c12, d.c12.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_c13, d.c13.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_c22, d.c22.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_c23, d.c23.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_c33, d.c33.data(), size_np_T, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_bbox, &d.bbox, size_bbox, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_clist, clist.data(), size_n_int, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_neighbors, d.neighbors.data(), size_allNeighbors, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipMemcpy(d_neighborsCount, d.neighborsCount.data(), size_n_int, hipMemcpyHostToDevice));
const int threadsPerBlock = 256;
const int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( kernels::computeMomentumAndEnergyIAD), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
n, d.sincIndex, d.K, d.ngmax, d_bbox, d_clist, d_neighbors, d_neighborsCount, d_x, d_y, d_z, d_vx, d_vy, d_vz, d_h, d_m, d_ro, d_p,
d_c, d_c11, d_c12, d_c13, d_c22, d_c23, d_c33, d_grad_P_x, d_grad_P_y, d_grad_P_z, d_du);
CHECK_CUDA_ERR(hipGetLastError());
CHECK_CUDA_ERR(hipMemcpy(d.grad_P_x.data(), d_grad_P_x, size_n_T, hipMemcpyDeviceToHost));
CHECK_CUDA_ERR(hipMemcpy(d.grad_P_y.data(), d_grad_P_y, size_n_T, hipMemcpyDeviceToHost));
CHECK_CUDA_ERR(hipMemcpy(d.grad_P_z.data(), d_grad_P_z, size_n_T, hipMemcpyDeviceToHost));
CHECK_CUDA_ERR(hipMemcpy(d.du.data(), d_du, size_n_T, hipMemcpyDeviceToHost));
CHECK_CUDA_ERR(utils::hipFree(d_clist, d_neighborsCount, d_neighbors, d_bbox, d_x, d_y, d_z, d_vx, d_vy, d_vz, d_h, d_m, d_ro, d_p,
d_c, d_c11, d_c12, d_c13, d_c22, d_c23, d_c33, d_grad_P_x, d_grad_P_y, d_grad_P_z, d_du));
}
} // namespace cuda
} // namespace sph
} // namespace sphexa
|
5afb09a7439172b0f6429d33e9f059cfa17bf851.cu
|
#include <cuda.h>
#include "../kernels.hpp"
#include "sph.cuh"
#include "utils.cuh"
namespace sphexa
{
namespace sph
{
namespace cuda
{
namespace kernels
{
const double gradh_i = 1.0;
const double gradh_j = 1.0;
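// For every particle i listed in clist, this kernel accumulates over its neighbours the
// pressure-gradient terms (stored in grad_P_x/y/z) and the internal-energy rate du,
// combining the IAD gradient coefficients c11..c33 with an artificial-viscosity term.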
template <typename T>
__global__ void computeMomentumAndEnergyIAD(const int n, const T sincIndex, const T K, const int ngmax, const BBox<T> *bbox,
const int *clist, const int *neighbors, const int *neighborsCount, const T *x, const T *y,
const T *z, const T *vx, const T *vy, const T *vz, const T *h, const T *m, const T *ro,
const T *p, const T *c, const T *c11, const T *c12, const T *c13, const T *c22, const T *c23,
const T *c33, T *grad_P_x, T *grad_P_y, T *grad_P_z, T *du)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n) return;
const int i = clist[tid];
const int nn = neighborsCount[tid];
T momentum_x = 0.0, momentum_y = 0.0, momentum_z = 0.0, energy = 0.0, energyAV = 0.0;
for (int pj = 0; pj < nn; ++pj)
{
const int j = neighbors[tid * ngmax + pj];
T r_ijx = (x[i] - x[j]);
T r_ijy = (y[i] - y[j]);
T r_ijz = (z[i] - z[j]);
T r_jix = (x[j] - x[i]);
T r_jiy = (y[j] - y[i]);
T r_jiz = (z[j] - z[i]);
applyPBC(*bbox, 2.0 * h[i], r_ijx, r_ijy, r_ijz);
applyPBC(*bbox, 2.0 * h[i], r_jix, r_jiy, r_jiz);
const T dist = std::sqrt(r_ijx * r_ijx + r_ijy * r_ijy + r_ijz * r_ijz);
const T v_ijx = (vx[i] - vx[j]);
const T v_ijy = (vy[i] - vy[j]);
const T v_ijz = (vz[i] - vz[j]);
const T v1 = dist / h[i];
const T v2 = dist / h[j];
const T rv = r_ijx * v_ijx + r_ijy * v_ijy + r_ijz * v_ijz;
const T w1 = K * math_namespace::pow(wharmonic(v1), (int)sincIndex);
const T w2 = K * math_namespace::pow(wharmonic(v2), (int)sincIndex);
const T W1 = w1 / (h[i] * h[i] * h[i]);
const T W2 = w2 / (h[j] * h[j] * h[j]);
const T kern11_i = c11[i] * r_jix;
const T kern12_i = c12[i] * r_jiy;
const T kern13_i = c13[i] * r_jiz;
const T kern21_i = c12[i] * r_jix;
const T kern22_i = c22[i] * r_jiy;
const T kern23_i = c23[i] * r_jiz;
const T kern31_i = c13[i] * r_jix;
const T kern32_i = c23[i] * r_jiy;
const T kern33_i = c33[i] * r_jiz;
const T kern11_j = c11[j] * r_jix;
const T kern12_j = c12[j] * r_jiy;
const T kern13_j = c13[j] * r_jiz;
const T kern21_j = c12[j] * r_jix;
const T kern22_j = c22[j] * r_jiy;
const T kern23_j = c23[j] * r_jiz;
const T kern31_j = c13[j] * r_jix;
const T kern32_j = c23[j] * r_jiy;
const T kern33_j = c33[j] * r_jiz;
const T termA1_i = (kern11_i + kern12_i + kern13_i) * W1;
const T termA2_i = (kern21_i + kern22_i + kern23_i) * W1;
const T termA3_i = (kern31_i + kern32_i + kern33_i) * W1;
const T termA1_j = (kern11_j + kern12_j + kern13_j) * W2;
const T termA2_j = (kern21_j + kern22_j + kern23_j) * W2;
const T termA3_j = (kern31_j + kern32_j + kern33_j) * W2;
const T pro_i = p[i] / (gradh_i * ro[i] * ro[i]);
const T pro_j = p[j] / (gradh_j * ro[j] * ro[j]);
const T r_square = dist * dist;
const T viscosity_ij = artificial_viscosity(ro[i], ro[j], h[i], h[j], c[i], c[j], rv, r_square);
const T grad_Px_AV = 0.5 * (m[i] / ro[i] * viscosity_ij * termA1_i + m[j] / ro[j] * viscosity_ij * termA1_j);
const T grad_Py_AV = 0.5 * (m[i] / ro[i] * viscosity_ij * termA2_i + m[j] / ro[j] * viscosity_ij * termA2_j);
const T grad_Pz_AV = 0.5 * (m[i] / ro[i] * viscosity_ij * termA3_i + m[j] / ro[j] * viscosity_ij * termA3_j);
momentum_x += m[j] * (pro_i * termA1_i + pro_j * termA1_j) + grad_Px_AV;
momentum_y += m[j] * (pro_i * termA2_i + pro_j * termA2_j) + grad_Py_AV;
momentum_z += m[j] * (pro_i * termA3_i + pro_j * termA3_j) + grad_Pz_AV;
energy += m[j] * 2.0 * pro_i * (v_ijx * termA1_i + v_ijy * termA2_i + v_ijz * termA3_i);
energyAV += grad_Px_AV * v_ijx + grad_Py_AV * v_ijy + grad_Pz_AV * v_ijz;
}
du[tid] = 0.5 * (energy + energyAV);
grad_P_x[tid] = momentum_x;
grad_P_y[tid] = momentum_y;
grad_P_z[tid] = momentum_z;
}
} // namespace kernels
template void computeMomentumAndEnergyIAD<double, SqPatch<double>>(const std::vector<int> &clist, SqPatch<double> &d);
template <typename T, class Dataset>
void computeMomentumAndEnergyIAD(const std::vector<int> &clist, Dataset &d)
{
const size_t n = clist.size();
const size_t np = d.x.size();
const size_t allNeighbors = n * d.ngmax;
const size_t size_bbox = sizeof(BBox<T>);
const size_t size_np_T = np * sizeof(T);
const size_t size_n_int = n * sizeof(int);
const size_t size_n_T = n * sizeof(T);
const size_t size_allNeighbors = allNeighbors * sizeof(int);
int *d_clist, *d_neighbors, *d_neighborsCount;
T *d_x, *d_y, *d_z, *d_vx, *d_vy, *d_vz, *d_m, *d_h, *d_ro, *d_p, *d_c, *d_c11, *d_c12, *d_c13, *d_c22, *d_c23, *d_c33;
BBox<T> *d_bbox;
T *d_grad_P_x, *d_grad_P_y, *d_grad_P_z, *d_du;
// input data
CHECK_CUDA_ERR(utils::cudaMalloc(size_n_int, d_clist, d_neighborsCount));
CHECK_CUDA_ERR(utils::cudaMalloc(size_allNeighbors, d_neighbors));
CHECK_CUDA_ERR(utils::cudaMalloc(size_bbox, d_bbox));
CHECK_CUDA_ERR(
utils::cudaMalloc(size_np_T, d_x, d_y, d_z, d_vx, d_vy, d_vz, d_h, d_m, d_ro, d_p, d_c, d_c11, d_c12, d_c13, d_c22, d_c23, d_c33));
// output data
CHECK_CUDA_ERR(utils::cudaMalloc(size_n_T, d_grad_P_x, d_grad_P_y, d_grad_P_z, d_du));
CHECK_CUDA_ERR(cudaMemcpy(d_x, d.x.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_y, d.y.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_z, d.z.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_vx, d.vx.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_vy, d.vy.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_vz, d.vz.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_h, d.h.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_m, d.m.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_ro, d.ro.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_p, d.p.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_c, d.c.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_c11, d.c11.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_c12, d.c12.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_c13, d.c13.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_c22, d.c22.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_c23, d.c23.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_c33, d.c33.data(), size_np_T, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_bbox, &d.bbox, size_bbox, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_clist, clist.data(), size_n_int, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_neighbors, d.neighbors.data(), size_allNeighbors, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaMemcpy(d_neighborsCount, d.neighborsCount.data(), size_n_int, cudaMemcpyHostToDevice));
const int threadsPerBlock = 256;
const int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
kernels::computeMomentumAndEnergyIAD<<<blocksPerGrid, threadsPerBlock>>>(
n, d.sincIndex, d.K, d.ngmax, d_bbox, d_clist, d_neighbors, d_neighborsCount, d_x, d_y, d_z, d_vx, d_vy, d_vz, d_h, d_m, d_ro, d_p,
d_c, d_c11, d_c12, d_c13, d_c22, d_c23, d_c33, d_grad_P_x, d_grad_P_y, d_grad_P_z, d_du);
CHECK_CUDA_ERR(cudaGetLastError());
CHECK_CUDA_ERR(cudaMemcpy(d.grad_P_x.data(), d_grad_P_x, size_n_T, cudaMemcpyDeviceToHost));
CHECK_CUDA_ERR(cudaMemcpy(d.grad_P_y.data(), d_grad_P_y, size_n_T, cudaMemcpyDeviceToHost));
CHECK_CUDA_ERR(cudaMemcpy(d.grad_P_z.data(), d_grad_P_z, size_n_T, cudaMemcpyDeviceToHost));
CHECK_CUDA_ERR(cudaMemcpy(d.du.data(), d_du, size_n_T, cudaMemcpyDeviceToHost));
CHECK_CUDA_ERR(utils::cudaFree(d_clist, d_neighborsCount, d_neighbors, d_bbox, d_x, d_y, d_z, d_vx, d_vy, d_vz, d_h, d_m, d_ro, d_p,
d_c, d_c11, d_c12, d_c13, d_c22, d_c23, d_c33, d_grad_P_x, d_grad_P_y, d_grad_P_z, d_du));
}
} // namespace cuda
} // namespace sph
} // namespace sphexa
|
214e58ffc03373920d1cdf1ede39deb716e941b9.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <functions/log.cuh>
#include <raft/cuda_utils.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct LogInputs {
T tolerance;
int len;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const LogInputs<T>& dims)
{
return os;
}
template <typename T>
class LogTest : public ::testing::TestWithParam<LogInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<LogInputs<T>>::GetParam();
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
int len = params.len;
raft::allocate(data, len);
T data_h[params.len] = {2.1, 4.5, 0.34, 10.0};
raft::update_device(data, data_h, len, stream);
raft::allocate(result, len);
raft::allocate(result_ref, len);
T result_ref_h[params.len] = {0.74193734, 1.5040774, -1.07880966, 2.30258509};
raft::update_device(result_ref, result_ref_h, len, stream);
f_log(result, data, T(1), len, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override
{
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(result));
CUDA_CHECK(hipFree(result_ref));
}
protected:
LogInputs<T> params;
T *data, *result, *result_ref;
};
const std::vector<LogInputs<float>> inputsf2 = {{0.001f, 4}};
const std::vector<LogInputs<double>> inputsd2 = {{0.001, 4}};
typedef LogTest<float> LogTestValF;
TEST_P(LogTestValF, Result)
{
ASSERT_TRUE(
devArrMatch(result_ref, result, params.len, raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef LogTest<double> LogTestValD;
TEST_P(LogTestValD, Result)
{
ASSERT_TRUE(
devArrMatch(result_ref, result, params.len, raft::CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LogTests, LogTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(LogTests, LogTestValD, ::testing::ValuesIn(inputsd2));
} // end namespace Functions
} // end namespace MLCommon
|
214e58ffc03373920d1cdf1ede39deb716e941b9.cu
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <functions/log.cuh>
#include <raft/cuda_utils.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct LogInputs {
T tolerance;
int len;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const LogInputs<T>& dims)
{
return os;
}
template <typename T>
class LogTest : public ::testing::TestWithParam<LogInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<LogInputs<T>>::GetParam();
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
int len = params.len;
raft::allocate(data, len);
T data_h[params.len] = {2.1, 4.5, 0.34, 10.0};
raft::update_device(data, data_h, len, stream);
raft::allocate(result, len);
raft::allocate(result_ref, len);
T result_ref_h[params.len] = {0.74193734, 1.5040774, -1.07880966, 2.30258509};
raft::update_device(result_ref, result_ref_h, len, stream);
f_log(result, data, T(1), len, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override
{
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(result));
CUDA_CHECK(cudaFree(result_ref));
}
protected:
LogInputs<T> params;
T *data, *result, *result_ref;
};
const std::vector<LogInputs<float>> inputsf2 = {{0.001f, 4}};
const std::vector<LogInputs<double>> inputsd2 = {{0.001, 4}};
typedef LogTest<float> LogTestValF;
TEST_P(LogTestValF, Result)
{
ASSERT_TRUE(
devArrMatch(result_ref, result, params.len, raft::CompareApproxAbs<float>(params.tolerance)));
}
typedef LogTest<double> LogTestValD;
TEST_P(LogTestValD, Result)
{
ASSERT_TRUE(
devArrMatch(result_ref, result, params.len, raft::CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LogTests, LogTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(LogTests, LogTestValD, ::testing::ValuesIn(inputsd2));
} // end namespace Functions
} // end namespace MLCommon
|
7df7b34f7eb5769139ce8762032d5d374e678d4a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <helper_math.h>
#include <helper_functions.h>
#include <helper_cuda.h> // CUDA device initialization helper functions
#include <thrust/window_2d.h>
#include <thrust/window_transform.h>
__constant__ float cGaussian[64]; //gaussian array in device side
texture<uchar4, 2, hipReadModeNormalizedFloat> rgbaTex;
uint *dImage = NULL; //original image
uint *dTemp = NULL; //temp array for iterations
size_t pitch;
/*
Perform a simple bilateral filter.
Bilateral filter is a nonlinear filter that is a mixture of range
filter and domain filter; the former preserves crisp edges and
the latter filters noise. The intensity value at each pixel in
an image is replaced by a weighted average of intensity values from
nearby pixels.
The weight factor is calculated by the product of domain filter
component(using the gaussian distribution as a spatial distance) as
well as range filter component(Euclidean distance between center pixel
and the current neighbor pixel). Because this process is nonlinear,
the sample just uses a simple pixel by pixel step.
Texture fetches automatically clamp to edge of image. 1D gaussian array
is mapped to a 1D texture instead of using shared memory, which may
cause severe bank conflict.
Threads are y-pass(column-pass), because the output is coalesced.
Parameters
od - pointer to output data in global memory
d_f - pointer to the 1D gaussian array
e_d - euclidean delta
w - image width
h - image height
r - filter radius
*/
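// For a neighbour at offset (j, i) from the centre pixel, the weight used below is
//     w = cGaussian[i + r] * cGaussian[j + r] * euclideanLen(neighbour, centre, e_d)
// and the filtered value is sum(w * neighbour) / sum(w).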
//Euclidean Distance (x, y, d) = exp(-(|x - y| / d)^2 / 2)
__device__ float euclideanLen(float4 a, float4 b, float d)
{
float mod = (b.x - a.x) * (b.x - a.x) +
(b.y - a.y) * (b.y - a.y) +
(b.z - a.z) * (b.z - a.z);
return __expf(-mod / (2.f * d * d));
}
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(fabs(rgba.x)); // clamp to [0.0, 1.0]
rgba.y = __saturatef(fabs(rgba.y));
rgba.z = __saturatef(fabs(rgba.z));
rgba.w = __saturatef(fabs(rgba.w));
return (uint(rgba.w * 255.0f) << 24) | (uint(rgba.z * 255.0f) << 16) | (uint(rgba.y * 255.0f) << 8) | uint(rgba.x * 255.0f);
}
__device__ float4 rgbaIntToFloat(uint c)
{
float4 rgba;
rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f;
rgba.y = ((c>>8) & 0xff) * 0.003921568627f; // /255.0f;
rgba.z = ((c>>16) & 0xff) * 0.003921568627f; // /255.0f;
rgba.w = ((c>>24) & 0xff) * 0.003921568627f; // /255.0f;
return rgba;
}
//column pass using coalesced global memory reads
__global__ void
d_bilateral_filter(uint *od, int w, int h,
float e_d, int r)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= w || y >= h)
{
return;
}
float sum = 0.0f;
float factor;
float4 t = {0.f, 0.f, 0.f, 0.f};
float4 center = tex2D(rgbaTex, x, y);
for (int i = -r; i <= r; i++)
{
for (int j = -r; j <= r; j++)
{
float4 curPix = tex2D(rgbaTex, x + j, y + i);
factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor
euclideanLen(curPix, center, e_d); //range factor
t += factor * curPix;
sum += factor;
}
}
od[y * w + x] = rgbaFloatToInt(t/sum);
}
class d_bilateral_filter_functor
{
float e_d;
int r;
public:
d_bilateral_filter_functor(float e_d, int r)
{
this->e_d = e_d;
this->r = r;
}
__device__ int operator() ( const thrust::window_2d<uchar4> &input, const thrust::window_2d<uint> &output ) const
{
float sum = 0.0f;
float factor;
float4 t = {0.f, 0.f, 0.f, 0.f};
uchar4 center_int = input[make_int2(r,r)];
float4 center = {(float)center_int.x,(float)center_int.y,(float)center_int.z,(float)center_int.w};
for (int i = 0; i <= 2*r; i++)
{
for (int j = 0; j <= 2*r; j++)
{
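// NOTE: this always reads the window centre (r, r); to sample the actual neighbour
// the index should presumably use the loop variables i and j instead.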
uchar4 temp = input[make_int2(r,r)];
float4 curPix = {(float)temp.x,(float)temp.y,(float)temp.z,(float)temp.w,};
factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor
euclideanLen(curPix, center, e_d); //range factor
t += factor * curPix;
sum += factor;
}
}
output[r][r]=rgbaFloatToInt(t/sum);
return 1;
}
};
extern "C"
void initTexture(int width, int height, uint *hImage)
{
// copy image data to array
checkCudaErrors(hipMallocPitch(&dImage, &pitch, sizeof(uint)*width, height));
checkCudaErrors(hipMallocPitch(&dTemp, &pitch, sizeof(uint)*width, height));
checkCudaErrors(hipMemcpy2D(dImage, pitch, hImage, sizeof(uint)*width,
sizeof(uint)*width, height, hipMemcpyHostToDevice));
}
extern "C"
void freeTextures()
{
checkCudaErrors(hipFree(dImage));
checkCudaErrors(hipFree(dTemp));
}
/*
Because a 2D gaussian mask is symmetric in row and column,
only a 1D mask is generated here; the product of the row and
column entries is used later.
1D gaussian distribution :
g(x, d) = C * exp(-x^2 / (2*d^2)), C is a constant amplifier
parameters:
og - output gaussian array in global memory
delta - the 2nd parameter 'd' in the above function
radius - half of the filter size
(total filter size = 2 * radius + 1)
*/
extern "C"
void updateGaussian(float delta, int radius)
{
float fGaussian[64];
for (int i = 0; i < 2*radius + 1; ++i)
{
float x = i-radius;
fGaussian[i] = expf(-(x*x) / (2*delta*delta));
}
checkCudaErrors(hipMemcpyToSymbol(cGaussian, fGaussian, sizeof(float)*(2*radius+1)));
}
/*
Perform 2D bilateral filter on image using CUDA
Parameters:
d_dest - pointer to destination image in device memory
width - image width
height - image height
e_d - euclidean delta
radius - filter radius
iterations - number of iterations
*/
// RGBA version
extern "C"
double bilateralFilterRGBA(uint *dDest,
int width, int height,
float e_d, int radius, int iterations,
StopWatchInterface *timer)
{
// var for kernel computation timing
double dKernelTime;
// Bind the array to the texture
hipChannelFormatDesc desc = hipCreateChannelDesc<uchar4>();
checkCudaErrors(hipBindTexture2D(0, rgbaTex, dImage, desc, width, height, pitch));
thrust::block_2d<uchar4> d_image_block(width,height);
d_image_block.upload((uchar4*)dImage,hipMemoryTypeDevice);
thrust::block_2d<uint> d_dest_block(width,height);
thrust::device_vector<int> nulla(width*height);
thrust::window_vector<uchar4> input_wv(&d_image_block,2*radius+1,2*radius+1,1,1);
thrust::window_vector<uint> output_wv(&d_dest_block,2*radius+1,2*radius+1,1,1);
for (int i=0; i<iterations; i++)
{
// sync host and start kernel computation timer
dKernelTime = 0.0;
checkCudaErrors(hipDeviceSynchronize());
sdkResetTimer(&timer);
dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16);
dim3 blockSize(16, 16);
thrust::transform(thrust::hip::texture,input_wv.begin(),input_wv.end(),output_wv.begin(),d_bilateral_filter_functor(e_d,radius));
// d_dest_block.download(dDest,hipMemoryTypeDevice);
// sync host and stop computation timer
checkCudaErrors(hipDeviceSynchronize());
dKernelTime += sdkGetTimerValue(&timer);
if (iterations > 1)
{
// checkCudaErrors(hipMemcpy2D(dTemp, pitch, d_dest_block.data_pointer, sizeof(int)*width,
// sizeof(int)*width, height, hipMemcpyDeviceToDevice));
// checkCudaErrors(hipBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch));
}
}
return ((dKernelTime/1000.)/(double)iterations);
}
|
7df7b34f7eb5769139ce8762032d5d374e678d4a.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <helper_math.h>
#include <helper_functions.h>
#include <helper_cuda.h> // CUDA device initialization helper functions
#include <thrust/window_2d.h>
#include <thrust/window_transform.h>
__constant__ float cGaussian[64]; //gaussian array in device side
texture<uchar4, 2, cudaReadModeNormalizedFloat> rgbaTex;
uint *dImage = NULL; //original image
uint *dTemp = NULL; //temp array for iterations
size_t pitch;
/*
Perform a simple bilateral filter.
Bilateral filter is a nonlinear filter that is a mixture of range
filter and domain filter; the former preserves crisp edges and
the latter filters noise. The intensity value at each pixel in
an image is replaced by a weighted average of intensity values from
nearby pixels.
The weight factor is calculated by the product of domain filter
component(using the gaussian distribution as a spatial distance) as
well as range filter component(Euclidean distance between center pixel
and the current neighbor pixel). Because this process is nonlinear,
the sample just uses a simple pixel by pixel step.
Texture fetches automatically clamp to edge of image. 1D gaussian array
is mapped to a 1D texture instead of using shared memory, which may
cause severe bank conflict.
Threads are y-pass(column-pass), because the output is coalesced.
Parameters
od - pointer to output data in global memory
d_f - pointer to the 1D gaussian array
e_d - euclidean delta
w - image width
h - image height
r - filter radius
*/
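// For a neighbour at offset (j, i) from the centre pixel, the weight used below is
//     w = cGaussian[i + r] * cGaussian[j + r] * euclideanLen(neighbour, centre, e_d)
// and the filtered value is sum(w * neighbour) / sum(w).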
//Euclidean Distance (x, y, d) = exp(-(|x - y| / d)^2 / 2)
__device__ float euclideanLen(float4 a, float4 b, float d)
{
float mod = (b.x - a.x) * (b.x - a.x) +
(b.y - a.y) * (b.y - a.y) +
(b.z - a.z) * (b.z - a.z);
return __expf(-mod / (2.f * d * d));
}
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(fabs(rgba.x)); // clamp to [0.0, 1.0]
rgba.y = __saturatef(fabs(rgba.y));
rgba.z = __saturatef(fabs(rgba.z));
rgba.w = __saturatef(fabs(rgba.w));
return (uint(rgba.w * 255.0f) << 24) | (uint(rgba.z * 255.0f) << 16) | (uint(rgba.y * 255.0f) << 8) | uint(rgba.x * 255.0f);
}
__device__ float4 rgbaIntToFloat(uint c)
{
float4 rgba;
rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f;
rgba.y = ((c>>8) & 0xff) * 0.003921568627f; // /255.0f;
rgba.z = ((c>>16) & 0xff) * 0.003921568627f; // /255.0f;
rgba.w = ((c>>24) & 0xff) * 0.003921568627f; // /255.0f;
return rgba;
}
//column pass using coalesced global memory reads
__global__ void
d_bilateral_filter(uint *od, int w, int h,
float e_d, int r)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= w || y >= h)
{
return;
}
float sum = 0.0f;
float factor;
float4 t = {0.f, 0.f, 0.f, 0.f};
float4 center = tex2D(rgbaTex, x, y);
for (int i = -r; i <= r; i++)
{
for (int j = -r; j <= r; j++)
{
float4 curPix = tex2D(rgbaTex, x + j, y + i);
factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor
euclideanLen(curPix, center, e_d); //range factor
t += factor * curPix;
sum += factor;
}
}
od[y * w + x] = rgbaFloatToInt(t/sum);
}
class d_bilateral_filter_functor
{
float e_d;
int r;
public:
d_bilateral_filter_functor(float e_d, int r)
{
this->e_d = e_d;
this->r = r;
}
__device__ int operator() ( const thrust::window_2d<uchar4> &input, const thrust::window_2d<uint> &output ) const
{
float sum = 0.0f;
float factor;
float4 t = {0.f, 0.f, 0.f, 0.f};
uchar4 center_int = input[make_int2(r,r)];
float4 center = {(float)center_int.x,(float)center_int.y,(float)center_int.z,(float)center_int.w};
for (int i = 0; i <= 2*r; i++)
{
for (int j = 0; j <= 2*r; j++)
{
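// NOTE: this always reads the window centre (r, r); to sample the actual neighbour
// the index should presumably use the loop variables i and j instead.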
uchar4 temp = input[make_int2(r,r)];
float4 curPix = {(float)temp.x,(float)temp.y,(float)temp.z,(float)temp.w,};
factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor
euclideanLen(curPix, center, e_d); //range factor
t += factor * curPix;
sum += factor;
}
}
output[r][r]=rgbaFloatToInt(t/sum);
return 1;
}
};
extern "C"
void initTexture(int width, int height, uint *hImage)
{
// copy image data to array
checkCudaErrors(cudaMallocPitch(&dImage, &pitch, sizeof(uint)*width, height));
checkCudaErrors(cudaMallocPitch(&dTemp, &pitch, sizeof(uint)*width, height));
checkCudaErrors(cudaMemcpy2D(dImage, pitch, hImage, sizeof(uint)*width,
sizeof(uint)*width, height, cudaMemcpyHostToDevice));
}
extern "C"
void freeTextures()
{
checkCudaErrors(cudaFree(dImage));
checkCudaErrors(cudaFree(dTemp));
}
/*
Because a 2D gaussian mask is symmetric in row and column,
only a 1D mask is generated here; the product of the row and
column entries is used later.
1D gaussian distribution :
g(x, d) = C * exp(-x^2 / (2*d^2)), C is a constant amplifier
parameters:
og - output gaussian array in global memory
delta - the 2nd parameter 'd' in the above function
radius - half of the filter size
(total filter size = 2 * radius + 1)
*/
extern "C"
void updateGaussian(float delta, int radius)
{
float fGaussian[64];
for (int i = 0; i < 2*radius + 1; ++i)
{
float x = i-radius;
fGaussian[i] = expf(-(x*x) / (2*delta*delta));
}
checkCudaErrors(cudaMemcpyToSymbol(cGaussian, fGaussian, sizeof(float)*(2*radius+1)));
}
/*
Perform 2D bilateral filter on image using CUDA
Parameters:
d_dest - pointer to destination image in device memory
width - image width
height - image height
e_d - euclidean delta
radius - filter radius
iterations - number of iterations
*/
// RGBA version
extern "C"
double bilateralFilterRGBA(uint *dDest,
int width, int height,
float e_d, int radius, int iterations,
StopWatchInterface *timer)
{
// var for kernel computation timing
double dKernelTime;
// Bind the array to the texture
cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>();
checkCudaErrors(cudaBindTexture2D(0, rgbaTex, dImage, desc, width, height, pitch));
thrust::block_2d<uchar4> d_image_block(width,height);
d_image_block.upload((uchar4*)dImage,cudaMemoryTypeDevice);
thrust::block_2d<uint> d_dest_block(width,height);
thrust::device_vector<int> nulla(width*height);
thrust::window_vector<uchar4> input_wv(&d_image_block,2*radius+1,2*radius+1,1,1);
thrust::window_vector<uint> output_wv(&d_dest_block,2*radius+1,2*radius+1,1,1);
for (int i=0; i<iterations; i++)
{
// sync host and start kernel computation timer
dKernelTime = 0.0;
checkCudaErrors(cudaDeviceSynchronize());
sdkResetTimer(&timer);
dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16);
dim3 blockSize(16, 16);
thrust::transform(thrust::cuda::texture,input_wv.begin(),input_wv.end(),output_wv.begin(),d_bilateral_filter_functor(e_d,radius));
// d_dest_block.download(dDest,cudaMemoryTypeDevice);
// sync host and stop computation timer
checkCudaErrors(cudaDeviceSynchronize());
dKernelTime += sdkGetTimerValue(&timer);
if (iterations > 1)
{
// checkCudaErrors(cudaMemcpy2D(dTemp, pitch, d_dest_block.data_pointer, sizeof(int)*width,
// sizeof(int)*width, height, cudaMemcpyDeviceToDevice));
// checkCudaErrors(cudaBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch));
}
}
return ((dKernelTime/1000.)/(double)iterations);
}
|
4bca550bfc57ff763e4735c637e21935427cdd6b.hip
|
// !!! This is a file automatically generated by hipify!!!
// preprocessing_phi() , MakeTempLoc()
#define _CRT_SECURE_NO_WARNINGS
#include "device_launch_parameters.h"
#include "cuda_by_example/common/book.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<sys/time.h>
#include<cstdlib>
#include<stdio.h>
#include<fstream>
#include<cstring>
// Merge sort (defined below) is used to order pattern elements by value.
#define Repeat 5
#define MAX_COUNT 1'000
#define ThreadCount 1'024
#define CopySize 1'000'005
#define GpuTextLen 100
using namespace std;
typedef pair<int,int> P;
__constant__ int DevLoc[16'000]; //MAX
__constant__ int DevPreCalFac[10];
//Input Folder Name
string InputFolder = "./TESTCASE/TC-";
string OutputFolder = "./JournalV1OUTPUT/TC-";
string TimeFolder = "./JournalV1TIME/";
string TextInput = "TextSample";
string PatternInput = "IntStr";
string TimeInput = "TimeRecord_";
struct timeval PreStart, PreEnd, SearchStart, SearchEnd, TotalStart, TotalEnd, CopyToHostStart, CopyToHostEnd;
int PreCalFac[10] = { 0, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880}; //0!~9!
void InputData(int ** Pattern, int * Text, int PatternCount, int PatternLen, int TextLen, int FolderNumber){
//Pattern input
string pattern_filename = InputFolder + to_string(FolderNumber)+"/"+PatternInput + "_" + to_string(PatternCount) + "_" +to_string(PatternLen) + ".txt";
ifstream pattern(pattern_filename);
for (int i = 0; i < PatternCount; i++) {
for (int j = 0; j < PatternLen; j++) {
pattern >> Pattern[i][j];
}
}
pattern.close();
//Text input
string text_filename = InputFolder + to_string(FolderNumber)+"/"+ TextInput + "_" + to_string(TextLen) + ".txt";
ifstream text(text_filename);
for (int i = 0; i < TextLen; i++) {
text >> Text[i];
}
text.close();
return ;
}
void OutputData(int PatternCount, int PatternLen, int TextLen,int BlockSize, int FolderNumber,int MatchRes, bool * MatchResDetail){
string FileName = OutputFolder+ to_string(FolderNumber)+"/"+PatternInput + "_" +
to_string(PatternCount) + "_" +to_string(PatternLen) +"_"+to_string(TextLen) +"_"+to_string(BlockSize) + ".txt";
ofstream FileStream(FileName);
FileStream<<MatchRes;
/*FileStream<<"\n";
for(int t=0;t<TextLen; t++){
FileStream<<MatchResDetail[t]<<" ";
}*/
FileStream.close();
}
void OutputTime(double Pre, float Search, double Total,double TotalCopy, int PatternCount,int PatternLen, int TextLen,int BlockSize){
string FileName = TimeFolder + PatternInput + "_" +
to_string(PatternCount) + "_" + to_string(PatternLen) + "_" +
to_string(TextLen) + "_" + to_string(BlockSize)+".txt";
ofstream FileStream(FileName);
FileStream<<(double)(Pre)/Repeat<<" "<<(double)(Search)/Repeat<<" "
<<(double)(Total)/Repeat<<" "<<(double)(TotalCopy)/Repeat;
FileStream.close();
}
ofstream GetFileStream(int PatternCount, int PatternLen){
string FileName = OutputFolder + "FP_" + to_string(PatternCount) + "_" + to_string(PatternLen) + ".txt";
ofstream FileStream(FileName);
return FileStream;
}
int FindLen(int* p, int PatternLen) {
int ret = PatternLen;
for (int i = 0; i < PatternLen; i++) {
if (p[i] < 0 || p[i] == 0) {
ret = i;
break;
}
}
return ret;
}
void merge(int first, int mid, int last, P* arr) {
int idx = first;
P TempArr[MAX_COUNT];
int i = first, j = mid + 1;
while (i <= mid && j <= last) {
if (arr[i] <= arr[j]) {
TempArr[idx] = arr[i];
idx++;
i++;
}
else if (arr[i] > arr[j]) {
TempArr[idx] = arr[j];
idx++;
j++;
}
}
if (i > mid) {
for (int m = j; m <= last; m++) {
TempArr[idx] = arr[m];
idx++;
}
}
else {
for (int m = i; m <= mid; m++) {
TempArr[idx] = arr[m];
idx++;
}
}
for (int m = first; m <= last; m++) {
arr[m] = TempArr[m];
}
}
void mergeSort(int first, int last, P* TempPattern) {
if (first < last) {
int mid = (first + last) / 2;
mergeSort(first, mid, TempPattern);
mergeSort(mid + 1, last, TempPattern);
merge(first, mid, last, TempPattern);
}
}
int FindMax(int* p, int len) {
int ret = 0;
for (int i = 0; i < len; i++) {
if (p[i] > ret)
ret = p[i];
}
return ret;
}
int CalQgram(int* Pattern, int StartIdx, int PatternLen, int BlockSize) {
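    // Computes the rank of the order pattern of the BlockSize values starting at StartIdx:
    // each position's count of earlier values <= it is multiplied by the matching PreCalFac
    // weight (a factorial-number-system encoding), giving one integer fingerprint per q-gram.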
int result = 0;
int count;
for (int j = StartIdx; j < StartIdx + BlockSize; j++) {
count = 0;
for (int k = StartIdx; k < j; k++) {
if (Pattern[k] <= Pattern[j])
count++;
}
result += count * PreCalFac[j-StartIdx];
}
return result;
}
__device__ int DevCalQgram(int Text[], int StartIdx, int PatternLen, int BlockSize){
int result = 0;
int count;
for (int j = StartIdx; j < StartIdx + BlockSize; j++) {
count = 0;
for (int k = StartIdx; k < j; k++) {
if (Text[k] <= Text[j])
count++;
}
result += count * DevPreCalFac[j - StartIdx];
}
return result;
}
//Len and PatternLen carry redundant information, but the Len variable is kept
//so that this algorithm can also handle patterns whose lengths all differ.
//The Loc table is width * height => PatternLen * PatternCount: logically 2-D but stored as a 1-D array
void MakeLoc(P* TempPattern, int* Loc, int Len, int PatternCount,int PatternLen, int CurPatternIdx) {
for (int i = 0; i < Len; i++) {
int idx = CurPatternIdx + i * PatternCount;
Loc[idx] = TempPattern[i].second;
}
}
void MakeE(int* Pattern, int* Loc, int* E, int Len,int PatternCount, int CurPatternIdx) {
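    // E[i] = 1 when the i-th and (i+1)-th smallest pattern values are equal,
    // 0 when strictly increasing; CheckOP consumes this table during the search.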
for (int i = 0; i < Len - 1; i++) {
int idx = CurPatternIdx + i * PatternCount;
if (Pattern[Loc[idx]] == Pattern[Loc[idx + PatternCount]])
E[idx] = 1;
else
E[idx] = 0;
}
}
void FillLoc(int ** Pattern, int * Loc, int* E, int PatternCount, int PatternLen){
int Len;
P* TempPattern;
for (int i = 0; i < PatternCount; i++) {
Len = FindLen(Pattern[i], PatternLen);
TempPattern = new P[Len];
for (int j = 0; j < Len; j++) {
TempPattern[j].first = Pattern[i][j];
TempPattern[j].second = j;
}
mergeSort(0, Len - 1, TempPattern);
MakeLoc(TempPattern, Loc, Len, PatternCount, PatternLen, i);
MakeE(Pattern[i], Loc, E, Len, PatternCount, i);
delete[] TempPattern;
}
}
void FillHash(int **Pattern, int BlockSize, int PatternCount, int PatternLen, int * Hash){
int range = PatternLen - BlockSize + 1;
for (int i = 0; i < PatternCount; i++) {
Hash[i] = CalQgram(Pattern[i], range - 1, PatternLen, BlockSize);
}
}
//__device__ InitSharedMemory()
__device__ bool CheckOP(int Text[], int* E, int StartIdx, int PatternLen, int PatternIdx, int PatternCount) {
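    // Verifies order-isomorphism: consecutive entries of the pattern's sorted-position
    // list (DevLoc) must compare as '<' (E == 0) or '==' (E == 1) in the text window
    // starting at StartIdx.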
bool ret = true;
for (int i = 0; i < PatternLen-1; i++) {
int idx = PatternCount * i + PatternIdx;
if (E[idx] == 0) {
if (Text[StartIdx + DevLoc[idx]] >= Text[StartIdx + DevLoc[idx + PatternCount]]) {
ret = false;
break;
}
}
else {
if (Text[StartIdx + DevLoc[idx]] != Text[StartIdx + DevLoc[idx + PatternCount]]) {
ret = false;
break;
}
}
}
return ret;
}
__global__ void Search(int * DevText, int * DevHash,int * DevE,int * DevMatchRes,
int TextLen, int PatternCount, int PatternLen,int BlockSize,bool * DevMatchDetail){
extern __shared__ int sharedText[]; //dynamic allocation
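    // Each block stages a window of GpuTextLen + PatternLen text symbols in shared memory;
    // each thread with tidx < PatternCount then scans that window for its own pattern.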
int bidx = blockIdx.x;
int tidx = threadIdx.x;
int TextRange = GpuTextLen + PatternLen;
int TextStart = bidx * GpuTextLen;
    //Length when this is the last block.
int CurTextLen = (TextLen/GpuTextLen) -1 == bidx ? GpuTextLen-PatternLen : GpuTextLen;
if(tidx<TextRange && (TextStart + tidx < TextLen)){
sharedText[tidx] = DevText[TextStart+tidx];
}
__syncthreads();
if(tidx<PatternCount){
for(int i=0; i < CurTextLen; i++){
int temp = DevCalQgram(sharedText, i+PatternLen-BlockSize, PatternLen, BlockSize);
if(temp == DevHash[tidx]){
if(CheckOP(sharedText, DevE, i,PatternLen, tidx, PatternCount)){
//atomicAdd(&DevMatchRes[0], 1);
DevMatchDetail[(TextStart+i) + (tidx * TextLen)] = true;
}
}
}
}
__syncthreads();
}
extern "C" void InitLocGpu(int * Loc,int PatternCount, int PatternLen)
{
HANDLE_ERROR(hipMemcpyToSymbol(DevLoc, Loc, PatternCount * PatternLen * sizeof(int)));
HANDLE_ERROR(hipMemcpyToSymbol(DevPreCalFac, PreCalFac, 10 * sizeof(int)));
}
void FreeVariable(int * DevMatchRes,int * DevHash,int * DevText, int *DevE,
int * Text, int **Pattern,int * Loc,int * Hash,int * E, int PatternCount,int * MatchRes, bool *MatchResDetail, bool * DevMatchDetail){
for(int i=0;i<PatternCount;i++){
delete[] Pattern[i];
}
delete[] Text;
delete[] Loc;
delete[] Hash;
delete[] E;
delete[] MatchRes;
delete[] MatchResDetail;
hipFree(DevE);
hipFree(DevMatchRes);
hipFree(DevHash);
hipFree(DevText);
hipFree(DevMatchDetail);
}
void PrintTestInfo(int PatternCount,int PatternLen,int TextLen, int MatchRes){
printf("Pattern count: %d Pattern_length : %d TEXT SIZE : %d\nOP size : %d\n\n", PatternCount, PatternLen,TextLen, MatchRes);
}
int main(){
int ** Pattern;
int * Loc;
int * E;
int * Hash;
int * Text;
int * MatchRes;
bool * MatchResDetail;
//GPU variables
int * DevMatchRes;
int * DevHash;
int * DevText;
int * DevE;
bool * DevMatchDetail;
for (int BlockSize = 7; BlockSize <= 7; BlockSize++) {
for (int PatternCount = 100; PatternCount <= 1'000; PatternCount += 100) { // 100~1000
for (int PatternLen = 7; PatternLen <= 15; PatternLen += 1) { //3~15
printf("Pattern Count: %d\nPattern Len : %d\n",PatternCount, PatternLen);
for (int TextLen = 100'000; TextLen <= 1'000'000; TextLen += 100'000) { //100'000 ~ 1'000'000
double sec, usec;
double TotalPre = 0;
double TotalSearch = 0;
double Total = 0;
double TotalCopy = 0;
for(int FolderNumber = 0;FolderNumber < Repeat;FolderNumber++){
Text = new int[TextLen];
                        //!Warning! Only these two tables are row * col => PatternLen * PatternCount
Loc = new int[PatternLen * PatternCount];
E = new int[PatternLen * PatternCount];
Hash = new int[PatternCount];
Pattern = new int*[PatternCount];
for (int i = 0; i < PatternCount; i++) {
Pattern[i] = new int[PatternLen];
}
MatchResDetail = new bool[TextLen * PatternCount];
//Read Text and Pattern
InputData(Pattern, Text, PatternCount, PatternLen, TextLen,FolderNumber);
gettimeofday(&TotalStart, NULL);
//Fill the Location table
gettimeofday(&PreStart, NULL);
FillLoc(Pattern, Loc, E, PatternCount, PatternLen);
//Fill the hash table
FillHash(Pattern, BlockSize, PatternCount, PatternLen, Hash);
gettimeofday(&PreEnd, NULL);
                        //GPU Init !InitLocGpu must be run with administrator privileges!
InitLocGpu(Loc, PatternCount, PatternLen);
//GPU init
HANDLE_ERROR(hipMalloc((void**)&DevMatchRes, sizeof(int) * 1));
HANDLE_ERROR(hipMalloc((void**)&DevHash, sizeof(int) * PatternCount));
HANDLE_ERROR(hipMalloc((void**)&DevText, sizeof(int) * TextLen));
HANDLE_ERROR(hipMalloc((void**)&DevE, sizeof(int) * PatternCount * PatternLen));
HANDLE_ERROR(hipMalloc((void**)&DevMatchDetail, TextLen*PatternCount * sizeof(bool)));
HANDLE_ERROR(hipMemcpy(DevHash, Hash, sizeof(int) * PatternCount, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(DevText, Text, sizeof(int) * TextLen, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(DevE, E, sizeof(int) * PatternCount * PatternLen, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemset(DevMatchRes, 0, sizeof(int)));
HANDLE_ERROR(hipMemset(DevMatchDetail, 0 ,TextLen*PatternCount*sizeof(bool)));
//Kernel !3rd parameter is shared memory size in byte. Take care!
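                        // The 1000 bytes of dynamic shared memory cover the
                        // (GpuTextLen + PatternLen) * sizeof(int) window used by Search
                        // (at most (100 + 15) * 4 = 460 bytes for these parameter ranges).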
gettimeofday(&SearchStart, NULL);
                        //Increase the number of blocks
hipLaunchKernelGGL(( Search), dim3((TextLen/GpuTextLen)), dim3(ThreadCount), 1000, 0, DevText, DevHash, DevE, DevMatchRes, TextLen, PatternCount, PatternLen,BlockSize,DevMatchDetail);
hipDeviceSynchronize();
gettimeofday(&SearchEnd, NULL);
MatchRes = new int[2];
gettimeofday(&CopyToHostStart,NULL);
HANDLE_ERROR(hipMemcpy(MatchResDetail, DevMatchDetail, sizeof(bool) * TextLen * PatternCount, hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(MatchRes, DevMatchRes, sizeof(int), hipMemcpyDeviceToHost));
gettimeofday(&CopyToHostEnd,NULL);
//PrintTestInfo(PatternCount, PatternLen,TextLen, MatchRes[0]);
OutputData(PatternCount, PatternLen, TextLen, BlockSize,FolderNumber, MatchRes[0], MatchResDetail);
//Freeing Variable
FreeVariable(DevMatchRes, DevHash, DevText,DevE, Text, Pattern, Loc, Hash, E, PatternCount, MatchRes, MatchResDetail, DevMatchDetail);
gettimeofday(&TotalEnd, NULL);
sec = TotalEnd.tv_sec - TotalStart.tv_sec;
usec = TotalEnd.tv_usec - TotalStart.tv_usec;
Total += (sec*1000+usec/1000.0);
sec = PreEnd.tv_sec - PreStart.tv_sec;
usec = PreEnd.tv_usec - PreStart.tv_usec;
TotalPre += (sec*1000+usec/1000.0);
sec = SearchEnd.tv_sec - SearchStart.tv_sec;
usec = SearchEnd.tv_usec - SearchStart.tv_usec;
TotalSearch += (sec*1000+usec/1000.0);
sec = CopyToHostEnd.tv_sec - CopyToHostStart.tv_sec;
usec = CopyToHostEnd.tv_usec - CopyToHostStart.tv_usec;
TotalCopy += (sec*1000+usec/1000.0);
}
//Folder End
OutputTime(TotalPre, TotalSearch, Total,TotalCopy,PatternCount,PatternLen, TextLen,BlockSize);
}
}
}
}
return 0;
}
|
4bca550bfc57ff763e4735c637e21935427cdd6b.cu
|
//!Match information reports only the positions in the text where an order-isomorphism occurs!
//This could be written more efficiently, but the other factors are kept identical for comparison with the pre-parallelization paper, so it is not optimized further
//Only the search phase is parallelized
//However, the original preprocessing_phi() was too inefficient, so it was rewritten; its counterpart is MakeTempLoc()
#define _CRT_SECURE_NO_WARNINGS
#include "device_launch_parameters.h"
#include "cuda_by_example/common/book.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<sys/time.h>
#include<cstdlib>
#include<stdio.h>
#include<fstream>
#include<cstring>
//Value used by merge sort; it does not exceed the pattern length
#define Repeat 5
#define MAX_COUNT 1'000
#define ThreadCount 1'024
#define CopySize 1'000'005
#define GpuTextLen 100
using namespace std;
typedef pair<int,int> P;
__constant__ int DevLoc[16'000]; //MAX
__constant__ int DevPreCalFac[10];
//Input Folder Name
string InputFolder = "./TESTCASE/TC-";
string OutputFolder = "./JournalV1OUTPUT/TC-";
string TimeFolder = "./JournalV1TIME/";
string TextInput = "TextSample";
string PatternInput = "IntStr";
string TimeInput = "TimeRecord_";
struct timeval PreStart, PreEnd, SearchStart, SearchEnd, TotalStart, TotalEnd, CopyToHostStart, CopyToHostEnd;
int PreCalFac[10] = { 0, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880}; //0!~9!
void InputData(int ** Pattern, int * Text, int PatternCount, int PatternLen, int TextLen, int FolderNumber){
//Pattern input
string pattern_filename = InputFolder + to_string(FolderNumber)+"/"+PatternInput + "_" + to_string(PatternCount) + "_" +to_string(PatternLen) + ".txt";
ifstream pattern(pattern_filename);
for (int i = 0; i < PatternCount; i++) {
for (int j = 0; j < PatternLen; j++) {
pattern >> Pattern[i][j];
}
}
pattern.close();
//Text input
string text_filename = InputFolder + to_string(FolderNumber)+"/"+ TextInput + "_" + to_string(TextLen) + ".txt";
ifstream text(text_filename);
for (int i = 0; i < TextLen; i++) {
text >> Text[i];
}
text.close();
return ;
}
void OutputData(int PatternCount, int PatternLen, int TextLen,int BlockSize, int FolderNumber,int MatchRes, bool * MatchResDetail){
string FileName = OutputFolder+ to_string(FolderNumber)+"/"+PatternInput + "_" +
to_string(PatternCount) + "_" +to_string(PatternLen) +"_"+to_string(TextLen) +"_"+to_string(BlockSize) + ".txt";
ofstream FileStream(FileName);
FileStream<<MatchRes;
/*FileStream<<"\n";
for(int t=0;t<TextLen; t++){
FileStream<<MatchResDetail[t]<<" ";
}*/
FileStream.close();
}
void OutputTime(double Pre, float Search, double Total,double TotalCopy, int PatternCount,int PatternLen, int TextLen,int BlockSize){
string FileName = TimeFolder + PatternInput + "_" +
to_string(PatternCount) + "_" + to_string(PatternLen) + "_" +
to_string(TextLen) + "_" + to_string(BlockSize)+".txt";
ofstream FileStream(FileName);
FileStream<<(double)(Pre)/Repeat<<" "<<(double)(Search)/Repeat<<" "
<<(double)(Total)/Repeat<<" "<<(double)(TotalCopy)/Repeat;
FileStream.close();
}
ofstream GetFileStream(int PatternCount, int PatternLen){
string FileName = OutputFolder + "FP_" + to_string(PatternCount) + "_" + to_string(PatternLen) + ".txt";
ofstream FileStream(FileName);
return FileStream;
}
int FindLen(int* p, int PatternLen) {
int ret = PatternLen;
for (int i = 0; i < PatternLen; i++) {
if (p[i] < 0 || p[i] == 0) {
ret = i;
break;
}
}
return ret;
}
void merge(int first, int mid, int last, P* arr) {
int idx = first;
P TempArr[MAX_COUNT];
int i = first, j = mid + 1;
while (i <= mid && j <= last) {
if (arr[i] <= arr[j]) {
TempArr[idx] = arr[i];
idx++;
i++;
}
else if (arr[i] > arr[j]) {
TempArr[idx] = arr[j];
idx++;
j++;
}
}
if (i > mid) {
for (int m = j; m <= last; m++) {
TempArr[idx] = arr[m];
idx++;
}
}
else {
for (int m = i; m <= mid; m++) {
TempArr[idx] = arr[m];
idx++;
}
}
for (int m = first; m <= last; m++) {
arr[m] = TempArr[m];
}
}
void mergeSort(int first, int last, P* TempPattern) {
if (first < last) {
int mid = (first + last) / 2;
mergeSort(first, mid, TempPattern);
mergeSort(mid + 1, last, TempPattern);
merge(first, mid, last, TempPattern);
}
}
int FindMax(int* p, int len) {
int ret = 0;
for (int i = 0; i < len; i++) {
if (p[i] > ret)
ret = p[i];
}
return ret;
}
int CalQgram(int* Pattern, int StartIdx, int PatternLen, int BlockSize) {
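    // Computes the rank of the order pattern of the BlockSize values starting at StartIdx:
    // each position's count of earlier values <= it is multiplied by the matching PreCalFac
    // weight (a factorial-number-system encoding), giving one integer fingerprint per q-gram.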
int result = 0;
int count;
for (int j = StartIdx; j < StartIdx + BlockSize; j++) {
count = 0;
for (int k = StartIdx; k < j; k++) {
if (Pattern[k] <= Pattern[j])
count++;
}
result += count * PreCalFac[j-StartIdx];
}
return result;
}
__device__ int DevCalQgram(int Text[], int StartIdx, int PatternLen, int BlockSize){
int result = 0;
int count;
for (int j = StartIdx; j < StartIdx + BlockSize; j++) {
count = 0;
for (int k = StartIdx; k < j; k++) {
if (Text[k] <= Text[j])
count++;
}
result += count * DevPreCalFac[j - StartIdx];
}
return result;
}
//Len and PatternLen carry redundant information, but the Len variable is kept
//so that this algorithm can also handle patterns whose lengths all differ.
//The Loc table is width * height => PatternLen * PatternCount: logically 2-D but stored as a 1-D array
void MakeLoc(P* TempPattern, int* Loc, int Len, int PatternCount,int PatternLen, int CurPatternIdx) {
for (int i = 0; i < Len; i++) {
int idx = CurPatternIdx + i * PatternCount;
Loc[idx] = TempPattern[i].second;
}
}
void MakeE(int* Pattern, int* Loc, int* E, int Len,int PatternCount, int CurPatternIdx) {
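    // E[i] = 1 when the i-th and (i+1)-th smallest pattern values are equal,
    // 0 when strictly increasing; CheckOP consumes this table during the search.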
for (int i = 0; i < Len - 1; i++) {
int idx = CurPatternIdx + i * PatternCount;
if (Pattern[Loc[idx]] == Pattern[Loc[idx + PatternCount]])
E[idx] = 1;
else
E[idx] = 0;
}
}
void FillLoc(int ** Pattern, int * Loc, int* E, int PatternCount, int PatternLen){
int Len;
P* TempPattern;
for (int i = 0; i < PatternCount; i++) {
Len = FindLen(Pattern[i], PatternLen);
TempPattern = new P[Len];
for (int j = 0; j < Len; j++) {
TempPattern[j].first = Pattern[i][j];
TempPattern[j].second = j;
}
mergeSort(0, Len - 1, TempPattern);
MakeLoc(TempPattern, Loc, Len, PatternCount, PatternLen, i);
MakeE(Pattern[i], Loc, E, Len, PatternCount, i);
delete[] TempPattern;
}
}
void FillHash(int **Pattern, int BlockSize, int PatternCount, int PatternLen, int * Hash){
int range = PatternLen - BlockSize + 1;
for (int i = 0; i < PatternCount; i++) {
Hash[i] = CalQgram(Pattern[i], range - 1, PatternLen, BlockSize);
}
}
//__device__ InitSharedMemory()
__device__ bool CheckOP(int Text[], int* E, int StartIdx, int PatternLen, int PatternIdx, int PatternCount) {
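    // Verifies order-isomorphism: consecutive entries of the pattern's sorted-position
    // list (DevLoc) must compare as '<' (E == 0) or '==' (E == 1) in the text window
    // starting at StartIdx.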
bool ret = true;
for (int i = 0; i < PatternLen-1; i++) {
int idx = PatternCount * i + PatternIdx;
if (E[idx] == 0) {
if (Text[StartIdx + DevLoc[idx]] >= Text[StartIdx + DevLoc[idx + PatternCount]]) {
ret = false;
break;
}
}
else {
if (Text[StartIdx + DevLoc[idx]] != Text[StartIdx + DevLoc[idx + PatternCount]]) {
ret = false;
break;
}
}
}
return ret;
}
__global__ void Search(int * DevText, int * DevHash,int * DevE,int * DevMatchRes,
int TextLen, int PatternCount, int PatternLen,int BlockSize,bool * DevMatchDetail){
extern __shared__ int sharedText[]; //dynamic allocation
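    // Each block stages a window of GpuTextLen + PatternLen text symbols in shared memory;
    // each thread with tidx < PatternCount then scans that window for its own pattern.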
int bidx = blockIdx.x;
int tidx = threadIdx.x;
int TextRange = GpuTextLen + PatternLen;
int TextStart = bidx * GpuTextLen;
    //Length when this is the last block.
int CurTextLen = (TextLen/GpuTextLen) -1 == bidx ? GpuTextLen-PatternLen : GpuTextLen;
if(tidx<TextRange && (TextStart + tidx < TextLen)){
sharedText[tidx] = DevText[TextStart+tidx];
}
__syncthreads();
if(tidx<PatternCount){
for(int i=0; i < CurTextLen; i++){
int temp = DevCalQgram(sharedText, i+PatternLen-BlockSize, PatternLen, BlockSize);
if(temp == DevHash[tidx]){
if(CheckOP(sharedText, DevE, i,PatternLen, tidx, PatternCount)){
//atomicAdd(&DevMatchRes[0], 1);
DevMatchDetail[(TextStart+i) + (tidx * TextLen)] = true;
}
}
}
}
__syncthreads();
}
extern "C" void InitLocGpu(int * Loc,int PatternCount, int PatternLen)
{
HANDLE_ERROR(cudaMemcpyToSymbol(DevLoc, Loc, PatternCount * PatternLen * sizeof(int)));
HANDLE_ERROR(cudaMemcpyToSymbol(DevPreCalFac, PreCalFac, 10 * sizeof(int)));
}
void FreeVariable(int * DevMatchRes,int * DevHash,int * DevText, int *DevE,
int * Text, int **Pattern,int * Loc,int * Hash,int * E, int PatternCount,int * MatchRes, bool *MatchResDetail, bool * DevMatchDetail){
for(int i=0;i<PatternCount;i++){
delete[] Pattern[i];
}
delete[] Text;
delete[] Loc;
delete[] Hash;
delete[] E;
delete[] MatchRes;
delete[] MatchResDetail;
cudaFree(DevE);
cudaFree(DevMatchRes);
cudaFree(DevHash);
cudaFree(DevText);
cudaFree(DevMatchDetail);
}
void PrintTestInfo(int PatternCount,int PatternLen,int TextLen, int MatchRes){
printf("Pattern count: %d Pattern_length : %d TEXT SIZE : %d\nOP size : %d\n\n", PatternCount, PatternLen,TextLen, MatchRes);
}
int main(){
int ** Pattern;
int * Loc;
int * E;
int * Hash;
int * Text;
int * MatchRes;
bool * MatchResDetail;
//GPU variables
int * DevMatchRes;
int * DevHash;
int * DevText;
int * DevE;
bool * DevMatchDetail;
for (int BlockSize = 7; BlockSize <= 7; BlockSize++) {
for (int PatternCount = 100; PatternCount <= 1'000; PatternCount += 100) { // 100~1000
for (int PatternLen = 7; PatternLen <= 15; PatternLen += 1) { //3~15
printf("Pattern Count: %d\nPattern Len : %d\n",PatternCount, PatternLen);
for (int TextLen = 100'000; TextLen <= 1'000'000; TextLen += 100'000) { //100'000 ~ 1'000'000
double sec, usec;
double TotalPre = 0;
double TotalSearch = 0;
double Total = 0;
double TotalCopy = 0;
for(int FolderNumber = 0;FolderNumber < Repeat;FolderNumber++){
Text = new int[TextLen];
                        //!Warning! Only these two tables are row * col => PatternLen * PatternCount
Loc = new int[PatternLen * PatternCount];
E = new int[PatternLen * PatternCount];
Hash = new int[PatternCount];
Pattern = new int*[PatternCount];
for (int i = 0; i < PatternCount; i++) {
Pattern[i] = new int[PatternLen];
}
MatchResDetail = new bool[TextLen * PatternCount];
//Read Text and Pattern
InputData(Pattern, Text, PatternCount, PatternLen, TextLen,FolderNumber);
gettimeofday(&TotalStart, NULL);
//Fill the Location table
gettimeofday(&PreStart, NULL);
FillLoc(Pattern, Loc, E, PatternCount, PatternLen);
//Fill the hash table
FillHash(Pattern, BlockSize, PatternCount, PatternLen, Hash);
gettimeofday(&PreEnd, NULL);
                        //GPU Init !InitLocGpu must be run with administrator privileges!
InitLocGpu(Loc, PatternCount, PatternLen);
//GPU init
HANDLE_ERROR(cudaMalloc((void**)&DevMatchRes, sizeof(int) * 1));
HANDLE_ERROR(cudaMalloc((void**)&DevHash, sizeof(int) * PatternCount));
HANDLE_ERROR(cudaMalloc((void**)&DevText, sizeof(int) * TextLen));
HANDLE_ERROR(cudaMalloc((void**)&DevE, sizeof(int) * PatternCount * PatternLen));
HANDLE_ERROR(cudaMalloc((void**)&DevMatchDetail, TextLen*PatternCount * sizeof(bool)));
HANDLE_ERROR(cudaMemcpy(DevHash, Hash, sizeof(int) * PatternCount, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(DevText, Text, sizeof(int) * TextLen, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(DevE, E, sizeof(int) * PatternCount * PatternLen, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemset(DevMatchRes, 0, sizeof(int)));
HANDLE_ERROR(cudaMemset(DevMatchDetail, 0 ,TextLen*PatternCount*sizeof(bool)));
//Kernel !3rd parameter is shared memory size in byte. Take care!
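                        // The 1000 bytes of dynamic shared memory cover the
                        // (GpuTextLen + PatternLen) * sizeof(int) window used by Search
                        // (at most (100 + 15) * 4 = 460 bytes for these parameter ranges).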
gettimeofday(&SearchStart, NULL);
                        //Increase the number of blocks
Search<<<(TextLen/GpuTextLen), ThreadCount, 1000>>>(DevText, DevHash, DevE, DevMatchRes, TextLen, PatternCount, PatternLen,BlockSize,DevMatchDetail);
cudaDeviceSynchronize();
gettimeofday(&SearchEnd, NULL);
MatchRes = new int[2];
gettimeofday(&CopyToHostStart,NULL);
HANDLE_ERROR(cudaMemcpy(MatchResDetail, DevMatchDetail, sizeof(bool) * TextLen * PatternCount, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(MatchRes, DevMatchRes, sizeof(int), cudaMemcpyDeviceToHost));
gettimeofday(&CopyToHostEnd,NULL);
//PrintTestInfo(PatternCount, PatternLen,TextLen, MatchRes[0]);
OutputData(PatternCount, PatternLen, TextLen, BlockSize,FolderNumber, MatchRes[0], MatchResDetail);
//Freeing Variable
FreeVariable(DevMatchRes, DevHash, DevText,DevE, Text, Pattern, Loc, Hash, E, PatternCount, MatchRes, MatchResDetail, DevMatchDetail);
gettimeofday(&TotalEnd, NULL);
sec = TotalEnd.tv_sec - TotalStart.tv_sec;
usec = TotalEnd.tv_usec - TotalStart.tv_usec;
Total += (sec*1000+usec/1000.0);
sec = PreEnd.tv_sec - PreStart.tv_sec;
usec = PreEnd.tv_usec - PreStart.tv_usec;
TotalPre += (sec*1000+usec/1000.0);
sec = SearchEnd.tv_sec - SearchStart.tv_sec;
usec = SearchEnd.tv_usec - SearchStart.tv_usec;
TotalSearch += (sec*1000+usec/1000.0);
sec = CopyToHostEnd.tv_sec - CopyToHostStart.tv_sec;
usec = CopyToHostEnd.tv_usec - CopyToHostStart.tv_usec;
TotalCopy += (sec*1000+usec/1000.0);
}
//Folder End
OutputTime(TotalPre, TotalSearch, Total,TotalCopy,PatternCount,PatternLen, TextLen,BlockSize);
}
}
}
}
return 0;
}
|
0634f5e084daf0de0d024c0209408963b6718d9f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void multiply(unsigned int *a, unsigned int *b, unsigned int *c,
int n)
{
unsigned int i;
unsigned int product = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < n && col < n){
for (i = 0; i < n; i++)
product += a[row * n + i] * b[i * n + col];
c[row*n + col] = product;
}
}
|
0634f5e084daf0de0d024c0209408963b6718d9f.cu
|
extern "C" __global__ void multiply(unsigned int *a, unsigned int *b, unsigned int *c,
int n)
{
unsigned int i;
unsigned int product = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < n && col < n){
for (i = 0; i < n; i++)
product += a[row * n + i] * b[i * n + col];
c[row*n + col] = product;
}
}
|
a19191450419aeed6b23c756951d25bdacd94abf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
int DLGpuNormalInit(DLArrayHandle arr, const float mean, const float stddev, unsigned long long seed,
DLStreamHandle stream_handle=NULL) {
size_t size = 1;
for (index_t i = 0; i < arr->ndim; i++) {
size *= arr->shape[i];
}
float *arr_data = (float *)arr->data;
hiprandGenerator_t gen;
CURAND_CALL(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_PHILOX4_32_10));
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen, seed));
if (stream_handle)
CURAND_CALL(hiprandSetStream(gen, *(hipStream_t*)stream_handle->handle));
CURAND_CALL(hiprandGenerateNormal(gen, arr_data, size, mean, stddev));
CURAND_CALL(hiprandDestroyGenerator(gen));
return 0;
}
// __global__ void init_scale_kernel(float *arr, const float lb, const float ub, size_t size) {
// size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
// if (ind >= size) return;
// arr[ind] = arr[ind] * (ub - lb) + lb;
// }
// int DLGpuUniformInit(DLArrayHandle arr, const float lb, const float ub, unsigned long long seed,
// DLStreamHandle stream_handle=NULL) {
// size_t size = 1;
// for (index_t i = 0; i < arr->ndim; i++) {
// size *= arr->shape[i];
// }
// float *arr_data = (float *)arr->data;
// hiprandGenerator_t gen;
// CURAND_CALL(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_PHILOX4_32_10));
// CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen, seed));
// if (stream_handle)
// CURAND_CALL(hiprandSetStream(gen, *(hipStream_t*)stream_handle->handle));
// CURAND_CALL(hiprandGenerateUniform(gen, arr_data, size));
// CURAND_CALL(hiprandDestroyGenerator(gen));
// dim3 blocks;
// dim3 threads;
// if (size <= 1024) {
// threads.x = size;
// blocks.x = 1;
// } else {
// threads.x = 1024;
// blocks.x = (size + 1023) / 1024;
// }
// if (stream_handle) {
// hipLaunchKernelGGL(( init_scale_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, arr_data, lb, ub, size);
// } else {
// hipLaunchKernelGGL(( init_scale_kernel), dim3(blocks), dim3(threads), 0, 0, arr_data, lb, ub, size);
// }
// return 0;
// }
__global__ void init_scale_kernel(float *arr, const float lb, const float ub, unsigned long long seed, size_t size) {
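    // One independent Philox stream per element, seeded with (seed + index);
    // the uniform draw is rescaled from the unit interval to [lb, ub].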
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size) return;
hiprandStatePhilox4_32_10_t state;
hiprand_init(seed + ind, 0, 0, &state);
arr[ind] = hiprand_uniform(&state) * (ub - lb) + lb;
}
int DLGpuUniformInit(DLArrayHandle arr, const float lb, const float ub, unsigned long long seed,
DLStreamHandle stream_handle=NULL) {
size_t size = 1;
for (index_t i = 0; i < arr->ndim; i++) {
size *= arr->shape[i];
}
float *arr_data = (float *)arr->data;
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle) {
hipLaunchKernelGGL(( init_scale_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, arr_data, lb, ub, seed, size);
} else {
hipLaunchKernelGGL(( init_scale_kernel), dim3(blocks), dim3(threads), 0, 0, arr_data, lb, ub, seed, size);
}
return 0;
}
__global__ void truncated_normal_kernel(float *arr, const float mean, const float stddev, unsigned long long seed, size_t size) {
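    // Rejection sampling: redraw the standard normal until it falls within [-2, 2],
    // then scale by stddev and shift by mean (truncated normal initialization).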
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size) return;
hiprandStatePhilox4_32_10_t state;
bool not_found = true;
hiprand_init(seed + ind, 0, 0, &state);
float temp;
while (not_found) {
temp = hiprand_normal(&state);
not_found = (temp < -2 || temp > 2);
}
arr[ind] = temp * stddev + mean;
}
int DLGpuTruncatedNormalInit(DLArrayHandle arr, const float mean, const float stddev, unsigned long long seed,
DLStreamHandle stream_handle=NULL) {
size_t size = 1;
for (index_t i = 0; i < arr->ndim; i++) {
size *= arr->shape[i];
}
float *arr_data = (float *)arr->data;
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle) {
hipLaunchKernelGGL(( truncated_normal_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, arr_data, mean, stddev, seed, size);
} else {
hipLaunchKernelGGL(( truncated_normal_kernel), dim3(blocks), dim3(threads), 0, 0, arr_data, mean, stddev, seed, size);
}
return 0;
}
|
a19191450419aeed6b23c756951d25bdacd94abf.cu
|
#include "gpu_runtime.h"
#include <curand.h>
#include <curand_kernel.h>
int DLGpuNormalInit(DLArrayHandle arr, const float mean, const float stddev, unsigned long long seed,
DLStreamHandle stream_handle=NULL) {
size_t size = 1;
for (index_t i = 0; i < arr->ndim; i++) {
size *= arr->shape[i];
}
float *arr_data = (float *)arr->data;
curandGenerator_t gen;
CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_PHILOX4_32_10));
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, seed));
if (stream_handle)
CURAND_CALL(curandSetStream(gen, *(cudaStream_t*)stream_handle->handle));
CURAND_CALL(curandGenerateNormal(gen, arr_data, size, mean, stddev));
CURAND_CALL(curandDestroyGenerator(gen));
return 0;
}
// __global__ void init_scale_kernel(float *arr, const float lb, const float ub, size_t size) {
// size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
// if (ind >= size) return;
// arr[ind] = arr[ind] * (ub - lb) + lb;
// }
// int DLGpuUniformInit(DLArrayHandle arr, const float lb, const float ub, unsigned long long seed,
// DLStreamHandle stream_handle=NULL) {
// size_t size = 1;
// for (index_t i = 0; i < arr->ndim; i++) {
// size *= arr->shape[i];
// }
// float *arr_data = (float *)arr->data;
// curandGenerator_t gen;
// CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_PHILOX4_32_10));
// CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, seed));
// if (stream_handle)
// CURAND_CALL(curandSetStream(gen, *(cudaStream_t*)stream_handle->handle));
// CURAND_CALL(curandGenerateUniform(gen, arr_data, size));
// CURAND_CALL(curandDestroyGenerator(gen));
// dim3 blocks;
// dim3 threads;
// if (size <= 1024) {
// threads.x = size;
// blocks.x = 1;
// } else {
// threads.x = 1024;
// blocks.x = (size + 1023) / 1024;
// }
// if (stream_handle) {
// init_scale_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(arr_data, lb, ub, size);
// } else {
// init_scale_kernel<<<blocks, threads>>>(arr_data, lb, ub, size);
// }
// return 0;
// }
__global__ void init_scale_kernel(float *arr, const float lb, const float ub, unsigned long long seed, size_t size) {
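    // One independent Philox stream per element, seeded with (seed + index);
    // the uniform draw is rescaled from the unit interval to [lb, ub].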
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size) return;
curandStatePhilox4_32_10_t state;
curand_init(seed + ind, 0, 0, &state);
arr[ind] = curand_uniform(&state) * (ub - lb) + lb;
}
int DLGpuUniformInit(DLArrayHandle arr, const float lb, const float ub, unsigned long long seed,
DLStreamHandle stream_handle=NULL) {
size_t size = 1;
for (index_t i = 0; i < arr->ndim; i++) {
size *= arr->shape[i];
}
float *arr_data = (float *)arr->data;
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle) {
init_scale_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(arr_data, lb, ub, seed, size);
} else {
init_scale_kernel<<<blocks, threads>>>(arr_data, lb, ub, seed, size);
}
return 0;
}
__global__ void truncated_normal_kernel(float *arr, const float mean, const float stddev, unsigned long long seed, size_t size) {
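    // Rejection sampling: redraw the standard normal until it falls within [-2, 2],
    // then scale by stddev and shift by mean (truncated normal initialization).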
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size) return;
curandStatePhilox4_32_10_t state;
bool not_found = true;
curand_init(seed + ind, 0, 0, &state);
float temp;
while (not_found) {
temp = curand_normal(&state);
not_found = (temp < -2 || temp > 2);
}
arr[ind] = temp * stddev + mean;
}
int DLGpuTruncatedNormalInit(DLArrayHandle arr, const float mean, const float stddev, unsigned long long seed,
DLStreamHandle stream_handle=NULL) {
size_t size = 1;
for (index_t i = 0; i < arr->ndim; i++) {
size *= arr->shape[i];
}
float *arr_data = (float *)arr->data;
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle) {
truncated_normal_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(arr_data, mean, stddev, seed, size);
} else {
truncated_normal_kernel<<<blocks, threads>>>(arr_data, mean, stddev, seed, size);
}
return 0;
}
|
bb76d98a9be87161962af74fde3024b2623fe639.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype, typename Op>
__global__ void xopy_kernel_broadcast(int n, int dima0, int dima1, int dima2, int dima3,
int dimb0, int dimb1, int dimb2, int dimb3,
const Dtype* x, const Dtype* y, Dtype* z)
{
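  // Elementwise binary Op with broadcasting: indices follow the larger (x-shaped) tensor,
  // and any dimension where dimb < dima reads y at offset 0 along that axis.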
int w0, h0, c0, n0;
int w1, h1, c1, n1;
int indexa, indexb, indexy;
CUDA_KERNEL_LOOP(index, n) {
w0 = index % dima3;
h0 = (index / dima3) % dima2;
c0 = (index / dima2 / dima3) % dima1;
n0 = index / dima3 / dima2 / dima1;
w1 = (dimb3 < dima3)? 0 : w0;
h1 = (dimb2 < dima2)? 0 : h0;
c1 = (dimb1 < dima1)? 0 : c0;
n1 = (dimb0 < dima0)? 0 : n0;
indexa = index;
indexb = w1 + (h1 + (c1 + n1 * dimb1) * dimb2) * dimb3;
Op o;
z[index] = o(x[indexa], y[indexb]);
}
}
template <typename Dtype, typename Op>
__global__ void gpu_dimension_reduction(int n, int dima0, int dima1, int dima2, int dima3,
int dimb0, int dimb1, int dimb2, int dimb3,
Dtype init, const Dtype* a, Dtype* b)
{
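  // Reduces a down to b's shape: each output element accumulates Op over every axis where
  // a's extent exceeds b's. result is initialized once per thread, so this relies on each
  // thread handling a single output index (which the CAFFE_GET_BLOCKS launch guarantees).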
int w0, h0, c0, n0;
int w1, h1, c1, n1;
int indexa, indexb, indexy;
Dtype result = init;
int wmax, hmax, cmax, nmax;
if (dima0 > dimb0) nmax = dima0; else nmax = 1;
if (dima1 > dimb1) cmax = dima1; else cmax = 1;
if (dima2 > dimb2) hmax = dima2; else hmax = 1;
if (dima3 > dimb3) wmax = dima3; else wmax = 1;
  // a very slow, unoptimized version; needs optimization
CUDA_KERNEL_LOOP(index, n) {
w0 = index % dimb3;
h0 = (index / dimb3) % dimb2;
c0 = (index / dimb2 / dimb3) % dimb1;
n0 = index / dimb3 / dimb2 / dimb1;
Op op;
for (int dn = 0; dn < nmax; dn++) {
for (int dc = 0; dc < cmax; dc++) {
for (int dh = 0; dh < hmax; dh++) {
for (int dw = 0; dw < wmax; dw++) {
indexa = (w0+dw) + ((h0+dh) + ((c0+dc) + (n0+dn) * dima1) * dima2) * dima3;
result = op(result, a[indexa]);
}}}}
b[index] = result;
}
}
template <typename Dtype> class PrivateAddOp { public: __device__ Dtype operator()(const Dtype a, const Dtype b){ return a+b; } };
template <typename Dtype> class PrivateMulOp { public: __device__ Dtype operator()(const Dtype a, const Dtype b){ return a*b; } };
template <typename Dtype> class PrivateSubOp { public: __device__ Dtype operator()(const Dtype a, const Dtype b){ return a-b; } };
template <typename Dtype> class PrivateRevSubOp { public: __device__ Dtype operator()(const Dtype a, const Dtype b){ return b-a; } };
template <typename Dtype> class PrivateDivOp { public: __device__ Dtype operator()(const Dtype a, const Dtype b){ return a/b; } };
template <typename Dtype> class PrivateRevDivOp { public: __device__ Dtype operator()(const Dtype a, const Dtype b){ return b/a; } };
static bool sould_broadcast_a(const int dima[4], const int dimb[4])
{
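  // True when a must be broadcast up to b's shape; every mismatched dimension must be 1 on
  // exactly one side, and broadcasting may only go in one direction (enforced by the XOR).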
bool brd_a = 0;
bool brd_b = 0;
for (int i=0; i<4; i++)
{
if (dima[i] < dimb[i])
{
assert(dima[i] == 1);
brd_a |= true;
}
else if (dima[i] > dimb[i])
{
assert(dimb[i] == 1);
brd_b |= true;
}
}
assert(brd_a ^ brd_b);
return brd_a;
}
template <>
void caffe_gpu_add_broadcast<float>(const int dima[4], const int dimb[4],
const float* a, const float* b, float* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<float, PrivateAddOp<float> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<float, PrivateAddOp<float> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_sub_broadcast<float>(const int dima[4], const int dimb[4],
const float* a, const float* b, float* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<float, PrivateRevSubOp<float> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<float, PrivateSubOp<float> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_mul_broadcast<float>(const int dima[4], const int dimb[4],
const float* a, const float* b, float* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<float, PrivateMulOp<float> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<float, PrivateMulOp<float> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_div_broadcast<float>(const int dima[4], const int dimb[4],
const float* a, const float* b, float* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<float, PrivateRevDivOp<float> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<float, PrivateDivOp<float> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_add_broadcast<double>(const int dima[4], const int dimb[4],
const double* a, const double* b, double* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<double, PrivateAddOp<double> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<double, PrivateAddOp<double> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_sub_broadcast<double>(const int dima[4], const int dimb[4],
const double* a, const double* b, double* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<double, PrivateRevSubOp<double> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<double, PrivateSubOp<double> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_mul_broadcast<double>(const int dima[4], const int dimb[4],
const double* a, const double* b, double* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<double, PrivateMulOp<double> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<double, PrivateMulOp<double> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_div_broadcast<double>(const int dima[4], const int dimb[4],
const double* a, const double* b, double* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<double, PrivateRevDivOp<double> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
hipLaunchKernelGGL(( xopy_kernel_broadcast<double, PrivateDivOp<double> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_sum_reduce<float>(const int dima[4], const int dimb[4],
const float* a, float* b) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na < Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
assert(1==0);
}
else
{
hipLaunchKernelGGL(( gpu_dimension_reduction <float, PrivateAddOp<float> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], 0, a, b);
}
}
template <>
void caffe_gpu_sum_reduce<double>(const int dima[4], const int dimb[4],
const double* a, double* b) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na < Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
assert(1==0);
}
else
{
hipLaunchKernelGGL(( gpu_dimension_reduction <double, PrivateAddOp<double> >)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], 0, a, b);
}
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
bb76d98a9be87161962af74fde3024b2623fe639.cu
|
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
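// CUDA_KERNEL_LOOP and CAFFE_GET_BLOCKS implement the usual grid-stride launch
// pattern; their exact definitions live in caffe headers that are not part of
// this file, so the expansion below is only an assumed approximation of the
// standard Caffe macros (the thread count in particular is an assumed value):
//
//   // #define CAFFE_CUDA_NUM_THREADS 512
//   // #define CAFFE_GET_BLOCKS(n) \
//   //   (((n) + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS)
//   // #define CUDA_KERNEL_LOOP(i, n) \
//   //   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
//   //        i += blockDim.x * gridDim.x)
//
// so every kernel in this file is launched with enough blocks to cover N and
// each thread strides over any elements left beyond the grid.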
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype, typename Op>
__global__ void xopy_kernel_broadcast(int n, int dima0, int dima1, int dima2, int dima3,
int dimb0, int dimb1, int dimb2, int dimb3,
const Dtype* x, const Dtype* y, Dtype* z)
{
int w0, h0, c0, n0;
int w1, h1, c1, n1;
  int indexa, indexb;
CUDA_KERNEL_LOOP(index, n) {
w0 = index % dima3;
h0 = (index / dima3) % dima2;
c0 = (index / dima2 / dima3) % dima1;
n0 = index / dima3 / dima2 / dima1;
w1 = (dimb3 < dima3)? 0 : w0;
h1 = (dimb2 < dima2)? 0 : h0;
c1 = (dimb1 < dima1)? 0 : c0;
n1 = (dimb0 < dima0)? 0 : n0;
indexa = index;
indexb = w1 + (h1 + (c1 + n1 * dimb1) * dimb2) * dimb3;
Op o;
z[index] = o(x[indexa], y[indexb]);
}
}
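// The index arithmetic above assumes NCHW linearization,
//   index = ((n0 * dim1 + c0) * dim2 + h0) * dim3 + w0,
// and collapses to coordinate 0 every axis on which the second operand is
// smaller. A worked example with illustrative shapes, dima = {2,3,4,5} and
// dimb = {1,3,1,1}, at index = 119:
//   w0 = 119 % 5 = 4,  h0 = (119/5) % 4 = 3,  c0 = (119/20) % 3 = 2,  n0 = 119/60 = 1
//   w1 = h1 = n1 = 0,  c1 = 2
//   indexb = 0 + (0 + (2 + 0*3) * 1) * 1 = 2
// so the per-channel value y[2] (the broadcast operand) is combined with every
// element of channel 2 of x.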
template <typename Dtype, typename Op>
__global__ void gpu_dimension_reduction(int n, int dima0, int dima1, int dima2, int dima3,
int dimb0, int dimb1, int dimb2, int dimb3,
Dtype init, const Dtype* a, Dtype* b)
{
  int w0, h0, c0, n0;
  int indexa;
int wmax, hmax, cmax, nmax;
if (dima0 > dimb0) nmax = dima0; else nmax = 1;
if (dima1 > dimb1) cmax = dima1; else cmax = 1;
if (dima2 > dimb2) hmax = dima2; else hmax = 1;
if (dima3 > dimb3) wmax = dima3; else wmax = 1;
  // naive, very slow reference version; needs optimization
CUDA_KERNEL_LOOP(index, n) {
w0 = index % dimb3;
h0 = (index / dimb3) % dimb2;
c0 = (index / dimb2 / dimb3) % dimb1;
n0 = index / dimb3 / dimb2 / dimb1;
    Op op;
    Dtype result = init;
for (int dn = 0; dn < nmax; dn++) {
for (int dc = 0; dc < cmax; dc++) {
for (int dh = 0; dh < hmax; dh++) {
for (int dw = 0; dw < wmax; dw++) {
indexa = (w0+dw) + ((h0+dh) + ((c0+dc) + (n0+dn) * dima1) * dima2) * dima3;
result = op(result, a[indexa]);
}}}}
b[index] = result;
}
}
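// Each thread of gpu_dimension_reduction owns one element of the smaller
// output b and serially folds every collapsed coordinate of a into it with Op,
// starting from init (0 for a sum); as the comment inside notes, this is a
// slow reference implementation rather than a tree reduction. Intended
// semantics on an illustrative shape pair:
//
//   dima = {2, 3, 4, 5}, dimb = {1, 3, 1, 1}
//   b[c] = sum over n, h, w of a[n][c][h][w]   (with Op = PrivateAddOp, init = 0)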
template <typename Dtype> class PrivateAddOp { public: __device__ Dtype operator()(const Dtype a, const Dtype b){ return a+b; } };
template <typename Dtype> class PrivateMulOp { public: __device__ Dtype operator()(const Dtype a, const Dtype b){ return a*b; } };
template <typename Dtype> class PrivateSubOp { public: __device__ Dtype operator()(const Dtype a, const Dtype b){ return a-b; } };
template <typename Dtype> class PrivateRevSubOp { public: __device__ Dtype operator()(const Dtype a, const Dtype b){ return b-a; } };
template <typename Dtype> class PrivateDivOp { public: __device__ Dtype operator()(const Dtype a, const Dtype b){ return a/b; } };
template <typename Dtype> class PrivateRevDivOp { public: __device__ Dtype operator()(const Dtype a, const Dtype b){ return b/a; } };
static bool sould_broadcast_a(const int dima[4], const int dimb[4])
{
  bool brd_a = false;
  bool brd_b = false;
for (int i=0; i<4; i++)
{
if (dima[i] < dimb[i])
{
assert(dima[i] == 1);
brd_a |= true;
}
else if (dima[i] > dimb[i])
{
assert(dimb[i] == 1);
brd_b |= true;
}
}
assert(brd_a ^ brd_b);
return brd_a;
}
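// sould_broadcast_a() returns true when the first operand is the one that must
// be broadcast, i.e. it is the smaller tensor; the asserts enforce that every
// mismatching axis is 1 on exactly one side and that only one operand needs
// broadcasting. Illustrative calls with hypothetical shapes:
//
//   int bias_dims[4] = {1, 3, 1, 1};
//   int blob_dims[4] = {2, 3, 4, 5};
//   sould_broadcast_a(bias_dims, blob_dims);  // true:  broadcast a over b
//   sould_broadcast_a(blob_dims, bias_dims);  // false: broadcast b over a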
template <>
void caffe_gpu_add_broadcast<float>(const int dima[4], const int dimb[4],
const float* a, const float* b, float* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
xopy_kernel_broadcast<float, PrivateAddOp<float> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
xopy_kernel_broadcast<float, PrivateAddOp<float> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_sub_broadcast<float>(const int dima[4], const int dimb[4],
const float* a, const float* b, float* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
xopy_kernel_broadcast<float, PrivateRevSubOp<float> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
xopy_kernel_broadcast<float, PrivateSubOp<float> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_mul_broadcast<float>(const int dima[4], const int dimb[4],
const float* a, const float* b, float* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
xopy_kernel_broadcast<float, PrivateMulOp<float> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
xopy_kernel_broadcast<float, PrivateMulOp<float> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_div_broadcast<float>(const int dima[4], const int dimb[4],
const float* a, const float* b, float* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
xopy_kernel_broadcast<float, PrivateRevDivOp<float> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
xopy_kernel_broadcast<float, PrivateDivOp<float> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_add_broadcast<double>(const int dima[4], const int dimb[4],
const double* a, const double* b, double* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
xopy_kernel_broadcast<double, PrivateAddOp<double> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
xopy_kernel_broadcast<double, PrivateAddOp<double> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_sub_broadcast<double>(const int dima[4], const int dimb[4],
const double* a, const double* b, double* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
xopy_kernel_broadcast<double, PrivateRevSubOp<double> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
xopy_kernel_broadcast<double, PrivateSubOp<double> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_mul_broadcast<double>(const int dima[4], const int dimb[4],
const double* a, const double* b, double* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
xopy_kernel_broadcast<double, PrivateMulOp<double> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
xopy_kernel_broadcast<double, PrivateMulOp<double> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_div_broadcast<double>(const int dima[4], const int dimb[4],
const double* a, const double* b, double* y) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na > Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
xopy_kernel_broadcast<double, PrivateRevDivOp<double> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dimb[0], dimb[1], dimb[2], dimb[3], dima[0], dima[1], dima[2], dima[3], b, a, y);
}
else
{
xopy_kernel_broadcast<double, PrivateDivOp<double> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], a, b, y);
}
}
template <>
void caffe_gpu_sum_reduce<float>(const int dima[4], const int dimb[4],
const float* a, float* b) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na < Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
assert(1==0);
}
else
{
gpu_dimension_reduction <float, PrivateAddOp<float> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], 0, a, b);
}
}
template <>
void caffe_gpu_sum_reduce<double>(const int dima[4], const int dimb[4],
const double* a, double* b) {
int Na = dima[0] * dima[1] * dima[2] * dima[3];
int Nb = dimb[0] * dimb[1] * dimb[2] * dimb[3];
int N = (Na < Nb)? Na: Nb;
if (sould_broadcast_a(dima, dimb))
{
assert(1==0);
}
else
{
gpu_dimension_reduction <double, PrivateAddOp<double> >
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>
(N, dima[0], dima[1], dima[2], dima[3], dimb[0], dimb[1], dimb[2], dimb[3], 0, a, b);
}
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
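// Both overloads above apply the standard affine transform of a unit uniform
// sample, r = a + (b - a) * u with u drawn from (0, 1] by cuRAND, skipping the
// scale and shift when they would be no-ops. A usage sketch with an assumed
// device buffer d_r of length n:
//
//   // fill d_r with samples from U(-1, 1)
//   caffe_gpu_rng_uniform<float>(n, -1.0f, 1.0f, d_r);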
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
1ec4fdb1e388f63d4d6d8c03656c0d9f66b26a30.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// **************************************************************************
//
// PARALUTION www.paralution.com
//
// Copyright (C) 2015 PARALUTION Labs UG (haftungsbeschränkt) & Co. KG
// Am Hasensprung 6, 76571 Gaggenau
// Handelsregister (commercial register): Amtsgericht Mannheim, HRA 706051
// Vertreten durch (represented by):
// PARALUTION Labs Verwaltungs UG (haftungsbeschränkt)
// Am Hasensprung 6, 76571 Gaggenau
// Handelsregister (commercial register): Amtsgericht Mannheim, HRB 721277
// Geschäftsführer (managing directors): Dimitar Lukarski, Nico Trost
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// **************************************************************************
// PARALUTION version 1.1.0
#include "../../utils/def.hpp"
#include "gpu_matrix_csr.hpp"
#include "gpu_matrix_coo.hpp"
#include "gpu_matrix_dia.hpp"
#include "gpu_matrix_ell.hpp"
#include "gpu_matrix_hyb.hpp"
#include "gpu_matrix_mcsr.hpp"
#include "gpu_matrix_bcsr.hpp"
#include "gpu_matrix_dense.hpp"
#include "gpu_vector.hpp"
#include "../host/host_matrix_csr.hpp"
#include "../base_matrix.hpp"
#include "../base_vector.hpp"
#include "../backend_manager.hpp"
#include "../../utils/log.hpp"
#include "../../utils/allocate_free.hpp"
#include "gpu_utils.hpp"
#include "cuda_kernels_general.hpp"
#include "cuda_kernels_csr.hpp"
#include "cuda_kernels_vector.hpp"
#include "cusparse_csr.hpp"
#include "gpu_allocate_free.hpp"
#include "../matrix_formats_ind.hpp"
#include <hip/hip_runtime.h>
//#include <hipsparse.h>
#include "hipsparse.h"
namespace paralution {
template <typename ValueType>
GPUAcceleratorMatrixCSR<ValueType>::GPUAcceleratorMatrixCSR() {
  // no default constructor
LOG_INFO("no default constructor");
FATAL_ERROR(__FILE__, __LINE__);
}
template <typename ValueType>
GPUAcceleratorMatrixCSR<ValueType>::GPUAcceleratorMatrixCSR(const Paralution_Backend_Descriptor local_backend) {
LOG_DEBUG(this, "GPUAcceleratorMatrixCSR::GPUAcceleratorMatrixCSR()",
"constructor with local_backend");
this->mat_.row_offset = NULL;
this->mat_.col = NULL;
this->mat_.val = NULL;
this->set_backend(local_backend);
this->L_mat_descr_ = 0;
this->U_mat_descr_ = 0;
//this->L_mat_info_ = 0;
//this->U_mat_info_ = 0;
this->mat_descr_ = 0;
this->tmp_vec_ = NULL;
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipsparseStatus_t stat_t;
stat_t = hipsparseCreateMatDescr(&this->mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->mat_descr_, HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
template <typename ValueType>
GPUAcceleratorMatrixCSR<ValueType>::~GPUAcceleratorMatrixCSR() {
LOG_DEBUG(this, "GPUAcceleratorMatrixCSR::~GPUAcceleratorMatrixCSR()",
"destructor");
this->Clear();
hipsparseStatus_t stat_t;
stat_t = hipsparseDestroyMatDescr(this->mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::info(void) const {
LOG_INFO("GPUAcceleratorMatrixCSR<ValueType>");
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::AllocateCSR(const int nnz, const int nrow, const int ncol) {
assert(nnz >= 0);
assert(ncol >= 0);
assert(nrow >= 0);
if (this->get_nnz() > 0)
this->Clear();
if (nnz > 0) {
allocate_gpu(nrow+1, &this->mat_.row_offset);
allocate_gpu(nnz, &this->mat_.col);
allocate_gpu(nnz, &this->mat_.val);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nrow+1, mat_.row_offset);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nnz, mat_.col);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nnz, mat_.val);
this->nrow_ = nrow;
this->ncol_ = ncol;
this->nnz_ = nnz;
}
}
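// Layout reminder for the three CSR arrays allocated above, using a small
// worked example (not taken from any particular test):
//
//   A = | 1 0 2 |      row_offset = { 0, 2, 3, 6 }      (nrow + 1 entries)
//       | 0 3 0 |      col        = { 0, 2, 1, 0, 1, 2 }
//       | 4 5 6 |      val        = { 1, 2, 3, 4, 5, 6 }
//
// Row r occupies positions [row_offset[r], row_offset[r+1]) of col/val, and
// nnz == row_offset[nrow].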
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::SetDataPtrCSR(int **row_offset, int **col, ValueType **val,
const int nnz, const int nrow, const int ncol) {
assert(*row_offset != NULL);
assert(*col != NULL);
assert(*val != NULL);
assert(nnz > 0);
assert(nrow > 0);
assert(ncol > 0);
this->Clear();
this->nrow_ = nrow;
this->ncol_ = ncol;
this->nnz_ = nnz;
hipDeviceSynchronize();
this->mat_.row_offset = *row_offset;
this->mat_.col = *col;
this->mat_.val = *val;
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::LeaveDataPtrCSR(int **row_offset, int **col, ValueType **val) {
assert(this->get_nrow() > 0);
assert(this->get_ncol() > 0);
assert(this->get_nnz() > 0);
hipDeviceSynchronize();
// see free_host function for details
*row_offset = this->mat_.row_offset;
*col = this->mat_.col;
*val = this->mat_.val;
this->mat_.row_offset = NULL;
this->mat_.col = NULL;
this->mat_.val = NULL;
this->nrow_ = 0;
this->ncol_ = 0;
this->nnz_ = 0;
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::Clear() {
if (this->get_nnz() > 0) {
free_gpu(&this->mat_.row_offset);
free_gpu(&this->mat_.col);
free_gpu(&this->mat_.val);
this->nrow_ = 0;
this->ncol_ = 0;
this->nnz_ = 0;
this->LUAnalyseClear();
this->LLAnalyseClear();
}
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::Zeros() {
if (this->get_nnz() > 0)
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
this->get_nnz(), mat_.val);
return true;
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) {
const HostMatrixCSR<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// CPU to GPU copy
if ((cast_mat = dynamic_cast<const HostMatrixCSR<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() );
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(this->mat_.row_offset, // dst
cast_mat->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.col, // dst
cast_mat->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) {
const HostMatrixCSR<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// CPU to GPU copy
if ((cast_mat = dynamic_cast<const HostMatrixCSR<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() );
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpyAsync(this->mat_.row_offset, // dst
cast_mat->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpyAsync(this->mat_.col, // dst
cast_mat->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpyAsync(this->mat_.val, // dst
cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const {
HostMatrixCSR<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to CPU copy
if ((cast_mat = dynamic_cast<HostMatrixCSR<ValueType>*> (dst)) != NULL) {
cast_mat->set_backend(this->local_backend_);
if (dst->get_nnz() == 0)
cast_mat->AllocateCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() );
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(cast_mat->mat_.row_offset, // dst
this->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(cast_mat->mat_.col, // dst
this->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const {
HostMatrixCSR<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to CPU copy
if ((cast_mat = dynamic_cast<HostMatrixCSR<ValueType>*> (dst)) != NULL) {
cast_mat->set_backend(this->local_backend_);
if (dst->get_nnz() == 0)
cast_mat->AllocateCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() );
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpyAsync(cast_mat->mat_.row_offset, // dst
this->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpyAsync(cast_mat->mat_.col, // dst
this->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpyAsync(cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) {
const GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat;
const HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() );
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(this->mat_.row_offset, // dst
gpu_cast_mat->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.col, // dst
gpu_cast_mat->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
gpu_cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//CPU to GPU
if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) {
this->CopyFromHost(*host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) {
const GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat;
const HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() );
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(this->mat_.row_offset, // dst
gpu_cast_mat->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.col, // dst
gpu_cast_mat->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
gpu_cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//CPU to GPU
if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) {
this->CopyFromHostAsync(*host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const {
GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat;
HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (dst)) != NULL) {
gpu_cast_mat->set_backend(this->local_backend_);
    if (dst->get_nnz() == 0)
      gpu_cast_mat->AllocateCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() );
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(gpu_cast_mat->mat_.row_offset, // dst
this->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
                hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(gpu_cast_mat->mat_.col, // dst
this->mat_.col, // src
this->get_nnz()*sizeof(int), // size
                hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(gpu_cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
                hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//GPU to CPU
if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) {
this->CopyToHost(host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const {
GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat;
HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (dst)) != NULL) {
gpu_cast_mat->set_backend(this->local_backend_);
    if (dst->get_nnz() == 0)
      gpu_cast_mat->AllocateCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() );
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(gpu_cast_mat->mat_.row_offset, // dst
this->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
                hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(gpu_cast_mat->mat_.col, // dst
this->mat_.col, // src
this->get_nnz()*sizeof(int), // size
                hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(gpu_cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
                hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//GPU to CPU
if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) {
this->CopyToHostAsync(host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyFromCSR(const int *row_offsets, const int *col, const ValueType *val) {
// assert CSR format
assert(this->get_mat_format() == CSR);
if (this->get_nnz() > 0) {
assert(this->nrow_ > 0);
assert(this->ncol_ > 0);
hipMemcpy(this->mat_.row_offset, // dst
row_offsets, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.col, // dst
col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyToCSR(int *row_offsets, int *col, ValueType *val) const {
// assert CSR format
assert(this->get_mat_format() == CSR);
if (this->get_nnz() > 0) {
assert(this->nrow_ > 0);
assert(this->ncol_ > 0);
hipMemcpy(row_offsets, // dst
this->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(col, // dst
this->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) {
this->Clear();
// empty matrix is empty matrix
if (mat.get_nnz() == 0)
return true;
const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr;
if ((cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) != NULL) {
this->CopyFrom(*cast_mat_csr);
return true;
}
/*
const GPUAcceleratorMatrixCOO<ValueType> *cast_mat_coo;
if ((cast_mat_coo = dynamic_cast<const GPUAcceleratorMatrixCOO<ValueType>*> (&mat)) != NULL) {
this->Clear();
TODO
Allocate
  copy column
copy val
hipsparseStatus_t
hipsparseXcoo2csr(hipsparseHandle_t handle, const int *cooRowInd,
int nnz, int m, int *csrRowPtr, hipsparseIndexBase_t
idxBase);
FATAL_ERROR(__FILE__, __LINE__);
this->nrow_ = cast_mat_coo->get_nrow();
this->ncol_ = cast_mat_coo->get_ncol();
this->nnz_ = cast_mat_coo->get_nnz();
return true;
}
*/
/*
const GPUAcceleratorMatrixDENSE<ValueType> *cast_mat_dense;
if ((cast_mat_dense = dynamic_cast<const GPUAcceleratorMatrixDENSE<ValueType>*> (&mat)) != NULL) {
this->Clear();
int nnz = 0;
FATAL_ERROR(__FILE__, __LINE__);
this->nrow_ = cast_mat_dense->get_nrow();
this->ncol_ = cast_mat_dense->get_ncol();
this->nnz_ = nnz;
return true;
}
*/
/*
const GPUAcceleratorMatrixDIA<ValueType> *cast_mat_dia;
if ((cast_mat_dia = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&mat)) != NULL) {
this->Clear();
int nnz = 0;
FATAL_ERROR(__FILE__, __LINE__);
this->nrow_ = cast_mat_dia->get_nrow();
this->ncol_ = cast_mat_dia->get_ncol();
this->nnz_ = nnz ;
return true;
}
*/
/*
const GPUAcceleratorMatrixELL<ValueType> *cast_mat_ell;
if ((cast_mat_ell = dynamic_cast<const GPUAcceleratorMatrixELL<ValueType>*> (&mat)) != NULL) {
this->Clear();
int nnz = 0;
FATAL_ERROR(__FILE__, __LINE__);
this->nrow_ = cast_mat_ell->get_nrow();
this->ncol_ = cast_mat_ell->get_ncol();
this->nnz_ = nnz ;
return true;
}
*/
/*
const GPUAcceleratorMatrixMCSR<ValueType> *cast_mat_mcsr;
if ((cast_mat_mcsr = dynamic_cast<const GPUAcceleratorMatrixMCSR<ValueType>*> (&mat)) != NULL) {
this->Clear();
FATAL_ERROR(__FILE__, __LINE__);
this->nrow_ = cast_mat_mcsr->get_nrow();
this->ncol_ = cast_mat_mcsr->get_ncol();
this->nnz_ = cast_mat_mcsr->get_nnz();
return true;
}
*/
/*
const GPUAcceleratorMatrixHYB<ValueType> *cast_mat_hyb;
if ((cast_mat_hyb = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&mat)) != NULL) {
this->Clear();
FATAL_ERROR(__FILE__, __LINE__);
int nnz = 0;
this->nrow_ = cast_mat_hyb->get_nrow();
this->ncol_ = cast_mat_hyb->get_ncol();
this->nnz_ = nnz;
return true;
}
*/
return false;
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyFromHostCSR(const int *row_offset, const int *col, const ValueType *val,
const int nnz, const int nrow, const int ncol) {
assert(nnz >= 0);
assert(ncol >= 0);
assert(nrow >= 0);
assert(row_offset != NULL);
assert(col != NULL);
assert(val != NULL);
// Allocate matrix
if (this->nnz_ > 0)
this->Clear();
if (nnz > 0) {
allocate_gpu(nrow+1, &this->mat_.row_offset);
allocate_gpu(nnz, &this->mat_.col);
allocate_gpu(nnz, &this->mat_.val);
this->nrow_ = nrow;
this->ncol_ = ncol;
this->nnz_ = nnz;
hipMemcpy(this->mat_.row_offset, // dst
row_offset, // src
(this->nrow_+1)*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.col, // dst
col, // src
this->nnz_*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
val, // src
this->nnz_*sizeof(ValueType), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::Permute( const BaseVector<int> &permutation){
assert(&permutation != NULL);
assert(permutation.get_size() == this->get_nrow());
assert(permutation.get_size() == this->get_ncol());
if (this->get_nnz() > 0) {
int *d_nnzr = NULL;
int *d_nnzrPerm = NULL;
int *d_nnzPerm = NULL;
int *d_offset = NULL;
ValueType *d_data = NULL;
allocate_gpu<int>(this->get_nrow(), &d_nnzr);
allocate_gpu<int>(this->get_nrow(), &d_nnzrPerm);
allocate_gpu<int>((this->get_nrow()+1), &d_nnzPerm);
allocate_gpu<ValueType>(this->get_nnz(), &d_data);
allocate_gpu<int>(this->get_nnz(), &d_offset);
const GPUAcceleratorVector<int> *cast_perm = dynamic_cast<const GPUAcceleratorVector<int>*> (&permutation);
assert(cast_perm != NULL);
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_calc_row_nnz<int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset, d_nnzr);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_row_nnz<int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), d_nnzr, cast_perm->vec_, d_nnzrPerm);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
//TODO
//move in extra file
cum_sum<int, 256>(d_nnzPerm, d_nnzrPerm, this->get_nrow());
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_rows<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0,
this->get_nrow(),
this->mat_.row_offset,
d_nnzPerm,
this->mat_.col,
this->mat_.val,
cast_perm->vec_,
d_nnzr,
d_offset,
d_data);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
free_gpu<int>(&this->mat_.row_offset);
this->mat_.row_offset = d_nnzPerm;
int *d_buffer = NULL;
int *h_buffer = NULL;
int GROUP_SIZE;
int LOCAL_SIZE;
int FinalReduceSize;
allocate_gpu<int>(this->local_backend_.GPU_warp * 4, &d_buffer);
dim3 BlockSize2(this->local_backend_.GPU_block_size);
dim3 GridSize2(this->local_backend_.GPU_warp * 4);
GROUP_SIZE = ( size_t( ( size_t( nrow / ( this->local_backend_.GPU_warp * 4 ) ) + 1 )
/ this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size;
LOCAL_SIZE = GROUP_SIZE / this->local_backend_.GPU_block_size;
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_max<int, int, 256>), dim3(GridSize2), dim3(BlockSize2), 0, 0, nrow, d_nnzr, d_buffer, GROUP_SIZE, LOCAL_SIZE);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
FinalReduceSize = this->local_backend_.GPU_warp * 4;
allocate_host(FinalReduceSize, &h_buffer);
hipMemcpy(h_buffer, // dst
d_buffer, // src
FinalReduceSize*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_gpu<int>(&d_buffer);
int maxnnzrow = 0;
for (int i=0; i<FinalReduceSize; ++i)
if (maxnnzrow < h_buffer[i])
maxnnzrow = h_buffer[i];
free_host(&h_buffer);
if (maxnnzrow > 64)
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_cols_fallback<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset,
cast_perm->vec_, d_nnzrPerm, d_offset,
d_data, this->mat_.col, this->mat_.val);
else if (maxnnzrow > 32)
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_cols<ValueType, int, 64>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset,
cast_perm->vec_, d_nnzrPerm, d_offset,
d_data, this->mat_.col, this->mat_.val);
else if (maxnnzrow > 16)
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_cols<ValueType, int, 32>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset,
cast_perm->vec_, d_nnzrPerm, d_offset,
d_data, this->mat_.col, this->mat_.val);
else if (maxnnzrow > 8)
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_cols<ValueType, int, 16>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset,
cast_perm->vec_, d_nnzrPerm, d_offset,
d_data, this->mat_.col, this->mat_.val);
else if (maxnnzrow > 4)
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_cols<ValueType, int, 8>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset,
cast_perm->vec_, d_nnzrPerm, d_offset,
d_data, this->mat_.col, this->mat_.val);
else
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_cols<ValueType, int, 4>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset,
cast_perm->vec_, d_nnzrPerm, d_offset,
d_data, this->mat_.col, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
free_gpu<int>(&d_offset);
free_gpu<ValueType>(&d_data);
free_gpu<int>(&d_nnzrPerm);
free_gpu<int>(&d_nnzr);
}
return true;
}
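// Outline of Permute() above, all intermediate buffers being temporaries that
// are freed before returning:
//   1. kernel_calc_row_nnz derives per-row counts from the old row_offset;
//   2. kernel_permute_row_nnz reorders those counts by the permutation;
//   3. cum_sum turns the permuted counts into the new row_offset;
//   4. kernel_permute_rows scatters every old row into d_offset/d_data at its
//      permuted position;
//   5. a block-wise max reduction (kernel_max) finds the longest row so that a
//      column-permutation kernel specialised for at most 4/8/16/32/64 entries
//      per row, or the generic fallback, can be selected;
//   6. the selected kernel writes the permuted columns and values back into
//      mat_.col and mat_.val.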
template <>
void GPUAcceleratorMatrixCSR<float>::Apply(const BaseVector<float> &in, BaseVector<float> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in) ;
GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out) ;
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
const float scalar = 1.0;
const float beta = 0.0;
stat_t = hipsparseScsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar,
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
cast_in->vec_, &beta,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
/*
// Using cusparse instead...
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_spmv_scalar<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val,
cast_in->vec_, cast_out->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
}
}
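// Apply() computes out = A * in, i.e. a CSR SpMV with alpha = 1 and beta = 0
// delegated to cuSPARSE; the hand-written scalar kernel is kept above only as
// a commented-out reference. A hedged usage sketch, assuming the accelerator
// vector exposes Allocate()/Ones() like the host vector does (names below are
// illustrative, not taken from a test):
//
//   GPUAcceleratorVector<float> x(backend), y(backend);  // 'backend' assumed
//   x.Allocate(A.get_ncol());
//   y.Allocate(A.get_nrow());
//   x.Ones();                 // assumption: Ones() fills the vector with 1.0
//   A.Apply(x, &y);           // y[i] = sum of the entries in row i of A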
template <>
void GPUAcceleratorMatrixCSR<double>::Apply(const BaseVector<double> &in, BaseVector<double> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in) ;
GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out) ;
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
const double scalar = 1.0;
const double beta = 0.0;
stat_t = hipsparseDcsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar,
this->mat_descr_,
this->mat_.val,
this->mat_.row_offset, this->mat_.col,
cast_in->vec_, &beta,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
/*
// Using cusparse instead...
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_spmv_scalar<double, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val,
cast_in->vec_, cast_out->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
}
}
template <>
void GPUAcceleratorMatrixCSR<float>::ApplyAdd(const BaseVector<float> &in, const float scalar,
BaseVector<float> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
const float beta = 1.0;
stat_t = hipsparseScsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar,
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
cast_in->vec_, &beta,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
/*
// Using cusparse now...
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_add_spmv_scalar<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val,
scalar, cast_in->vec_, cast_out->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
}
}
template <>
void GPUAcceleratorMatrixCSR<double>::ApplyAdd(const BaseVector<double> &in, const double scalar,
BaseVector<double> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
const double beta = 1.0;
stat_t = hipsparseDcsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar,
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
cast_in->vec_, &beta,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
/*
// Using cusparse now...
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_add_spmv_scalar<double, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val,
scalar, cast_in->vec_, cast_out->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
}
}
// Srinivas K
template <>
bool GPUAcceleratorMatrixCSR<float>::ILU0Factorize(void) {
if (this->get_nnz() > 0) {
hipsparseStatus_t stat_t;
/*
cusparseSolveAnalysisInfo_t infoA = 0;
stat_t = cusparseCreateSolveAnalysisInfo(&infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseScsrilu0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDestroySolveAnalysisInfo(infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<double>::ILU0Factorize(void) {
if (this->get_nnz() > 0) {
hipsparseStatus_t stat_t;
// Srinivas K
/*
cusparseSolveAnalysisInfo_t infoA = 0;
stat_t = cusparseCreateSolveAnalysisInfo(&infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDcsrilu0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDestroySolveAnalysisInfo(infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<float>::ICFactorize(BaseVector<float> *inv_diag) {
if (this->get_nnz() > 0) {
hipsparseStatus_t stat_t;
//Srinivas K
/*
cusparseSolveAnalysisInfo_t infoA = 0;
stat_t = cusparseCreateSolveAnalysisInfo(&infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->mat_descr_, HIPSPARSE_MATRIX_TYPE_SYMMETRIC);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseScsric0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<double>::ICFactorize(BaseVector<double> *inv_diag) {
if (this->get_nnz() > 0) {
hipsparseStatus_t stat_t;
// Srinivas K
/*
cusparseSolveAnalysisInfo_t infoA = 0;
stat_t = cusparseCreateSolveAnalysisInfo(&infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->mat_descr_, HIPSPARSE_MATRIX_TYPE_SYMMETRIC);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDcsric0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
void GPUAcceleratorMatrixCSR<double>::LUAnalyse(void) {
this->LUAnalyseClear();
hipsparseStatus_t stat_t;
/*
// L part
stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->L_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// U part
stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->U_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_UPPER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
assert(this->get_ncol() == this->get_nrow());
assert(this->tmp_vec_ == NULL);
this->tmp_vec_ = new GPUAcceleratorVector<double>(this->local_backend_);
assert(this->tmp_vec_ != NULL);
tmp_vec_->Allocate(this->get_nrow());
*/
}
template <>
void GPUAcceleratorMatrixCSR<float>::LUAnalyse(void) {
this->LUAnalyseClear();
hipsparseStatus_t stat_t;
/*
// L part
stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->L_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// U part
stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->U_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_UPPER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
assert(this->get_ncol() == this->get_nrow());
assert(this->tmp_vec_ == NULL);
this->tmp_vec_ = new GPUAcceleratorVector<float>(this->local_backend_);
assert(this->tmp_vec_ != NULL);
tmp_vec_->Allocate(this->get_nrow());
*/
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::LUAnalyseClear(void) {
hipsparseStatus_t stat_t;
/*
if (this->L_mat_info_ != 0) {
stat_t = cusparseDestroySolveAnalysisInfo(this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->L_mat_descr_ != 0) {
stat_t = hipsparseDestroyMatDescr(this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->U_mat_info_ != 0) {
stat_t = cusparseDestroySolveAnalysisInfo(this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->U_mat_descr_ != 0) {
stat_t = hipsparseDestroyMatDescr(this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
this->L_mat_descr_ = 0;
this->U_mat_descr_ = 0;
this->L_mat_info_ = 0;
this->U_mat_info_ = 0;
if (this ->tmp_vec_ != NULL) {
delete this->tmp_vec_ ;
this->tmp_vec_ = NULL;
}
*/
}
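// LUSolve applies the two triangular solves of the LU preconditioner: first L y = x into
// tmp_vec_, then U z = y into the output vector. As with the analysis above, the csrsv
// solve calls are disabled in this port, so the routine currently returns true without
// touching the output.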
template <>
bool GPUAcceleratorMatrixCSR<float>::LUSolve(const BaseVector<float> &in, BaseVector<float> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
assert(this->tmp_vec_ != NULL);
const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
float one = float(1.0);
// Solve L
stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_,
cast_in->vec_,
tmp_vec_->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Solve U
stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_,
tmp_vec_->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<double>::LUSolve(const BaseVector<double> &in, BaseVector<double> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
double one = double(1.0);
// Solve L
stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_,
cast_in->vec_,
this->tmp_vec_->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Solve U
stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_,
this->tmp_vec_->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
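// LLAnalyse prepares a Cholesky (L L^T) solve: both descriptors point at the same lower
// triangular data, and the second ("U") analysis uses the transposed operation so that
// L^T x = y can be solved from the stored L factor. Disabled here for the same reason as
// the LU analysis.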
template <>
void GPUAcceleratorMatrixCSR<double>::LLAnalyse(void) {
this->LLAnalyseClear();
hipsparseStatus_t stat_t;
/*
// L part
stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->L_mat_descr_,HIPSPARSE_MATRIX_TYPE_TRIANGULAR);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// U part
stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->U_mat_descr_,HIPSPARSE_MATRIX_TYPE_TRIANGULAR);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
assert(this->get_ncol() == this->get_nrow());
assert(this->tmp_vec_ == NULL);
this->tmp_vec_ = new GPUAcceleratorVector<double>(this->local_backend_);
assert(this->tmp_vec_ != NULL);
tmp_vec_->Allocate(this->get_nrow());
*/
}
template <>
void GPUAcceleratorMatrixCSR<float>::LLAnalyse(void) {
this->LLAnalyseClear();
hipsparseStatus_t stat_t;
/*
// L part
stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->L_mat_descr_,HIPSPARSE_MATRIX_TYPE_TRIANGULAR);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// U part
stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->U_mat_descr_,HIPSPARSE_MATRIX_TYPE_TRIANGULAR);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
assert(this->get_ncol() == this->get_nrow());
assert(this->tmp_vec_ == NULL);
this->tmp_vec_ = new GPUAcceleratorVector<float>(this->local_backend_);
assert(this->tmp_vec_ != NULL);
tmp_vec_->Allocate(this->get_nrow());
*/
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::LLAnalyseClear(void) {
hipsparseStatus_t stat_t;
/*
if (this->L_mat_info_ != 0) {
stat_t = cusparseDestroySolveAnalysisInfo(this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->L_mat_descr_ != 0) {
stat_t = hipsparseDestroyMatDescr(this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->U_mat_info_ != 0) {
stat_t = cusparseDestroySolveAnalysisInfo(this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->U_mat_descr_ != 0) {
stat_t = hipsparseDestroyMatDescr(this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
this->L_mat_descr_ = 0;
this->U_mat_descr_ = 0;
this->L_mat_info_ = 0;
this->U_mat_info_ = 0;
*/
  if (this->tmp_vec_ != NULL) {
    delete this->tmp_vec_;
this->tmp_vec_ = NULL;
}
}
template <>
bool GPUAcceleratorMatrixCSR<double>::LLSolve(const BaseVector<double> &in, BaseVector<double> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
double one = double(1.0);
// Solve L
stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_,
cast_in->vec_,
this->tmp_vec_->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Solve U
stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_TRANSPOSE,
this->get_nrow(),
&one,
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_,
this->tmp_vec_->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<float>::LLSolve(const BaseVector<float> &in, BaseVector<float> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
float one = float(1.0);
// Solve L
stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_,
cast_in->vec_,
this->tmp_vec_->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Solve U
stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_TRANSPOSE,
this->get_nrow(),
&one,
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_,
this->tmp_vec_->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::LLSolve(const BaseVector<ValueType> &in, const BaseVector<ValueType> &inv_diag,
BaseVector<ValueType> *out) const {
return LLSolve(in, out);
}
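// LAnalyse / UAnalyse set up a single triangular factor; diag_unit selects between a
// unit and a non-unit diagonal in the descriptor. As with the combined LU/LL variants,
// the analysis calls are commented out, so only the (unused) status variable remains.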
template <>
void GPUAcceleratorMatrixCSR<double>::LAnalyse(const bool diag_unit) {
hipsparseStatus_t stat_t;
/*
// L part
stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->L_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
if (diag_unit == true) {
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
} else {
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
}
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
template <>
void GPUAcceleratorMatrixCSR<float>::LAnalyse(const bool diag_unit) {
hipsparseStatus_t stat_t;
/*
// L part
stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->L_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
if (diag_unit == true) {
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
} else {
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
}
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
template <>
void GPUAcceleratorMatrixCSR<double>::UAnalyse(const bool diag_unit) {
hipsparseStatus_t stat_t;
/*
  // U part
stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->U_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_UPPER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
if (diag_unit == true) {
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
} else {
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
}
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
template <>
void GPUAcceleratorMatrixCSR<float>::UAnalyse(const bool diag_unit) {
hipsparseStatus_t stat_t;
/*
// U part
stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->U_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_UPPER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
if (diag_unit == true) {
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
} else {
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
}
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::LAnalyseClear(void) {
hipsparseStatus_t stat_t;
/*
if (this->L_mat_info_ != 0) {
stat_t = cusparseDestroySolveAnalysisInfo(this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->L_mat_descr_ != 0) {
stat_t = hipsparseDestroyMatDescr(this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
this->L_mat_descr_ = 0;
this->L_mat_info_ = 0;
*/
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::UAnalyseClear(void) {
hipsparseStatus_t stat_t;
/*
if (this->U_mat_info_ != 0) {
stat_t = cusparseDestroySolveAnalysisInfo(this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->U_mat_descr_ != 0) {
stat_t = hipsparseDestroyMatDescr(this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
this->U_mat_descr_ = 0;
this->U_mat_info_ = 0;
*/
}
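// LSolve / USolve perform one forward or backward substitution with the factor prepared
// by LAnalyse / UAnalyse. The csrsv solve calls are disabled like the analysis, so these
// routines currently return true without writing to the output vector.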
template <>
bool GPUAcceleratorMatrixCSR<double>::LSolve(const BaseVector<double> &in, BaseVector<double> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
double one = double(1.0);
// Solve L
stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_,
cast_in->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<float>::LSolve(const BaseVector<float> &in, BaseVector<float> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
float one = float(1.0);
// Solve L
stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_,
cast_in->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<double>::USolve(const BaseVector<double> &in, BaseVector<double> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
double one = double(1.0);
// Solve U
stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_,
cast_in->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<float>::USolve(const BaseVector<float> &in, BaseVector<float> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
float one = float(1.0);
// Solve U
stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_,
cast_in->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractDiagonal(BaseVector<ValueType> *vec_diag) const {
if (this->get_nnz() > 0) {
assert(vec_diag != NULL);
assert(vec_diag->get_size() == this->get_nrow());
GPUAcceleratorVector<ValueType> *cast_vec_diag = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec_diag);
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_diag<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val,
cast_vec_diag->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractInverseDiagonal(BaseVector<ValueType> *vec_inv_diag) const {
if (this->get_nnz() > 0) {
assert(vec_inv_diag != NULL);
assert(vec_inv_diag->get_size() == this->get_nrow());
GPUAcceleratorVector<ValueType> *cast_vec_inv_diag = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec_inv_diag);
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_inv_diag<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col,
this->mat_.val, cast_vec_inv_diag->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
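// ExtractSubMatrix works in two passes: a kernel counts the nonzeros of each sub-matrix
// row, cum_sum turns those counts into CSR row offsets (sub_nnz), and a second kernel
// copies the selected entries. When the sub-matrix is non-empty, sub_nnz is handed over
// to it as its row_offset array instead of being freed here.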
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractSubMatrix(const int row_offset,
const int col_offset,
const int row_size,
const int col_size,
BaseMatrix<ValueType> *mat) const {
assert(mat != NULL);
assert(row_offset >= 0);
assert(col_offset >= 0);
assert(this->get_nrow() > 0);
assert(this->get_ncol() > 0);
GPUAcceleratorMatrixCSR<ValueType> *cast_mat = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (mat) ;
assert(cast_mat != NULL);
int mat_nnz = 0;
int *row_nnz = NULL;
  //int *red_row_nnz = (int *) malloc(sizeof(int)*(row_size+1));
int *sub_nnz = NULL;
allocate_gpu<int>(row_size+1, &sub_nnz);
allocate_gpu(row_size+1, &row_nnz);
// compute the nnz per row in the new matrix
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(row_size / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_submatrix_row_nnz<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset, this->mat_.col, this->mat_.val,
row_offset, col_offset,
row_size, col_size,
row_nnz);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
// compute the new nnz by reduction
// CPU reduction
/*
hipMemcpy(red_row_nnz, // dst
row_nnz, // src
(row_size+1)*sizeof(int), // size
hipMemcpyDeviceToHost);
int sum=0;
for (int i=0; i<row_size; ++i) {
int tmp = red_row_nnz[i];
red_row_nnz[i] = sum;
sum += tmp;
}
mat_nnz = red_row_nnz[row_size] = sum ;
*/
//TODO
//move in extra file
cum_sum<int, 256>(sub_nnz, row_nnz, row_size);
hipMemcpy(&mat_nnz, &sub_nnz[row_size],
sizeof(int), hipMemcpyDeviceToHost);
// not empty submatrix
if (mat_nnz > 0) {
cast_mat->AllocateCSR(mat_nnz, row_size, col_size);
// part of the CPU reduction section
/*
hipMemcpy(cast_mat->mat_.row_offset, // dst
red_row_nnz, // src
(row_size+1)*sizeof(int), // size
hipMemcpyHostToDevice);
*/
free_gpu<int>(&cast_mat->mat_.row_offset);
cast_mat->mat_.row_offset = sub_nnz;
// copying the sub matrix
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_submatrix_copy<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset, this->mat_.col, this->mat_.val,
row_offset, col_offset,
row_size, col_size,
cast_mat->mat_.row_offset, cast_mat->mat_.col, cast_mat->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
free_gpu(&row_nnz);
return true;
}
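// ExtractL / ExtractLDiagonal / ExtractU / ExtractUDiagonal share one pattern: count the
// triangular nonzeros per row on the device, build the row-offset array with a prefix
// sum that is still performed on the host (see the TODO blocks), then fill the triangle
// with a copy kernel. A device-side alternative for the prefix-sum step could look like
// the sketch below (an illustration only, assuming a Thrust-compatible library such as
// rocThrust were available to this backend; it is not used by the code in this file):
//
//   #include <thrust/device_ptr.h>
//   #include <thrust/scan.h>
//   ...
//   hipMemset(cast_L->mat_.row_offset, 0, sizeof(int));       // row_offset[0] = 0
//   thrust::device_ptr<int> d_off(cast_L->mat_.row_offset);
//   thrust::inclusive_scan(d_off, d_off + nrow + 1, d_off);   // counts -> CSR offsets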
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractL(BaseMatrix<ValueType> *L) const {
assert(L != NULL);
assert(this->get_nrow() > 0);
assert(this->get_ncol() > 0);
GPUAcceleratorMatrixCSR<ValueType> *cast_L = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (L);
assert(cast_L != NULL);
cast_L->Clear();
// compute nnz per row
int nrow = this->get_nrow();
allocate_gpu<int>(nrow+1, &cast_L->mat_.row_offset);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_slower_nnz_per_row<int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, cast_L->mat_.row_offset+1);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
// partial sum row_nnz to obtain row_offset vector
// TODO currently performing partial sum on host
int *h_buffer = NULL;
allocate_host(nrow+1, &h_buffer);
hipMemcpy(h_buffer+1, // dst
cast_L->mat_.row_offset+1, // src
nrow*sizeof(int), // size
hipMemcpyDeviceToHost);
h_buffer[0] = 0;
for (int i=1; i<nrow+1; ++i)
h_buffer[i] += h_buffer[i-1];
int nnz_L = h_buffer[nrow];
hipMemcpy(cast_L->mat_.row_offset, // dst
h_buffer, // src
(nrow+1)*sizeof(int), // size
hipMemcpyHostToDevice);
free_host(&h_buffer);
// end TODO
// allocate lower triangular part structure
allocate_gpu<int>(nnz_L, &cast_L->mat_.col);
allocate_gpu<ValueType>(nnz_L, &cast_L->mat_.val);
// fill lower triangular part
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_l_triangular<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, this->mat_.val,
cast_L->mat_.row_offset,
cast_L->mat_.col,
cast_L->mat_.val);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
cast_L->nrow_ = this->get_nrow();
cast_L->ncol_ = this->get_ncol();
cast_L->nnz_ = nnz_L;
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractLDiagonal(BaseMatrix<ValueType> *L) const {
assert(L != NULL);
assert(this->get_nrow() > 0);
assert(this->get_ncol() > 0);
GPUAcceleratorMatrixCSR<ValueType> *cast_L = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (L);
assert(cast_L != NULL);
cast_L->Clear();
// compute nnz per row
int nrow = this->get_nrow();
allocate_gpu<int>(nrow+1, &cast_L->mat_.row_offset);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_lower_nnz_per_row<int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, cast_L->mat_.row_offset+1);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
// partial sum row_nnz to obtain row_offset vector
// TODO currently performing partial sum on host
int *h_buffer = NULL;
allocate_host(nrow+1, &h_buffer);
hipMemcpy(h_buffer+1, // dst
cast_L->mat_.row_offset+1, // src
nrow*sizeof(int), // size
hipMemcpyDeviceToHost);
h_buffer[0] = 0;
for (int i=1; i<nrow+1; ++i)
h_buffer[i] += h_buffer[i-1];
int nnz_L = h_buffer[nrow];
hipMemcpy(cast_L->mat_.row_offset, // dst
h_buffer, // src
(nrow+1)*sizeof(int), // size
hipMemcpyHostToDevice);
free_host(&h_buffer);
// end TODO
// allocate lower triangular part structure
allocate_gpu<int>(nnz_L, &cast_L->mat_.col);
allocate_gpu<ValueType>(nnz_L, &cast_L->mat_.val);
// fill lower triangular part
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_l_triangular<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, this->mat_.val,
cast_L->mat_.row_offset,
cast_L->mat_.col,
cast_L->mat_.val);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
cast_L->nrow_ = this->get_nrow();
cast_L->ncol_ = this->get_ncol();
cast_L->nnz_ = nnz_L;
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractU(BaseMatrix<ValueType> *U) const {
assert(U != NULL);
assert(this->get_nrow() > 0);
assert(this->get_ncol() > 0);
GPUAcceleratorMatrixCSR<ValueType> *cast_U = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (U);
assert(cast_U != NULL);
cast_U->Clear();
// compute nnz per row
int nrow = this->get_nrow();
allocate_gpu<int>(nrow+1, &cast_U->mat_.row_offset);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_supper_nnz_per_row<int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, cast_U->mat_.row_offset+1);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
// partial sum row_nnz to obtain row_offset vector
// TODO currently performing partial sum on host
int *h_buffer = NULL;
allocate_host(nrow+1, &h_buffer);
hipMemcpy(h_buffer+1, // dst
cast_U->mat_.row_offset+1, // src
nrow*sizeof(int), // size
hipMemcpyDeviceToHost);
h_buffer[0] = 0;
for (int i=1; i<nrow+1; ++i)
h_buffer[i] += h_buffer[i-1];
  int nnz_U = h_buffer[nrow];
  hipMemcpy(cast_U->mat_.row_offset, // dst
            h_buffer, // src
            (nrow+1)*sizeof(int), // size
            hipMemcpyHostToDevice);
  free_host(&h_buffer);
  // end TODO
  // allocate upper triangular part structure
  allocate_gpu<int>(nnz_U, &cast_U->mat_.col);
  allocate_gpu<ValueType>(nnz_U, &cast_U->mat_.val);
// fill upper triangular part
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_u_triangular<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, this->mat_.val,
cast_U->mat_.row_offset,
cast_U->mat_.col,
cast_U->mat_.val);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
cast_U->nrow_ = this->get_nrow();
cast_U->ncol_ = this->get_ncol();
  cast_U->nnz_ = nnz_U;
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractUDiagonal(BaseMatrix<ValueType> *U) const {
assert(U != NULL);
assert(this->get_nrow() > 0);
assert(this->get_ncol() > 0);
GPUAcceleratorMatrixCSR<ValueType> *cast_U = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (U);
assert(cast_U != NULL);
cast_U->Clear();
// compute nnz per row
int nrow = this->get_nrow();
allocate_gpu<int>(nrow+1, &cast_U->mat_.row_offset);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_upper_nnz_per_row<int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, cast_U->mat_.row_offset+1);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
// partial sum row_nnz to obtain row_offset vector
// TODO currently performing partial sum on host
int *h_buffer = NULL;
allocate_host(nrow+1, &h_buffer);
hipMemcpy(h_buffer+1, // dst
cast_U->mat_.row_offset+1, // src
nrow*sizeof(int), // size
hipMemcpyDeviceToHost);
h_buffer[0] = 0;
for (int i=1; i<nrow+1; ++i)
h_buffer[i] += h_buffer[i-1];
  int nnz_U = h_buffer[nrow];
  hipMemcpy(cast_U->mat_.row_offset, // dst
            h_buffer, // src
            (nrow+1)*sizeof(int), // size
            hipMemcpyHostToDevice);
  free_host(&h_buffer);
  // end TODO
  // allocate upper triangular part structure
  allocate_gpu<int>(nnz_U, &cast_U->mat_.col);
  allocate_gpu<ValueType>(nnz_U, &cast_U->mat_.val);
  // fill upper triangular part
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_u_triangular<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, this->mat_.val,
cast_U->mat_.row_offset,
cast_U->mat_.col,
cast_U->mat_.val);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
cast_U->nrow_ = this->get_nrow();
cast_U->ncol_ = this->get_ncol();
  cast_U->nnz_ = nnz_U;
return true;
}
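// MaximalIndependentSet copies the CSR structure to the host and builds a greedy MIS:
// every still unmarked node joins the set and its neighbours are excluded. The returned
// permutation lists the independent-set nodes first, followed by the remaining nodes in
// their original order.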
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::MaximalIndependentSet(int &size,
BaseVector<int> *permutation) const {
assert(permutation != NULL);
GPUAcceleratorVector<int> *cast_perm = dynamic_cast<GPUAcceleratorVector<int>*> (permutation);
assert(cast_perm != NULL);
assert(this->get_nrow() == this->get_ncol());
int *h_row_offset = NULL;
int *h_col = NULL;
allocate_host(this->get_nrow()+1, &h_row_offset);
allocate_host(this->get_nnz(), &h_col);
hipMemcpy(h_row_offset, this->mat_.row_offset, (this->get_nrow()+1)*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_col, this->mat_.col, this->get_nnz()*sizeof(int), hipMemcpyDeviceToHost);
int *mis = NULL;
allocate_host(this->get_nrow(), &mis);
memset(mis, 0, sizeof(int)*this->get_nrow());
size = 0 ;
for (int ai=0; ai<this->get_nrow(); ++ai) {
if (mis[ai] == 0) {
// set the node
mis[ai] = 1;
++size ;
      // remove all neighbouring nodes (excluding the diagonal entry)
for (int aj=h_row_offset[ai]; aj<h_row_offset[ai+1]; ++aj)
if (ai != h_col[aj])
mis[h_col[aj]] = -1 ;
}
}
int *h_perm = NULL;
allocate_host(this->get_nrow(), &h_perm);
int pos = 0;
for (int ai=0; ai<this->get_nrow(); ++ai) {
if (mis[ai] == 1) {
h_perm[ai] = pos;
++pos;
} else {
h_perm[ai] = size + ai - pos;
}
}
// Check the permutation
//
// for (int ai=0; ai<this->get_nrow(); ++ai) {
// assert( h_perm[ai] >= 0 );
// assert( h_perm[ai] < this->get_nrow() );
// }
cast_perm->Allocate(this->get_nrow());
hipMemcpy(cast_perm->vec_, h_perm, permutation->get_size()*sizeof(int), hipMemcpyHostToDevice);
free_host(&h_row_offset);
free_host(&h_col);
free_host(&h_perm);
free_host(&mis);
return true;
}
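// MultiColoring runs a greedy first-fit colouring on the host: each row takes the
// smallest colour not used by its neighbours. size_colors receives the number of rows
// per colour, and the permutation groups the rows colour by colour while preserving
// their original order inside each colour block.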
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::MultiColoring(int &num_colors,
int **size_colors,
BaseVector<int> *permutation) const {
assert(permutation != NULL);
GPUAcceleratorVector<int> *cast_perm = dynamic_cast<GPUAcceleratorVector<int>*> (permutation);
assert(cast_perm != NULL);
// node colors (init value = 0 i.e. no color)
int *color = NULL;
int *h_row_offset = NULL;
int *h_col = NULL;
int size = this->get_nrow();
allocate_host(size, &color);
allocate_host(this->get_nrow()+1, &h_row_offset);
allocate_host(this->get_nnz(), &h_col);
hipMemcpy(h_row_offset, this->mat_.row_offset, (this->get_nrow()+1)*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_col, this->mat_.col, this->get_nnz()*sizeof(int), hipMemcpyDeviceToHost);
memset(color, 0, size*sizeof(int));
num_colors = 0;
std::vector<bool> row_col;
for (int ai=0; ai<this->get_nrow(); ++ai) {
color[ai] = 1;
row_col.clear();
row_col.assign(num_colors+2, false);
for (int aj=h_row_offset[ai]; aj<h_row_offset[ai+1]; ++aj)
if (ai != h_col[aj])
row_col[color[h_col[aj]]] = true;
for (int aj=h_row_offset[ai]; aj<h_row_offset[ai+1]; ++aj)
if (row_col[color[ai]] == true)
++color[ai];
if (color[ai] > num_colors)
num_colors = color[ai];
}
free_host(&h_row_offset);
free_host(&h_col);
allocate_host(num_colors, size_colors);
set_to_zero_host(num_colors, *size_colors);
int *offsets_color = NULL;
allocate_host(num_colors, &offsets_color);
memset(offsets_color, 0, sizeof(int)*num_colors);
for (int i=0; i<this->get_nrow(); ++i)
++(*size_colors)[color[i]-1];
int total=0;
for (int i=1; i<num_colors; ++i) {
total += (*size_colors)[i-1];
offsets_color[i] = total;
// LOG_INFO("offsets = " << total);
}
int *h_perm = NULL;
allocate_host(this->get_nrow(), &h_perm);
for (int i=0; i<this->get_nrow(); ++i) {
h_perm[i] = offsets_color[ color[i]-1 ] ;
++offsets_color[color[i]-1];
}
cast_perm->Allocate(this->get_nrow());
hipMemcpy(cast_perm->vec_, h_perm, permutation->get_size()*sizeof(int), hipMemcpyHostToDevice);
free_host(&h_perm);
free_host(&color);
free_host(&offsets_color);
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<double>::Scale(const double alpha) {
if (this->get_nnz() > 0) {
hipblasStatus_t stat_t;
stat_t = hipblasDscal(CUBLAS_HANDLE(this->local_backend_.GPU_cublas_handle),
this->get_nnz(), &alpha,
this->mat_.val, 1);
CHECK_CUBLAS_ERROR(stat_t, __FILE__, __LINE__);
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<float>::Scale(const float alpha) {
if (this->get_nnz() > 0) {
hipblasStatus_t stat_t;
stat_t = hipblasSscal(CUBLAS_HANDLE(this->local_backend_.GPU_cublas_handle),
this->get_nnz(), &alpha,
this->mat_.val, 1);
CHECK_CUBLAS_ERROR(stat_t, __FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ScaleDiagonal(const ValueType alpha) {
if (this->get_nnz() > 0) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_scale_diagonal<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col,
alpha, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ScaleOffDiagonal(const ValueType alpha) {
if (this->get_nnz() > 0) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_scale_offdiagonal<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col,
alpha, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::AddScalarDiagonal(const ValueType alpha) {
if (this->get_nnz() > 0) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_add_diagonal<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col,
alpha, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::AddScalarOffDiagonal(const ValueType alpha) {
if (this->get_nnz() > 0) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_add_offdiagonal<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col,
alpha, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::AddScalar(const ValueType alpha) {
if (this->get_nnz() > 0) {
int nnz = this->get_nnz();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nnz / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_buffer_addscalar<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nnz, alpha, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::DiagonalMatrixMultR(const BaseVector<ValueType> &diag) {
assert(diag.get_size() == this->get_ncol());
const GPUAcceleratorVector<ValueType> *cast_diag = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&diag);
assert(cast_diag!= NULL);
if (this->get_nnz() > 0) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_diagmatmult_r<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col,
cast_diag->vec_, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::DiagonalMatrixMultL(const BaseVector<ValueType> &diag) {
assert(diag.get_size() == this->get_ncol());
const GPUAcceleratorVector<ValueType> *cast_diag = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&diag);
assert(cast_diag!= NULL);
if (this->get_nnz() > 0) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_diagmatmult_l<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
cast_diag->vec_, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
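// MatMatMult uses the two-phase csrgemm scheme: hipsparseXcsrgemmNnz first computes the
// row offsets and nnz of C, then the column/value arrays are filled. __cusparseXcsrgemm__
// is presumably the type-dispatching wrapper declared in cusparse_csr.hpp that the
// hipify pass left untouched.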
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::MatMatMult(const BaseMatrix<ValueType> &A, const BaseMatrix<ValueType> &B) {
assert(A.get_ncol() == B.get_nrow());
assert(A.get_nrow() > 0);
assert(B.get_ncol() > 0);
assert(B.get_nrow() > 0);
const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_A = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&A);
const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_B = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&B);
assert(cast_mat_A != NULL);
assert(cast_mat_B != NULL);
this->Clear();
int m = cast_mat_A->get_nrow();
int n = cast_mat_B->get_ncol();
int k = cast_mat_B->get_nrow();
int nnzC = 0;
allocate_gpu(m+1, &this->mat_.row_offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipsparseStatus_t stat_t;
stat_t = hipsparseSetPointerMode(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_POINTER_MODE_HOST);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseXcsrgemmNnz(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
m, n, k,
cast_mat_A->mat_descr_, cast_mat_A->get_nnz(),
cast_mat_A->mat_.row_offset, cast_mat_A->mat_.col,
cast_mat_B->mat_descr_, cast_mat_B->get_nnz(),
cast_mat_B->mat_.row_offset, cast_mat_B->mat_.col,
this->mat_descr_, this->mat_.row_offset,
&nnzC);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
allocate_gpu(nnzC, &this->mat_.col);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
allocate_gpu(nnzC, &this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
this->nrow_ = m;
this->ncol_ = n;
this->nnz_ = nnzC;
stat_t = __cusparseXcsrgemm__(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
m, n, k,
// A
cast_mat_A->mat_descr_, cast_mat_A->get_nnz(),
cast_mat_A->mat_.val,
cast_mat_A->mat_.row_offset, cast_mat_A->mat_.col,
// B
cast_mat_B->mat_descr_, cast_mat_B->get_nnz(),
cast_mat_B->mat_.val,
cast_mat_B->mat_.row_offset, cast_mat_B->mat_.col,
// C
this->mat_descr_,
this->mat_.val,
this->mat_.row_offset, this->mat_.col);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
return true;
}
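// Gershgorin bounds are not implemented on the accelerator; returning false presumably
// lets the caller fall back to the host implementation.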
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::Gershgorin(ValueType &lambda_min,
ValueType &lambda_max) const {
return false;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::MatrixAdd(const BaseMatrix<ValueType> &mat, const ValueType alpha,
const ValueType beta, const bool structure) {
if (this->get_nnz() > 0) {
const GPUAcceleratorMatrixCSR<ValueType> *cast_mat = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat);
assert(cast_mat != NULL);
assert(cast_mat->get_nrow() == this->get_nrow());
assert(cast_mat->get_ncol() == this->get_ncol());
    assert(this->get_nnz() > 0);
assert(cast_mat->get_nnz() > 0);
if (structure == false) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_add_csr_same_struct<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow,
this->mat_.row_offset, this->mat_.col,
cast_mat->mat_.row_offset,
cast_mat->mat_.col, cast_mat->mat_.val,
alpha, beta, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
} else {
// New structure with CUSPARSE routines
int m = this->get_nrow();
int n = this->get_ncol();
int *csrRowPtrC = NULL;
int *csrColC = NULL;
ValueType *csrValC = NULL;
int nnzC;
allocate_gpu(m+1, &csrRowPtrC);
hipsparseStatus_t stat_t;
hipsparseMatDescr_t desc_mat_C = 0;
stat_t = hipsparseCreateMatDescr(&desc_mat_C);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(desc_mat_C, HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(desc_mat_C, HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetPointerMode(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_POINTER_MODE_HOST);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
/*
stat_t = hipsparseXcsrgeamNnz(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
m, n,
this->mat_descr_, this->get_nnz(),
this->mat_.row_offset, this->mat_.col,
cast_mat->mat_descr_, cast_mat->get_nnz(),
cast_mat->mat_.row_offset, cast_mat->mat_.col,
desc_mat_C, csrRowPtrC,
&nnzC);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
allocate_gpu(nnzC, &csrColC);
allocate_gpu(nnzC, &csrValC);
stat_t = __cusparseXcsrgeam__(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
m, n,
// A
&alpha,
this->mat_descr_, this->get_nnz(),
this->mat_.val,
this->mat_.row_offset, this->mat_.col,
// B
&beta,
cast_mat->mat_descr_, cast_mat->get_nnz(),
cast_mat->mat_.val,
cast_mat->mat_.row_offset, cast_mat->mat_.col,
// C
desc_mat_C,
csrValC,
csrRowPtrC, csrColC);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
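      // Note: with the csrgeam calls above commented out, nnzC stays uninitialised and
      // csrColC/csrValC remain NULL, so this structural-add branch does not yet produce
      // a valid result on the GPU backend in this port.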
stat_t = hipsparseDestroyMatDescr(desc_mat_C);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
this->Clear();
this->mat_.row_offset = csrRowPtrC;
this->mat_.col = csrColC;
this->mat_.val = csrValC;
this->nrow_ = m;
this->ncol_ = n;
this->nnz_ = nnzC;
}
}
return true;
}
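// Compress removes entries whose magnitude falls below the drop_off threshold, using the
// same two-pass count / prefix-sum / copy scheme as the extraction routines above and
// working on a temporary copy of the original matrix.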
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::Compress(const double drop_off) {
if (this->get_nnz() > 0) {
GPUAcceleratorMatrixCSR<ValueType> tmp(this->local_backend_);
tmp.CopyFrom(*this);
int mat_nnz = 0;
int *row_offset = NULL;
allocate_gpu(this->get_nrow()+1, &row_offset);
int *mat_row_offset = NULL;
allocate_gpu(this->get_nrow()+1, &mat_row_offset);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
this->get_nrow()+1, row_offset);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(this->get_nrow() / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_compress_count_nrow<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset,
this->mat_.col,
this->mat_.val,
this->get_nrow(),
drop_off,
row_offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
// TODO
cum_sum<int, 256>(mat_row_offset, row_offset, this->get_nrow());
// get the new mat nnz
hipMemcpy(&mat_nnz, &mat_row_offset[this->get_nrow()],
sizeof(int), hipMemcpyDeviceToHost);
this->AllocateCSR(mat_nnz, this->get_nrow(), this->get_ncol());
// TODO - just exchange memory pointers
// copy row_offset
hipMemcpy(this->mat_.row_offset, mat_row_offset,
(this->get_nrow()+1)*sizeof(int), hipMemcpyDeviceToDevice);
// copy col and val
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_compress_copy<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, tmp.mat_.row_offset,
tmp.mat_.col,
tmp.mat_.val,
tmp.get_nrow(),
drop_off,
this->mat_.row_offset,
this->mat_.col,
this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_gpu(&row_offset);
free_gpu(&mat_row_offset);
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<double>::Transpose(void) {
if (this->get_nnz() > 0) {
GPUAcceleratorMatrixCSR<double> tmp(this->local_backend_);
tmp.CopyFrom(*this);
this->Clear();
this->AllocateCSR(tmp.get_nnz(), tmp.get_ncol(), tmp.get_nrow());
hipsparseStatus_t stat_t;
stat_t = hipsparseDcsr2csc(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
tmp.get_nrow(), tmp.get_ncol(), tmp.get_nnz(),
tmp.mat_.val, tmp.mat_.row_offset, tmp.mat_.col,
this->mat_.val, this->mat_.col, this->mat_.row_offset,
HIPSPARSE_ACTION_NUMERIC,
HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<float>::Transpose(void) {
if (this->get_nnz() > 0) {
GPUAcceleratorMatrixCSR<float> tmp(this->local_backend_);
tmp.CopyFrom(*this);
this->Clear();
this->AllocateCSR(tmp.get_nnz(), tmp.get_ncol(), tmp.get_nrow());
hipsparseStatus_t stat_t;
stat_t = hipsparseScsr2csc(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
tmp.get_nrow(), tmp.get_ncol(), tmp.get_nnz(),
tmp.mat_.val, tmp.mat_.row_offset, tmp.mat_.col,
this->mat_.val, this->mat_.col, this->mat_.row_offset,
HIPSPARSE_ACTION_NUMERIC,
HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
return true;
}
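// ReplaceColumnVector rebuilds the matrix with column idx taken from vec: a first kernel
// recomputes each row's nnz after the replacement, the offsets are prefix-summed on the
// host, and a second kernel writes the new col/val arrays before SetDataPtrCSR installs
// them.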
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ReplaceColumnVector(const int idx, const BaseVector<ValueType> &vec) {
  assert(vec.get_size() == this->nrow_);
if (this->get_nnz() > 0) {
const GPUAcceleratorVector<ValueType> *cast_vec = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&vec);
assert(cast_vec != NULL);
int *row_offset = NULL;
int *col = NULL;
ValueType *val = NULL;
int nrow = this->get_nrow();
int ncol = this->get_ncol();
allocate_gpu(nrow+1, &row_offset);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_replace_column_vector_offset<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset,
this->mat_.col,
nrow,
idx,
cast_vec->vec_,
row_offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
int *host_offset = NULL;
allocate_host(nrow+1, &host_offset);
hipMemcpy(host_offset,
row_offset,
sizeof(int)*(nrow+1),
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
host_offset[0] = 0;
for (int i=0; i<nrow; ++i)
host_offset[i+1] += host_offset[i];
int nnz = host_offset[nrow];
hipMemcpy(row_offset,
host_offset,
sizeof(int)*(nrow+1),
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
allocate_gpu(nnz, &col);
allocate_gpu(nnz, &val);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_replace_column_vector<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset,
this->mat_.col,
this->mat_.val,
nrow,
idx,
cast_vec->vec_,
row_offset,
col,
val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
this->Clear();
this->SetDataPtrCSR(&row_offset, &col, &val, nnz, nrow, ncol);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractColumnVector(const int idx, BaseVector<ValueType> *vec) const {
assert(vec != NULL);
assert(vec->get_size() == this->nrow_);
if (this->get_nnz() > 0) {
GPUAcceleratorVector<ValueType> *cast_vec = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec);
assert(cast_vec != NULL);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(this->get_nrow() / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_column_vector<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset,
this->mat_.col,
this->mat_.val,
this->get_nrow(),
idx,
cast_vec->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractRowVector(const int idx, BaseVector<ValueType> *vec) const {
assert(vec != NULL);
assert(vec->get_size() == this->ncol_);
if (this->get_nnz() > 0) {
GPUAcceleratorVector<ValueType> *cast_vec = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec);
assert(cast_vec != NULL);
cast_vec->Zeros();
// Get nnz of row idx
int nnz[2];
hipMemcpy(nnz,
this->mat_.row_offset+idx,
2*sizeof(int),
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
int row_nnz = nnz[1] - nnz[0];
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(row_nnz / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_row_vector<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset,
this->mat_.col,
this->mat_.val,
row_nnz,
idx,
cast_vec->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
// Srinivas K
template class GPUAcceleratorMatrixCSR<double>;
template class GPUAcceleratorMatrixCSR<float>;
}
|
1ec4fdb1e388f63d4d6d8c03656c0d9f66b26a30.cu
|
#include "hip/hip_runtime.h"
// **************************************************************************
//
// PARALUTION www.paralution.com
//
// Copyright (C) 2015 PARALUTION Labs UG (haftungsbeschränkt) & Co. KG
// Am Hasensprung 6, 76571 Gaggenau
// Handelsregister: Amtsgericht Mannheim, HRA 706051
// Vertreten durch:
// PARALUTION Labs Verwaltungs UG (haftungsbeschränkt)
// Am Hasensprung 6, 76571 Gaggenau
// Handelsregister: Amtsgericht Mannheim, HRB 721277
// Geschäftsführer: Dimitar Lukarski, Nico Trost
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// **************************************************************************
// PARALUTION version 1.1.0
#include "../../utils/def.hpp"
#include "gpu_matrix_csr.hpp"
#include "gpu_matrix_coo.hpp"
#include "gpu_matrix_dia.hpp"
#include "gpu_matrix_ell.hpp"
#include "gpu_matrix_hyb.hpp"
#include "gpu_matrix_mcsr.hpp"
#include "gpu_matrix_bcsr.hpp"
#include "gpu_matrix_dense.hpp"
#include "gpu_vector.hpp"
#include "../host/host_matrix_csr.hpp"
#include "../base_matrix.hpp"
#include "../base_vector.hpp"
#include "../backend_manager.hpp"
#include "../../utils/log.hpp"
#include "../../utils/allocate_free.hpp"
#include "gpu_utils.hpp"
#include "cuda_kernels_general.hpp"
#include "cuda_kernels_csr.hpp"
#include "cuda_kernels_vector.hpp"
#include "cusparse_csr.hpp"
#include "gpu_allocate_free.hpp"
#include "../matrix_formats_ind.hpp"
#include <hip/hip_runtime.h>
//#include <hipsparse.h>
#include "hipsparse.h"
namespace paralution {
template <typename ValueType>
GPUAcceleratorMatrixCSR<ValueType>::GPUAcceleratorMatrixCSR() {
// no default constructors
LOG_INFO("no default constructor");
FATAL_ERROR(__FILE__, __LINE__);
}
template <typename ValueType>
GPUAcceleratorMatrixCSR<ValueType>::GPUAcceleratorMatrixCSR(const Paralution_Backend_Descriptor local_backend) {
LOG_DEBUG(this, "GPUAcceleratorMatrixCSR::GPUAcceleratorMatrixCSR()",
"constructor with local_backend");
this->mat_.row_offset = NULL;
this->mat_.col = NULL;
this->mat_.val = NULL;
this->set_backend(local_backend);
this->L_mat_descr_ = 0;
this->U_mat_descr_ = 0;
//this->L_mat_info_ = 0;
//this->U_mat_info_ = 0;
this->mat_descr_ = 0;
this->tmp_vec_ = NULL;
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipsparseStatus_t stat_t;
stat_t = hipsparseCreateMatDescr(&this->mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->mat_descr_, HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->mat_descr_, HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
template <typename ValueType>
GPUAcceleratorMatrixCSR<ValueType>::~GPUAcceleratorMatrixCSR() {
LOG_DEBUG(this, "GPUAcceleratorMatrixCSR::~GPUAcceleratorMatrixCSR()",
"destructor");
this->Clear();
hipsparseStatus_t stat_t;
stat_t = hipsparseDestroyMatDescr(this->mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::info(void) const {
LOG_INFO("GPUAcceleratorMatrixCSR<ValueType>");
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::AllocateCSR(const int nnz, const int nrow, const int ncol) {
assert(nnz >= 0);
assert(ncol >= 0);
assert(nrow >= 0);
if (this->get_nnz() > 0)
this->Clear();
if (nnz > 0) {
allocate_gpu(nrow+1, &this->mat_.row_offset);
allocate_gpu(nnz, &this->mat_.col);
allocate_gpu(nnz, &this->mat_.val);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nrow+1, mat_.row_offset);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nnz, mat_.col);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nnz, mat_.val);
this->nrow_ = nrow;
this->ncol_ = ncol;
this->nnz_ = nnz;
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::SetDataPtrCSR(int **row_offset, int **col, ValueType **val,
const int nnz, const int nrow, const int ncol) {
assert(*row_offset != NULL);
assert(*col != NULL);
assert(*val != NULL);
assert(nnz > 0);
assert(nrow > 0);
assert(ncol > 0);
this->Clear();
this->nrow_ = nrow;
this->ncol_ = ncol;
this->nnz_ = nnz;
hipDeviceSynchronize();
this->mat_.row_offset = *row_offset;
this->mat_.col = *col;
this->mat_.val = *val;
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::LeaveDataPtrCSR(int **row_offset, int **col, ValueType **val) {
assert(this->get_nrow() > 0);
assert(this->get_ncol() > 0);
assert(this->get_nnz() > 0);
hipDeviceSynchronize();
// see free_host function for details
*row_offset = this->mat_.row_offset;
*col = this->mat_.col;
*val = this->mat_.val;
this->mat_.row_offset = NULL;
this->mat_.col = NULL;
this->mat_.val = NULL;
this->nrow_ = 0;
this->ncol_ = 0;
this->nnz_ = 0;
}
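// Illustrative use of the ownership transfer above (sketch, not part of the original sources):
//   int *row = NULL, *col = NULL; double *val = NULL;
//   allocate_gpu(nrow+1, &row); allocate_gpu(nnz, &col); allocate_gpu(nnz, &val);
//   /* fill the arrays on the device ... */
//   mat.SetDataPtrCSR(&row, &col, &val, nnz, nrow, ncol); // mat takes ownership of the device arrays
//   mat.LeaveDataPtrCSR(&row, &col, &val);                // ownership handed back, mat becomes empty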
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::Clear() {
if (this->get_nnz() > 0) {
free_gpu(&this->mat_.row_offset);
free_gpu(&this->mat_.col);
free_gpu(&this->mat_.val);
this->nrow_ = 0;
this->ncol_ = 0;
this->nnz_ = 0;
this->LUAnalyseClear();
this->LLAnalyseClear();
}
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::Zeros() {
if (this->get_nnz() > 0)
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
this->get_nnz(), mat_.val);
return true;
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) {
const HostMatrixCSR<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// CPU to GPU copy
if ((cast_mat = dynamic_cast<const HostMatrixCSR<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() );
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(this->mat_.row_offset, // dst
cast_mat->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.col, // dst
cast_mat->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) {
const HostMatrixCSR<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// CPU to GPU copy
if ((cast_mat = dynamic_cast<const HostMatrixCSR<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() );
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpyAsync(this->mat_.row_offset, // dst
cast_mat->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpyAsync(this->mat_.col, // dst
cast_mat->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpyAsync(this->mat_.val, // dst
cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const {
HostMatrixCSR<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to CPU copy
if ((cast_mat = dynamic_cast<HostMatrixCSR<ValueType>*> (dst)) != NULL) {
cast_mat->set_backend(this->local_backend_);
if (dst->get_nnz() == 0)
cast_mat->AllocateCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() );
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(cast_mat->mat_.row_offset, // dst
this->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(cast_mat->mat_.col, // dst
this->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const {
HostMatrixCSR<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to CPU copy
if ((cast_mat = dynamic_cast<HostMatrixCSR<ValueType>*> (dst)) != NULL) {
cast_mat->set_backend(this->local_backend_);
if (dst->get_nnz() == 0)
cast_mat->AllocateCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() );
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpyAsync(cast_mat->mat_.row_offset, // dst
this->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpyAsync(cast_mat->mat_.col, // dst
this->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpyAsync(cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) {
const GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat;
const HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() );
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(this->mat_.row_offset, // dst
gpu_cast_mat->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.col, // dst
gpu_cast_mat->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
gpu_cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//CPU to GPU
if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) {
this->CopyFromHost(*host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) {
const GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat;
const HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateCSR(src.get_nnz(), src.get_nrow(), src.get_ncol() );
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(this->mat_.row_offset, // dst
gpu_cast_mat->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.col, // dst
gpu_cast_mat->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
gpu_cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//CPU to GPU
if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) {
this->CopyFromHostAsync(*host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const {
GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat;
HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (dst)) != NULL) {
gpu_cast_mat->set_backend(this->local_backend_);
    if (dst->get_nnz() == 0)
      gpu_cast_mat->AllocateCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() );
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(gpu_cast_mat->mat_.row_offset, // dst
this->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
                 hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(gpu_cast_mat->mat_.col, // dst
this->mat_.col, // src
this->get_nnz()*sizeof(int), // size
                 hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(gpu_cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
                 hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//GPU to CPU
if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) {
this->CopyToHost(host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const {
GPUAcceleratorMatrixCSR<ValueType> *gpu_cast_mat;
HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (dst)) != NULL) {
gpu_cast_mat->set_backend(this->local_backend_);
    if (dst->get_nnz() == 0)
      gpu_cast_mat->AllocateCSR(this->get_nnz(), this->get_nrow(), this->get_ncol() );
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(gpu_cast_mat->mat_.row_offset, // dst
this->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
                 hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(gpu_cast_mat->mat_.col, // dst
this->mat_.col, // src
this->get_nnz()*sizeof(int), // size
                 hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(gpu_cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
                 hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//GPU to CPU
if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) {
this->CopyToHostAsync(host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyFromCSR(const int *row_offsets, const int *col, const ValueType *val) {
// assert CSR format
assert(this->get_mat_format() == CSR);
if (this->get_nnz() > 0) {
assert(this->nrow_ > 0);
assert(this->ncol_ > 0);
hipMemcpy(this->mat_.row_offset, // dst
row_offsets, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.col, // dst
col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyToCSR(int *row_offsets, int *col, ValueType *val) const {
// assert CSR format
assert(this->get_mat_format() == CSR);
if (this->get_nnz() > 0) {
assert(this->nrow_ > 0);
assert(this->ncol_ > 0);
hipMemcpy(row_offsets, // dst
this->mat_.row_offset, // src
(this->get_nrow()+1)*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(col, // dst
this->mat_.col, // src
this->get_nnz()*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
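// Only CSR-to-CSR conversion is handled on the device; the commented branches below
// are placeholders for the other formats, for which the method returns false.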
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) {
this->Clear();
// empty matrix is empty matrix
if (mat.get_nnz() == 0)
return true;
const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr;
if ((cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) != NULL) {
this->CopyFrom(*cast_mat_csr);
return true;
}
/*
const GPUAcceleratorMatrixCOO<ValueType> *cast_mat_coo;
if ((cast_mat_coo = dynamic_cast<const GPUAcceleratorMatrixCOO<ValueType>*> (&mat)) != NULL) {
this->Clear();
TODO
Allocate
copy colmn
copy val
hipsparseStatus_t
hipsparseXcoo2csr(hipsparseHandle_t handle, const int *cooRowInd,
int nnz, int m, int *csrRowPtr, hipsparseIndexBase_t
idxBase);
FATAL_ERROR(__FILE__, __LINE__);
this->nrow_ = cast_mat_coo->get_nrow();
this->ncol_ = cast_mat_coo->get_ncol();
this->nnz_ = cast_mat_coo->get_nnz();
return true;
}
*/
/*
const GPUAcceleratorMatrixDENSE<ValueType> *cast_mat_dense;
if ((cast_mat_dense = dynamic_cast<const GPUAcceleratorMatrixDENSE<ValueType>*> (&mat)) != NULL) {
this->Clear();
int nnz = 0;
FATAL_ERROR(__FILE__, __LINE__);
this->nrow_ = cast_mat_dense->get_nrow();
this->ncol_ = cast_mat_dense->get_ncol();
this->nnz_ = nnz;
return true;
}
*/
/*
const GPUAcceleratorMatrixDIA<ValueType> *cast_mat_dia;
if ((cast_mat_dia = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&mat)) != NULL) {
this->Clear();
int nnz = 0;
FATAL_ERROR(__FILE__, __LINE__);
this->nrow_ = cast_mat_dia->get_nrow();
this->ncol_ = cast_mat_dia->get_ncol();
this->nnz_ = nnz ;
return true;
}
*/
/*
const GPUAcceleratorMatrixELL<ValueType> *cast_mat_ell;
if ((cast_mat_ell = dynamic_cast<const GPUAcceleratorMatrixELL<ValueType>*> (&mat)) != NULL) {
this->Clear();
int nnz = 0;
FATAL_ERROR(__FILE__, __LINE__);
this->nrow_ = cast_mat_ell->get_nrow();
this->ncol_ = cast_mat_ell->get_ncol();
this->nnz_ = nnz ;
return true;
}
*/
/*
const GPUAcceleratorMatrixMCSR<ValueType> *cast_mat_mcsr;
if ((cast_mat_mcsr = dynamic_cast<const GPUAcceleratorMatrixMCSR<ValueType>*> (&mat)) != NULL) {
this->Clear();
FATAL_ERROR(__FILE__, __LINE__);
this->nrow_ = cast_mat_mcsr->get_nrow();
this->ncol_ = cast_mat_mcsr->get_ncol();
this->nnz_ = cast_mat_mcsr->get_nnz();
return true;
}
*/
/*
const GPUAcceleratorMatrixHYB<ValueType> *cast_mat_hyb;
if ((cast_mat_hyb = dynamic_cast<const GPUAcceleratorMatrixHYB<ValueType>*> (&mat)) != NULL) {
this->Clear();
FATAL_ERROR(__FILE__, __LINE__);
int nnz = 0;
this->nrow_ = cast_mat_hyb->get_nrow();
this->ncol_ = cast_mat_hyb->get_ncol();
this->nnz_ = nnz;
return true;
}
*/
return false;
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::CopyFromHostCSR(const int *row_offset, const int *col, const ValueType *val,
const int nnz, const int nrow, const int ncol) {
assert(nnz >= 0);
assert(ncol >= 0);
assert(nrow >= 0);
assert(row_offset != NULL);
assert(col != NULL);
assert(val != NULL);
// Allocate matrix
if (this->nnz_ > 0)
this->Clear();
if (nnz > 0) {
allocate_gpu(nrow+1, &this->mat_.row_offset);
allocate_gpu(nnz, &this->mat_.col);
allocate_gpu(nnz, &this->mat_.val);
this->nrow_ = nrow;
this->ncol_ = ncol;
this->nnz_ = nnz;
hipMemcpy(this->mat_.row_offset, // dst
row_offset, // src
(this->nrow_+1)*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.col, // dst
col, // src
this->nnz_*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
val, // src
this->nnz_*sizeof(ValueType), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
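// Symmetric permutation (rows and columns) of the matrix:
//  1) count the nnz of every row and permute the counts,
//  2) build the permuted row_offset with an exclusive scan (cum_sum),
//  3) copy the rows into temporary col/val buffers in permuted order,
//  4) reduce the row counts to the maximum row length and pick a
//     kernel_permute_cols variant (widths 4..64, fallback otherwise) that
//     applies the permutation to the column indices and writes the final col/val.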
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::Permute( const BaseVector<int> &permutation){
assert(&permutation != NULL);
assert(permutation.get_size() == this->get_nrow());
assert(permutation.get_size() == this->get_ncol());
if (this->get_nnz() > 0) {
int *d_nnzr = NULL;
int *d_nnzrPerm = NULL;
int *d_nnzPerm = NULL;
int *d_offset = NULL;
ValueType *d_data = NULL;
allocate_gpu<int>(this->get_nrow(), &d_nnzr);
allocate_gpu<int>(this->get_nrow(), &d_nnzrPerm);
allocate_gpu<int>((this->get_nrow()+1), &d_nnzPerm);
allocate_gpu<ValueType>(this->get_nnz(), &d_data);
allocate_gpu<int>(this->get_nnz(), &d_offset);
const GPUAcceleratorVector<int> *cast_perm = dynamic_cast<const GPUAcceleratorVector<int>*> (&permutation);
assert(cast_perm != NULL);
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_calc_row_nnz<int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset, d_nnzr);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_row_nnz<int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), d_nnzr, cast_perm->vec_, d_nnzrPerm);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
//TODO
//move in extra file
cum_sum<int, 256>(d_nnzPerm, d_nnzrPerm, this->get_nrow());
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_rows<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0,
this->get_nrow(),
this->mat_.row_offset,
d_nnzPerm,
this->mat_.col,
this->mat_.val,
cast_perm->vec_,
d_nnzr,
d_offset,
d_data);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
free_gpu<int>(&this->mat_.row_offset);
this->mat_.row_offset = d_nnzPerm;
int *d_buffer = NULL;
int *h_buffer = NULL;
int GROUP_SIZE;
int LOCAL_SIZE;
int FinalReduceSize;
allocate_gpu<int>(this->local_backend_.GPU_warp * 4, &d_buffer);
dim3 BlockSize2(this->local_backend_.GPU_block_size);
dim3 GridSize2(this->local_backend_.GPU_warp * 4);
GROUP_SIZE = ( size_t( ( size_t( nrow / ( this->local_backend_.GPU_warp * 4 ) ) + 1 )
/ this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size;
LOCAL_SIZE = GROUP_SIZE / this->local_backend_.GPU_block_size;
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_max<int, int, 256>), dim3(GridSize2), dim3(BlockSize2), 0, 0, nrow, d_nnzr, d_buffer, GROUP_SIZE, LOCAL_SIZE);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
FinalReduceSize = this->local_backend_.GPU_warp * 4;
allocate_host(FinalReduceSize, &h_buffer);
hipMemcpy(h_buffer, // dst
d_buffer, // src
FinalReduceSize*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_gpu<int>(&d_buffer);
int maxnnzrow = 0;
for (int i=0; i<FinalReduceSize; ++i)
if (maxnnzrow < h_buffer[i])
maxnnzrow = h_buffer[i];
free_host(&h_buffer);
if (maxnnzrow > 64)
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_cols_fallback<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset,
cast_perm->vec_, d_nnzrPerm, d_offset,
d_data, this->mat_.col, this->mat_.val);
else if (maxnnzrow > 32)
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_cols<ValueType, int, 64>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset,
cast_perm->vec_, d_nnzrPerm, d_offset,
d_data, this->mat_.col, this->mat_.val);
else if (maxnnzrow > 16)
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_cols<ValueType, int, 32>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset,
cast_perm->vec_, d_nnzrPerm, d_offset,
d_data, this->mat_.col, this->mat_.val);
else if (maxnnzrow > 8)
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_cols<ValueType, int, 16>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset,
cast_perm->vec_, d_nnzrPerm, d_offset,
d_data, this->mat_.col, this->mat_.val);
else if (maxnnzrow > 4)
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_cols<ValueType, int, 8>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset,
cast_perm->vec_, d_nnzrPerm, d_offset,
d_data, this->mat_.col, this->mat_.val);
else
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_permute_cols<ValueType, int, 4>), dim3(GridSize), dim3(BlockSize), 0, 0, this->get_nrow(), this->mat_.row_offset,
cast_perm->vec_, d_nnzrPerm, d_offset,
d_data, this->mat_.col, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
free_gpu<int>(&d_offset);
free_gpu<ValueType>(&d_data);
free_gpu<int>(&d_nnzrPerm);
free_gpu<int>(&d_nnzr);
}
return true;
}
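// SpMV via hipsparse{S,D}csrmv: Apply computes out = 1*A*in + 0*out,
// ApplyAdd computes out = scalar*A*in + 1*out.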
template <>
void GPUAcceleratorMatrixCSR<float>::Apply(const BaseVector<float> &in, BaseVector<float> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in) ;
GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out) ;
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
const float scalar = 1.0;
const float beta = 0.0;
stat_t = hipsparseScsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar,
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
cast_in->vec_, &beta,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
/*
// Using cusparse instead...
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_spmv_scalar<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val,
cast_in->vec_, cast_out->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
}
}
template <>
void GPUAcceleratorMatrixCSR<double>::Apply(const BaseVector<double> &in, BaseVector<double> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in) ;
GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out) ;
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
const double scalar = 1.0;
const double beta = 0.0;
stat_t = hipsparseDcsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar,
this->mat_descr_,
this->mat_.val,
this->mat_.row_offset, this->mat_.col,
cast_in->vec_, &beta,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
/*
// Using cusparse instead...
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_spmv_scalar<double, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val,
cast_in->vec_, cast_out->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
}
}
template <>
void GPUAcceleratorMatrixCSR<float>::ApplyAdd(const BaseVector<float> &in, const float scalar,
BaseVector<float> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
const float beta = 1.0;
stat_t = hipsparseScsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar,
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
cast_in->vec_, &beta,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
/*
// Using cusparse now...
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_add_spmv_scalar<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val,
scalar, cast_in->vec_, cast_out->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
}
}
template <>
void GPUAcceleratorMatrixCSR<double>::ApplyAdd(const BaseVector<double> &in, const double scalar,
BaseVector<double> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
const double beta = 1.0;
stat_t = hipsparseDcsrmv(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_ncol(), this->get_nnz(), &scalar,
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
cast_in->vec_, &beta,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
/*
// Using cusparse now...
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_add_spmv_scalar<double, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val,
scalar, cast_in->vec_, cast_out->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
*/
}
}
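// NOTE: the ILU(0)/IC(0) factorizations and the LU/LL/L/U analyse and solve routines
// below relied on the legacy cuSPARSE solve-analysis API (cusparseCreateSolveAnalysisInfo,
// csrsv_analysis/csrsv_solve, csrilu0, csric0), which is commented out in this HIP port;
// as written they perform no work (the factorize/solve variants simply return true).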
// Srinivas K
template <>
bool GPUAcceleratorMatrixCSR<float>::ILU0Factorize(void) {
if (this->get_nnz() > 0) {
hipsparseStatus_t stat_t;
/*
cusparseSolveAnalysisInfo_t infoA = 0;
stat_t = cusparseCreateSolveAnalysisInfo(&infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseScsrilu0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDestroySolveAnalysisInfo(infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<double>::ILU0Factorize(void) {
if (this->get_nnz() > 0) {
hipsparseStatus_t stat_t;
// Srinivas K
/*
cusparseSolveAnalysisInfo_t infoA = 0;
stat_t = cusparseCreateSolveAnalysisInfo(&infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDcsrilu0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDestroySolveAnalysisInfo(infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<float>::ICFactorize(BaseVector<float> *inv_diag) {
if (this->get_nnz() > 0) {
hipsparseStatus_t stat_t;
//Srinivas K
/*
cusparseSolveAnalysisInfo_t infoA = 0;
stat_t = cusparseCreateSolveAnalysisInfo(&infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->mat_descr_, HIPSPARSE_MATRIX_TYPE_SYMMETRIC);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseScsric0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<double>::ICFactorize(BaseVector<double> *inv_diag) {
if (this->get_nnz() > 0) {
hipsparseStatus_t stat_t;
// Srinivas K
/*
cusparseSolveAnalysisInfo_t infoA = 0;
stat_t = cusparseCreateSolveAnalysisInfo(&infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->mat_descr_, HIPSPARSE_MATRIX_TYPE_SYMMETRIC);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDcsric0(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
this->mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
infoA);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
void GPUAcceleratorMatrixCSR<double>::LUAnalyse(void) {
this->LUAnalyseClear();
hipsparseStatus_t stat_t;
/*
// L part
stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->L_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// U part
stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->U_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_UPPER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
assert(this->get_ncol() == this->get_nrow());
assert(this->tmp_vec_ == NULL);
this->tmp_vec_ = new GPUAcceleratorVector<double>(this->local_backend_);
assert(this->tmp_vec_ != NULL);
tmp_vec_->Allocate(this->get_nrow());
*/
}
template <>
void GPUAcceleratorMatrixCSR<float>::LUAnalyse(void) {
this->LUAnalyseClear();
hipsparseStatus_t stat_t;
/*
// L part
stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->L_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// U part
stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->U_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_UPPER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
assert(this->get_ncol() == this->get_nrow());
assert(this->tmp_vec_ == NULL);
this->tmp_vec_ = new GPUAcceleratorVector<float>(this->local_backend_);
assert(this->tmp_vec_ != NULL);
tmp_vec_->Allocate(this->get_nrow());
*/
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::LUAnalyseClear(void) {
hipsparseStatus_t stat_t;
/*
if (this->L_mat_info_ != 0) {
stat_t = cusparseDestroySolveAnalysisInfo(this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->L_mat_descr_ != 0) {
stat_t = hipsparseDestroyMatDescr(this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->U_mat_info_ != 0) {
stat_t = cusparseDestroySolveAnalysisInfo(this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->U_mat_descr_ != 0) {
stat_t = hipsparseDestroyMatDescr(this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
this->L_mat_descr_ = 0;
this->U_mat_descr_ = 0;
this->L_mat_info_ = 0;
this->U_mat_info_ = 0;
if (this ->tmp_vec_ != NULL) {
delete this->tmp_vec_ ;
this->tmp_vec_ = NULL;
}
*/
}
template <>
bool GPUAcceleratorMatrixCSR<float>::LUSolve(const BaseVector<float> &in, BaseVector<float> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
assert(this->tmp_vec_ != NULL);
const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
float one = float(1.0);
// Solve L
stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_,
cast_in->vec_,
tmp_vec_->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Solve U
stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_,
tmp_vec_->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<double>::LUSolve(const BaseVector<double> &in, BaseVector<double> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
double one = double(1.0);
// Solve L
stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_,
cast_in->vec_,
this->tmp_vec_->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Solve U
stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_,
this->tmp_vec_->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
void GPUAcceleratorMatrixCSR<double>::LLAnalyse(void) {
this->LLAnalyseClear();
hipsparseStatus_t stat_t;
/*
// L part
stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->L_mat_descr_,HIPSPARSE_MATRIX_TYPE_TRIANGULAR);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// U part
stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->U_mat_descr_,HIPSPARSE_MATRIX_TYPE_TRIANGULAR);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
assert(this->get_ncol() == this->get_nrow());
assert(this->tmp_vec_ == NULL);
this->tmp_vec_ = new GPUAcceleratorVector<double>(this->local_backend_);
assert(this->tmp_vec_ != NULL);
tmp_vec_->Allocate(this->get_nrow());
*/
}
template <>
void GPUAcceleratorMatrixCSR<float>::LLAnalyse(void) {
this->LLAnalyseClear();
hipsparseStatus_t stat_t;
/*
// L part
stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->L_mat_descr_,HIPSPARSE_MATRIX_TYPE_TRIANGULAR);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// U part
stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->U_mat_descr_,HIPSPARSE_MATRIX_TYPE_TRIANGULAR);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
assert(this->get_ncol() == this->get_nrow());
assert(this->tmp_vec_ == NULL);
this->tmp_vec_ = new GPUAcceleratorVector<float>(this->local_backend_);
assert(this->tmp_vec_ != NULL);
tmp_vec_->Allocate(this->get_nrow());
*/
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::LLAnalyseClear(void) {
hipsparseStatus_t stat_t;
/*
if (this->L_mat_info_ != 0) {
stat_t = cusparseDestroySolveAnalysisInfo(this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->L_mat_descr_ != 0) {
stat_t = hipsparseDestroyMatDescr(this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->U_mat_info_ != 0) {
stat_t = cusparseDestroySolveAnalysisInfo(this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->U_mat_descr_ != 0) {
stat_t = hipsparseDestroyMatDescr(this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
this->L_mat_descr_ = 0;
this->U_mat_descr_ = 0;
this->L_mat_info_ = 0;
this->U_mat_info_ = 0;
*/
  if (this->tmp_vec_ != NULL) {
    delete this->tmp_vec_;
    this->tmp_vec_ = NULL;
}
}
template <>
bool GPUAcceleratorMatrixCSR<double>::LLSolve(const BaseVector<double> &in, BaseVector<double> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
double one = double(1.0);
// Solve L
stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_,
cast_in->vec_,
this->tmp_vec_->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Solve U
stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_TRANSPOSE,
this->get_nrow(),
&one,
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_,
this->tmp_vec_->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<float>::LLSolve(const BaseVector<float> &in, BaseVector<float> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
float one = float(1.0);
// Solve L
stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_,
cast_in->vec_,
this->tmp_vec_->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Solve U
stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_TRANSPOSE,
this->get_nrow(),
&one,
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_,
this->tmp_vec_->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::LLSolve(const BaseVector<ValueType> &in, const BaseVector<ValueType> &inv_diag,
BaseVector<ValueType> *out) const {
return LLSolve(in, out);
}
template <>
void GPUAcceleratorMatrixCSR<double>::LAnalyse(const bool diag_unit) {
hipsparseStatus_t stat_t;
/*
// L part
stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->L_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
if (diag_unit == true) {
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
} else {
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
}
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
template <>
void GPUAcceleratorMatrixCSR<float>::LAnalyse(const bool diag_unit) {
hipsparseStatus_t stat_t;
/*
// L part
stat_t = hipsparseCreateMatDescr(&this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->L_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->L_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->L_mat_descr_, HIPSPARSE_FILL_MODE_LOWER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
if (diag_unit == true) {
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
} else {
stat_t = hipsparseSetMatDiagType(this->L_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
}
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
template <>
void GPUAcceleratorMatrixCSR<double>::UAnalyse(const bool diag_unit) {
hipsparseStatus_t stat_t;
/*
// U upart
stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->U_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_UPPER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
if (diag_unit == true) {
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
} else {
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
}
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseDcsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
template <>
void GPUAcceleratorMatrixCSR<float>::UAnalyse(const bool diag_unit) {
hipsparseStatus_t stat_t;
/*
// U part
stat_t = hipsparseCreateMatDescr(&this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(this->U_mat_descr_,HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(this->U_mat_descr_,HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatFillMode(this->U_mat_descr_, HIPSPARSE_FILL_MODE_UPPER);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
if (diag_unit == true) {
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_UNIT);
} else {
stat_t = hipsparseSetMatDiagType(this->U_mat_descr_, HIPSPARSE_DIAG_TYPE_NON_UNIT);
}
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = cusparseCreateSolveAnalysisInfo(&this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
// Analysis
stat_t = cusparseScsrsv_analysis(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(), this->get_nnz(),
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::LAnalyseClear(void) {
hipsparseStatus_t stat_t;
/*
if (this->L_mat_info_ != 0) {
stat_t = cusparseDestroySolveAnalysisInfo(this->L_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->L_mat_descr_ != 0) {
stat_t = hipsparseDestroyMatDescr(this->L_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
this->L_mat_descr_ = 0;
this->L_mat_info_ = 0;
*/
}
template <typename ValueType>
void GPUAcceleratorMatrixCSR<ValueType>::UAnalyseClear(void) {
hipsparseStatus_t stat_t;
/*
if (this->U_mat_info_ != 0) {
stat_t = cusparseDestroySolveAnalysisInfo(this->U_mat_info_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
if (this->U_mat_descr_ != 0) {
stat_t = hipsparseDestroyMatDescr(this->U_mat_descr_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
this->U_mat_descr_ = 0;
this->U_mat_info_ = 0;
*/
}
template <>
bool GPUAcceleratorMatrixCSR<double>::LSolve(const BaseVector<double> &in, BaseVector<double> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
double one = double(1.0);
// Solve L
stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_,
cast_in->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<float>::LSolve(const BaseVector<float> &in, BaseVector<float> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
float one = float(1.0);
// Solve L
stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->L_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->L_mat_info_,
cast_in->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<double>::USolve(const BaseVector<double> &in, BaseVector<double> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<double> *cast_in = dynamic_cast<const GPUAcceleratorVector<double>*> (&in);
GPUAcceleratorVector<double> *cast_out = dynamic_cast< GPUAcceleratorVector<double>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
double one = double(1.0);
// Solve U
stat_t = cusparseDcsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_,
cast_in->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<float>::USolve(const BaseVector<float> &in, BaseVector<float> *out) const {
if (this->get_nnz() > 0) {
/*
assert(this->L_mat_descr_ != 0);
assert(this->U_mat_descr_ != 0);
assert(this->L_mat_info_ != 0);
assert(this->U_mat_info_ != 0);
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
assert(this->get_ncol() == this->get_nrow());
const GPUAcceleratorVector<float> *cast_in = dynamic_cast<const GPUAcceleratorVector<float>*> (&in);
GPUAcceleratorVector<float> *cast_out = dynamic_cast< GPUAcceleratorVector<float>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
hipsparseStatus_t stat_t;
float one = float(1.0);
// Solve U
stat_t = cusparseScsrsv_solve(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
this->get_nrow(),
&one,
this->U_mat_descr_,
this->mat_.val, this->mat_.row_offset, this->mat_.col,
this->U_mat_info_,
cast_in->vec_,
cast_out->vec_);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractDiagonal(BaseVector<ValueType> *vec_diag) const {
if (this->get_nnz() > 0) {
assert(vec_diag != NULL);
assert(vec_diag->get_size() == this->get_nrow());
GPUAcceleratorVector<ValueType> *cast_vec_diag = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec_diag);
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_diag<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col, this->mat_.val,
cast_vec_diag->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractInverseDiagonal(BaseVector<ValueType> *vec_inv_diag) const {
if (this->get_nnz() > 0) {
assert(vec_inv_diag != NULL);
assert(vec_inv_diag->get_size() == this->get_nrow());
GPUAcceleratorVector<ValueType> *cast_vec_inv_diag = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec_inv_diag);
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_inv_diag<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col,
this->mat_.val, cast_vec_inv_diag->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractSubMatrix(const int row_offset,
const int col_offset,
const int row_size,
const int col_size,
BaseMatrix<ValueType> *mat) const {
assert(mat != NULL);
assert(row_offset >= 0);
assert(col_offset >= 0);
assert(this->get_nrow() > 0);
assert(this->get_ncol() > 0);
GPUAcceleratorMatrixCSR<ValueType> *cast_mat = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (mat) ;
assert(cast_mat != NULL);
int mat_nnz = 0;
int *row_nnz = NULL;
//int *red_row_nnz = (int *) malloc(sizeof(int)*(row_size+1));
int *sub_nnz = NULL;
allocate_gpu<int>(row_size+1, &sub_nnz);
allocate_gpu(row_size+1, &row_nnz);
// compute the nnz per row in the new matrix
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(row_size / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_submatrix_row_nnz<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset, this->mat_.col, this->mat_.val,
row_offset, col_offset,
row_size, col_size,
row_nnz);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
// compute the new nnz by reduction
// CPU reduction
/*
hipMemcpy(red_row_nnz, // dst
row_nnz, // src
(row_size+1)*sizeof(int), // size
hipMemcpyDeviceToHost);
int sum=0;
for (int i=0; i<row_size; ++i) {
int tmp = red_row_nnz[i];
red_row_nnz[i] = sum;
sum += tmp;
}
mat_nnz = red_row_nnz[row_size] = sum ;
*/
//TODO
//move in extra file
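// cum_sum performs a device-side exclusive prefix sum of the per-row counts: sub_nnz
// becomes the row-offset array of the submatrix and sub_nnz[row_size] holds its total
// number of non-zeros.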
cum_sum<int, 256>(sub_nnz, row_nnz, row_size);
hipMemcpy(&mat_nnz, &sub_nnz[row_size],
sizeof(int), hipMemcpyDeviceToHost);
// not empty submatrix
if (mat_nnz > 0) {
cast_mat->AllocateCSR(mat_nnz, row_size, col_size);
// part of the CPU reduction section
/*
hipMemcpy(cast_mat->mat_.row_offset, // dst
red_row_nnz, // src
(row_size+1)*sizeof(int), // size
hipMemcpyHostToDevice);
*/
free_gpu<int>(&cast_mat->mat_.row_offset);
cast_mat->mat_.row_offset = sub_nnz;
// copying the sub matrix
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_submatrix_copy<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset, this->mat_.col, this->mat_.val,
row_offset, col_offset,
row_size, col_size,
cast_mat->mat_.row_offset, cast_mat->mat_.col, cast_mat->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
free_gpu(&row_nnz);
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractL(BaseMatrix<ValueType> *L) const {
assert(L != NULL);
assert(this->get_nrow() > 0);
assert(this->get_ncol() > 0);
GPUAcceleratorMatrixCSR<ValueType> *cast_L = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (L);
assert(cast_L != NULL);
cast_L->Clear();
// compute nnz per row
int nrow = this->get_nrow();
allocate_gpu<int>(nrow+1, &cast_L->mat_.row_offset);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_slower_nnz_per_row<int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, cast_L->mat_.row_offset+1);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
// partial sum row_nnz to obtain row_offset vector
// TODO currently performing partial sum on host
int *h_buffer = NULL;
allocate_host(nrow+1, &h_buffer);
hipMemcpy(h_buffer+1, // dst
cast_L->mat_.row_offset+1, // src
nrow*sizeof(int), // size
hipMemcpyDeviceToHost);
h_buffer[0] = 0;
for (int i=1; i<nrow+1; ++i)
h_buffer[i] += h_buffer[i-1];
int nnz_L = h_buffer[nrow];
hipMemcpy(cast_L->mat_.row_offset, // dst
h_buffer, // src
(nrow+1)*sizeof(int), // size
hipMemcpyHostToDevice);
free_host(&h_buffer);
// end TODO
// allocate lower triangular part structure
allocate_gpu<int>(nnz_L, &cast_L->mat_.col);
allocate_gpu<ValueType>(nnz_L, &cast_L->mat_.val);
// fill lower triangular part
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_l_triangular<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, this->mat_.val,
cast_L->mat_.row_offset,
cast_L->mat_.col,
cast_L->mat_.val);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
cast_L->nrow_ = this->get_nrow();
cast_L->ncol_ = this->get_ncol();
cast_L->nnz_ = nnz_L;
return true;
}
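// Editor's note (sketch, not part of the original code): the host-side partial sum in
// ExtractL() above (and in its three siblings below) round-trips the per-row counts
// through the CPU. The same exclusive scan is already done on the device elsewhere in
// this file via cum_sum<int, 256>(...) (see ExtractSubMatrix() and Compress()). Assuming
// cum_sum takes (output, counts, n) exactly as it is used there, the TODO block could in
// principle become:
//
//   int *dev_offset = NULL;
//   allocate_gpu<int>(nrow+1, &dev_offset);
//   cum_sum<int, 256>(dev_offset, cast_L->mat_.row_offset+1, nrow);
//   int nnz_L = 0;
//   hipMemcpy(&nnz_L, &dev_offset[nrow], sizeof(int), hipMemcpyDeviceToHost);
//   free_gpu<int>(&cast_L->mat_.row_offset);
//   cast_L->mat_.row_offset = dev_offset;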
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractLDiagonal(BaseMatrix<ValueType> *L) const {
assert(L != NULL);
assert(this->get_nrow() > 0);
assert(this->get_ncol() > 0);
GPUAcceleratorMatrixCSR<ValueType> *cast_L = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (L);
assert(cast_L != NULL);
cast_L->Clear();
// compute nnz per row
int nrow = this->get_nrow();
allocate_gpu<int>(nrow+1, &cast_L->mat_.row_offset);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_lower_nnz_per_row<int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, cast_L->mat_.row_offset+1);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
// partial sum row_nnz to obtain row_offset vector
// TODO currently performing partial sum on host
int *h_buffer = NULL;
allocate_host(nrow+1, &h_buffer);
hipMemcpy(h_buffer+1, // dst
cast_L->mat_.row_offset+1, // src
nrow*sizeof(int), // size
hipMemcpyDeviceToHost);
h_buffer[0] = 0;
for (int i=1; i<nrow+1; ++i)
h_buffer[i] += h_buffer[i-1];
int nnz_L = h_buffer[nrow];
hipMemcpy(cast_L->mat_.row_offset, // dst
h_buffer, // src
(nrow+1)*sizeof(int), // size
hipMemcpyHostToDevice);
free_host(&h_buffer);
// end TODO
// allocate lower triangular part structure
allocate_gpu<int>(nnz_L, &cast_L->mat_.col);
allocate_gpu<ValueType>(nnz_L, &cast_L->mat_.val);
// fill lower triangular part
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_l_triangular<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, this->mat_.val,
cast_L->mat_.row_offset,
cast_L->mat_.col,
cast_L->mat_.val);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
cast_L->nrow_ = this->get_nrow();
cast_L->ncol_ = this->get_ncol();
cast_L->nnz_ = nnz_L;
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractU(BaseMatrix<ValueType> *U) const {
assert(U != NULL);
assert(this->get_nrow() > 0);
assert(this->get_ncol() > 0);
GPUAcceleratorMatrixCSR<ValueType> *cast_U = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (U);
assert(cast_U != NULL);
cast_U->Clear();
// compute nnz per row
int nrow = this->get_nrow();
allocate_gpu<int>(nrow+1, &cast_U->mat_.row_offset);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_supper_nnz_per_row<int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, cast_U->mat_.row_offset+1);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
// partial sum row_nnz to obtain row_offset vector
// TODO currently performing partial sum on host
int *h_buffer = NULL;
allocate_host(nrow+1, &h_buffer);
hipMemcpy(h_buffer+1, // dst
cast_U->mat_.row_offset+1, // src
nrow*sizeof(int), // size
hipMemcpyDeviceToHost);
h_buffer[0] = 0;
for (int i=1; i<nrow+1; ++i)
h_buffer[i] += h_buffer[i-1];
int nnz_L = h_buffer[nrow];
hipMemcpy(cast_U->mat_.row_offset, // dst
h_buffer, // src
(nrow+1)*sizeof(int), // size
hipMemcpyHostToDevice);
free_host(&h_buffer);
// end TODO
// allocate upper triangular part structure
allocate_gpu<int>(nnz_L, &cast_U->mat_.col);
allocate_gpu<ValueType>(nnz_L, &cast_U->mat_.val);
// fill upper triangular part
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_u_triangular<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, this->mat_.val,
cast_U->mat_.row_offset,
cast_U->mat_.col,
cast_U->mat_.val);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
cast_U->nrow_ = this->get_nrow();
cast_U->ncol_ = this->get_ncol();
cast_U->nnz_ = nnz_L;
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractUDiagonal(BaseMatrix<ValueType> *U) const {
assert(U != NULL);
assert(this->get_nrow() > 0);
assert(this->get_ncol() > 0);
GPUAcceleratorMatrixCSR<ValueType> *cast_U = dynamic_cast<GPUAcceleratorMatrixCSR<ValueType>*> (U);
assert(cast_U != NULL);
cast_U->Clear();
// compute nnz per row
int nrow = this->get_nrow();
allocate_gpu<int>(nrow+1, &cast_U->mat_.row_offset);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_upper_nnz_per_row<int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, cast_U->mat_.row_offset+1);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
// partial sum row_nnz to obtain row_offset vector
// TODO currently performing partial sum on host
int *h_buffer = NULL;
allocate_host(nrow+1, &h_buffer);
hipMemcpy(h_buffer+1, // dst
cast_U->mat_.row_offset+1, // src
nrow*sizeof(int), // size
hipMemcpyDeviceToHost);
h_buffer[0] = 0;
for (int i=1; i<nrow+1; ++i)
h_buffer[i] += h_buffer[i-1];
int nnz_L = h_buffer[nrow];
hipMemcpy(cast_U->mat_.row_offset, // dst
h_buffer, // src
(nrow+1)*sizeof(int), // size
hipMemcpyHostToDevice);
free_host(&h_buffer);
// end TODO
// allocate upper triangular part structure
allocate_gpu<int>(nnz_L, &cast_U->mat_.col);
allocate_gpu<ValueType>(nnz_L, &cast_U->mat_.val);
// fill upper triangular part
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_u_triangular<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
this->mat_.col, this->mat_.val,
cast_U->mat_.row_offset,
cast_U->mat_.col,
cast_U->mat_.val);
CHECK_CUDA_ERROR(__FILE__,__LINE__);
cast_U->nrow_ = this->get_nrow();
cast_U->ncol_ = this->get_ncol();
cast_U->nnz_ = nnz_L;
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::MaximalIndependentSet(int &size,
BaseVector<int> *permutation) const {
assert(permutation != NULL);
GPUAcceleratorVector<int> *cast_perm = dynamic_cast<GPUAcceleratorVector<int>*> (permutation);
assert(cast_perm != NULL);
assert(this->get_nrow() == this->get_ncol());
int *h_row_offset = NULL;
int *h_col = NULL;
allocate_host(this->get_nrow()+1, &h_row_offset);
allocate_host(this->get_nnz(), &h_col);
hipMemcpy(h_row_offset, this->mat_.row_offset, (this->get_nrow()+1)*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_col, this->mat_.col, this->get_nnz()*sizeof(int), hipMemcpyDeviceToHost);
int *mis = NULL;
allocate_host(this->get_nrow(), &mis);
memset(mis, 0, sizeof(int)*this->get_nrow());
size = 0 ;
for (int ai=0; ai<this->get_nrow(); ++ai) {
if (mis[ai] == 0) {
// set the node
mis[ai] = 1;
++size ;
//remove all nbh nodes (without diagonal)
for (int aj=h_row_offset[ai]; aj<h_row_offset[ai+1]; ++aj)
if (ai != h_col[aj])
mis[h_col[aj]] = -1 ;
}
}
int *h_perm = NULL;
allocate_host(this->get_nrow(), &h_perm);
int pos = 0;
for (int ai=0; ai<this->get_nrow(); ++ai) {
if (mis[ai] == 1) {
h_perm[ai] = pos;
++pos;
} else {
h_perm[ai] = size + ai - pos;
}
}
// Check the permutation
//
// for (int ai=0; ai<this->get_nrow(); ++ai) {
// assert( h_perm[ai] >= 0 );
// assert( h_perm[ai] < this->get_nrow() );
// }
cast_perm->Allocate(this->get_nrow());
hipMemcpy(cast_perm->vec_, h_perm, permutation->get_size()*sizeof(int), hipMemcpyHostToDevice);
free_host(&h_row_offset);
free_host(&h_col);
free_host(&h_perm);
free_host(&mis);
return true;
}
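// Editor's note: MaximalIndependentSet() above orders the `size` nodes of the greedy
// independent set first. For example, on a 4-node chain 0-1-2-3 the greedy pass selects
// {0, 2} (mis = [1,-1,1,-1], size = 2) and produces h_perm = [0, 2, 1, 3]: node 0 maps to
// position 0, node 2 to position 1, and the remaining nodes keep their relative order in
// positions 2 and 3.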
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::MultiColoring(int &num_colors,
int **size_colors,
BaseVector<int> *permutation) const {
assert(permutation != NULL);
GPUAcceleratorVector<int> *cast_perm = dynamic_cast<GPUAcceleratorVector<int>*> (permutation);
assert(cast_perm != NULL);
// node colors (init value = 0 i.e. no color)
int *color = NULL;
int *h_row_offset = NULL;
int *h_col = NULL;
int size = this->get_nrow();
allocate_host(size, &color);
allocate_host(this->get_nrow()+1, &h_row_offset);
allocate_host(this->get_nnz(), &h_col);
hipMemcpy(h_row_offset, this->mat_.row_offset, (this->get_nrow()+1)*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_col, this->mat_.col, this->get_nnz()*sizeof(int), hipMemcpyDeviceToHost);
memset(color, 0, size*sizeof(int));
num_colors = 0;
std::vector<bool> row_col;
for (int ai=0; ai<this->get_nrow(); ++ai) {
color[ai] = 1;
row_col.clear();
row_col.assign(num_colors+2, false);
for (int aj=h_row_offset[ai]; aj<h_row_offset[ai+1]; ++aj)
if (ai != h_col[aj])
row_col[color[h_col[aj]]] = true;
for (int aj=h_row_offset[ai]; aj<h_row_offset[ai+1]; ++aj)
if (row_col[color[ai]] == true)
++color[ai];
if (color[ai] > num_colors)
num_colors = color[ai];
}
free_host(&h_row_offset);
free_host(&h_col);
allocate_host(num_colors, size_colors);
set_to_zero_host(num_colors, *size_colors);
int *offsets_color = NULL;
allocate_host(num_colors, &offsets_color);
memset(offsets_color, 0, sizeof(int)*num_colors);
for (int i=0; i<this->get_nrow(); ++i)
++(*size_colors)[color[i]-1];
int total=0;
for (int i=1; i<num_colors; ++i) {
total += (*size_colors)[i-1];
offsets_color[i] = total;
// LOG_INFO("offsets = " << total);
}
int *h_perm = NULL;
allocate_host(this->get_nrow(), &h_perm);
for (int i=0; i<this->get_nrow(); ++i) {
h_perm[i] = offsets_color[ color[i]-1 ] ;
++offsets_color[color[i]-1];
}
cast_perm->Allocate(this->get_nrow());
hipMemcpy(cast_perm->vec_, h_perm, permutation->get_size()*sizeof(int), hipMemcpyHostToDevice);
free_host(&h_perm);
free_host(&color);
free_host(&offsets_color);
return true;
}
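// Editor's note: MultiColoring() above greedily gives each node the smallest color (>= 1)
// that is not currently used by one of its neighbours, then builds a permutation that
// groups the nodes by color: (*size_colors)[c-1] is the number of nodes with color c and
// offsets_color[] holds the first position of each color block in the permuted ordering.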
template <>
bool GPUAcceleratorMatrixCSR<double>::Scale(const double alpha) {
if (this->get_nnz() > 0) {
hipblasStatus_t stat_t;
stat_t = hipblasDscal(CUBLAS_HANDLE(this->local_backend_.GPU_cublas_handle),
this->get_nnz(), &alpha,
this->mat_.val, 1);
CHECK_CUBLAS_ERROR(stat_t, __FILE__, __LINE__);
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<float>::Scale(const float alpha) {
if (this->get_nnz() > 0) {
hipblasStatus_t stat_t;
stat_t = hipblasSscal(CUBLAS_HANDLE(this->local_backend_.GPU_cublas_handle),
this->get_nnz(), &alpha,
this->mat_.val, 1);
CHECK_CUBLAS_ERROR(stat_t, __FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ScaleDiagonal(const ValueType alpha) {
if (this->get_nnz() > 0) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_scale_diagonal<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col,
alpha, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ScaleOffDiagonal(const ValueType alpha) {
if (this->get_nnz() > 0) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_scale_offdiagonal<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col,
alpha, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::AddScalarDiagonal(const ValueType alpha) {
if (this->get_nnz() > 0) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_add_diagonal<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col,
alpha, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::AddScalarOffDiagonal(const ValueType alpha) {
if (this->get_nnz() > 0) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_add_offdiagonal<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col,
alpha, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::AddScalar(const ValueType alpha) {
if (this->get_nnz() > 0) {
int nnz = this->get_nnz();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nnz / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_buffer_addscalar<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nnz, alpha, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::DiagonalMatrixMultR(const BaseVector<ValueType> &diag) {
assert(diag.get_size() == this->get_ncol());
const GPUAcceleratorVector<ValueType> *cast_diag = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&diag);
assert(cast_diag!= NULL);
if (this->get_nnz() > 0) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_diagmatmult_r<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset, this->mat_.col,
cast_diag->vec_, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::DiagonalMatrixMultL(const BaseVector<ValueType> &diag) {
assert(diag.get_size() == this->get_ncol());
const GPUAcceleratorVector<ValueType> *cast_diag = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&diag);
assert(cast_diag!= NULL);
if (this->get_nnz() > 0) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_diagmatmult_l<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow, this->mat_.row_offset,
cast_diag->vec_, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::MatMatMult(const BaseMatrix<ValueType> &A, const BaseMatrix<ValueType> &B) {
assert(A.get_ncol() == B.get_nrow());
assert(A.get_nrow() > 0);
assert(B.get_ncol() > 0);
assert(B.get_nrow() > 0);
const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_A = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&A);
const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_B = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&B);
assert(cast_mat_A != NULL);
assert(cast_mat_B != NULL);
this->Clear();
int m = cast_mat_A->get_nrow();
int n = cast_mat_B->get_ncol();
int k = cast_mat_B->get_nrow();
int nnzC = 0;
allocate_gpu(m+1, &this->mat_.row_offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipsparseStatus_t stat_t;
stat_t = hipsparseSetPointerMode(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_POINTER_MODE_HOST);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
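// csrgemm is a two-phase API: the Nnz pass below computes C's row pointer and the total
// number of non-zeros, then the typed csrgemm call fills C's column indices and values.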
stat_t = hipsparseXcsrgemmNnz(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
m, n, k,
cast_mat_A->mat_descr_, cast_mat_A->get_nnz(),
cast_mat_A->mat_.row_offset, cast_mat_A->mat_.col,
cast_mat_B->mat_descr_, cast_mat_B->get_nnz(),
cast_mat_B->mat_.row_offset, cast_mat_B->mat_.col,
this->mat_descr_, this->mat_.row_offset,
&nnzC);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
allocate_gpu(nnzC, &this->mat_.col);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
allocate_gpu(nnzC, &this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
this->nrow_ = m;
this->ncol_ = n;
this->nnz_ = nnzC;
stat_t = __cusparseXcsrgemm__(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
m, n, k,
// A
cast_mat_A->mat_descr_, cast_mat_A->get_nnz(),
cast_mat_A->mat_.val,
cast_mat_A->mat_.row_offset, cast_mat_A->mat_.col,
// B
cast_mat_B->mat_descr_, cast_mat_B->get_nnz(),
cast_mat_B->mat_.val,
cast_mat_B->mat_.row_offset, cast_mat_B->mat_.col,
// C
this->mat_descr_,
this->mat_.val,
this->mat_.row_offset, this->mat_.col);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::Gershgorin(ValueType &lambda_min,
ValueType &lambda_max) const {
return false;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::MatrixAdd(const BaseMatrix<ValueType> &mat, const ValueType alpha,
const ValueType beta, const bool structure) {
if (this->get_nnz() > 0) {
const GPUAcceleratorMatrixCSR<ValueType> *cast_mat = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat);
assert(cast_mat != NULL);
assert(cast_mat->get_nrow() == this->get_nrow());
assert(cast_mat->get_ncol() == this->get_ncol());
assert(this ->get_nnz() > 0);
assert(cast_mat->get_nnz() > 0);
if (structure == false) {
int nrow = this->get_nrow();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_add_csr_same_struct<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, nrow,
this->mat_.row_offset, this->mat_.col,
cast_mat->mat_.row_offset,
cast_mat->mat_.col, cast_mat->mat_.val,
alpha, beta, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
} else {
// New structure with CUSPARSE routines
int m = this->get_nrow();
int n = this->get_ncol();
int *csrRowPtrC = NULL;
int *csrColC = NULL;
ValueType *csrValC = NULL;
int nnzC;
allocate_gpu(m+1, &csrRowPtrC);
hipsparseStatus_t stat_t;
hipsparseMatDescr_t desc_mat_C = 0;
stat_t = hipsparseCreateMatDescr(&desc_mat_C);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatIndexBase(desc_mat_C, HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetMatType(desc_mat_C, HIPSPARSE_MATRIX_TYPE_GENERAL);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
stat_t = hipsparseSetPointerMode(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
HIPSPARSE_POINTER_MODE_HOST);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
/*
stat_t = cusparseXcsrgeamNnz(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
m, n,
this->mat_descr_, this->get_nnz(),
this->mat_.row_offset, this->mat_.col,
cast_mat->mat_descr_, cast_mat->get_nnz(),
cast_mat->mat_.row_offset, cast_mat->mat_.col,
desc_mat_C, csrRowPtrC,
&nnzC);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
allocate_gpu(nnzC, &csrColC);
allocate_gpu(nnzC, &csrValC);
stat_t = __cusparseXcsrgeam__(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
m, n,
// A
&alpha,
this->mat_descr_, this->get_nnz(),
this->mat_.val,
this->mat_.row_offset, this->mat_.col,
// B
&beta,
cast_mat->mat_descr_, cast_mat->get_nnz(),
cast_mat->mat_.val,
cast_mat->mat_.row_offset, cast_mat->mat_.col,
// C
desc_mat_C,
csrValC,
csrRowPtrC, csrColC);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
*/
stat_t = hipsparseDestroyMatDescr(desc_mat_C);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
this->Clear();
this->mat_.row_offset = csrRowPtrC;
this->mat_.col = csrColC;
this->mat_.val = csrValC;
this->nrow_ = m;
this->ncol_ = n;
this->nnz_ = nnzC;
}
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::Compress(const double drop_off) {
if (this->get_nnz() > 0) {
GPUAcceleratorMatrixCSR<ValueType> tmp(this->local_backend_);
tmp.CopyFrom(*this);
int mat_nnz = 0;
int *row_offset = NULL;
allocate_gpu(this->get_nrow()+1, &row_offset);
int *mat_row_offset = NULL;
allocate_gpu(this->get_nrow()+1, &mat_row_offset);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
this->get_nrow()+1, row_offset);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(this->get_nrow() / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_compress_count_nrow<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset,
this->mat_.col,
this->mat_.val,
this->get_nrow(),
drop_off,
row_offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
// TODO
cum_sum<int, 256>(mat_row_offset, row_offset, this->get_nrow());
// get the new mat nnz
hipMemcpy(&mat_nnz, &mat_row_offset[this->get_nrow()],
sizeof(int), hipMemcpyDeviceToHost);
this->AllocateCSR(mat_nnz, this->get_nrow(), this->get_ncol());
// TODO - just exchange memory pointers
// copy row_offset
hipMemcpy(this->mat_.row_offset, mat_row_offset,
(this->get_nrow()+1)*sizeof(int), hipMemcpyDeviceToDevice);
// copy col and val
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_compress_copy<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, tmp.mat_.row_offset,
tmp.mat_.col,
tmp.mat_.val,
tmp.get_nrow(),
drop_off,
this->mat_.row_offset,
this->mat_.col,
this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_gpu(&row_offset);
free_gpu(&mat_row_offset);
}
return true;
}
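// Editor's note (sketch, not part of the original code): the "just exchange memory
// pointers" TODO inside Compress() above could reuse the pattern from ExtractSubMatrix():
// instead of copying mat_row_offset into the row_offset freshly allocated by
// AllocateCSR(), take ownership of the scanned buffer, e.g.
//
//   free_gpu<int>(&this->mat_.row_offset);
//   this->mat_.row_offset = mat_row_offset;
//
// and then drop the final free_gpu(&mat_row_offset).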
template <>
bool GPUAcceleratorMatrixCSR<double>::Transpose(void) {
if (this->get_nnz() > 0) {
GPUAcceleratorMatrixCSR<double> tmp(this->local_backend_);
tmp.CopyFrom(*this);
this->Clear();
this->AllocateCSR(tmp.get_nnz(), tmp.get_ncol(), tmp.get_nrow());
hipsparseStatus_t stat_t;
stat_t = hipsparseDcsr2csc(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
tmp.get_nrow(), tmp.get_ncol(), tmp.get_nnz(),
tmp.mat_.val, tmp.mat_.row_offset, tmp.mat_.col,
this->mat_.val, this->mat_.col, this->mat_.row_offset,
HIPSPARSE_ACTION_NUMERIC,
HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
return true;
}
template <>
bool GPUAcceleratorMatrixCSR<float>::Transpose(void) {
if (this->get_nnz() > 0) {
GPUAcceleratorMatrixCSR<float> tmp(this->local_backend_);
tmp.CopyFrom(*this);
this->Clear();
this->AllocateCSR(tmp.get_nnz(), tmp.get_ncol(), tmp.get_nrow());
hipsparseStatus_t stat_t;
stat_t = hipsparseScsr2csc(CUSPARSE_HANDLE(this->local_backend_.GPU_cusparse_handle),
tmp.get_nrow(), tmp.get_ncol(), tmp.get_nnz(),
tmp.mat_.val, tmp.mat_.row_offset, tmp.mat_.col,
this->mat_.val, this->mat_.col, this->mat_.row_offset,
HIPSPARSE_ACTION_NUMERIC,
HIPSPARSE_INDEX_BASE_ZERO);
CHECK_CUSPARSE_ERROR(stat_t, __FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ReplaceColumnVector(const int idx, const BaseVector<ValueType> &vec) {
assert(&vec != NULL);
assert(vec.get_size() == this->nrow_);
if (this->get_nnz() > 0) {
const GPUAcceleratorVector<ValueType> *cast_vec = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&vec);
assert(cast_vec != NULL);
int *row_offset = NULL;
int *col = NULL;
ValueType *val = NULL;
int nrow = this->get_nrow();
int ncol = this->get_ncol();
allocate_gpu(nrow+1, &row_offset);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_replace_column_vector_offset<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset,
this->mat_.col,
nrow,
idx,
cast_vec->vec_,
row_offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
int *host_offset = NULL;
allocate_host(nrow+1, &host_offset);
hipMemcpy(host_offset,
row_offset,
sizeof(int)*(nrow+1),
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
host_offset[0] = 0;
for (int i=0; i<nrow; ++i)
host_offset[i+1] += host_offset[i];
int nnz = host_offset[nrow];
hipMemcpy(row_offset,
host_offset,
sizeof(int)*(nrow+1),
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
allocate_gpu(nnz, &col);
allocate_gpu(nnz, &val);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_replace_column_vector<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset,
this->mat_.col,
this->mat_.val,
nrow,
idx,
cast_vec->vec_,
row_offset,
col,
val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
this->Clear();
this->SetDataPtrCSR(&row_offset, &col, &val, nnz, nrow, ncol);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractColumnVector(const int idx, BaseVector<ValueType> *vec) const {
assert(vec != NULL);
assert(vec->get_size() == this->nrow_);
if (this->get_nnz() > 0) {
GPUAcceleratorVector<ValueType> *cast_vec = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec);
assert(cast_vec != NULL);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(this->get_nrow() / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_column_vector<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset,
this->mat_.col,
this->mat_.val,
this->get_nrow(),
idx,
cast_vec->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
template <typename ValueType>
bool GPUAcceleratorMatrixCSR<ValueType>::ExtractRowVector(const int idx, BaseVector<ValueType> *vec) const {
assert(vec != NULL);
assert(vec->get_size() == this->ncol_);
if (this->get_nnz() > 0) {
GPUAcceleratorVector<ValueType> *cast_vec = dynamic_cast<GPUAcceleratorVector<ValueType>*> (vec);
assert(cast_vec != NULL);
cast_vec->Zeros();
// Get nnz of row idx
int nnz[2];
hipMemcpy(nnz,
this->mat_.row_offset+idx,
2*sizeof(int),
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
int row_nnz = nnz[1] - nnz[0];
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(row_nnz / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(HIP_KERNEL_NAME(kernel_csr_extract_row_vector<ValueType, int>), dim3(GridSize), dim3(BlockSize), 0, 0, this->mat_.row_offset,
this->mat_.col,
this->mat_.val,
row_nnz,
idx,
cast_vec->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
return true;
}
// Srinivas K
template class GPUAcceleratorMatrixCSR<double>;
template class GPUAcceleratorMatrixCSR<float>;
}
|
f15cb158d1c4d6b7709cf20b3ff1fc048eb16040.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void MyKernel(double *array, int arrayCount)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < arrayCount)
{
array[idx] = (double)idx;
}
}
void launchMyKernel(double *array, int arrayCount)
{
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the
// maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
MyKernel, 0, 0);
// Round up according to array size
gridSize = (arrayCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( MyKernel), dim3(gridSize), dim3(blockSize) , 0, 0, array, arrayCount);
hipDeviceSynchronize();
// calculate theoretical occupancy
int maxActiveBlocks;
hipOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks,
MyKernel, blockSize,
0);
int device;
hipDeviceProp_t props;
hipGetDevice(&device);
hipGetDeviceProperties(&props, device);
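// Theoretical occupancy = active warps per SM (maxActiveBlocks * blockSize / warpSize)
// divided by the maximum number of resident warps per SM.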
float occupancy = (maxActiveBlocks * blockSize / props.warpSize) /
(float)(props.maxThreadsPerMultiProcessor /
props.warpSize);
printf("Launched blocks of size %d. Theoretical occupancy: %f\n",
blockSize, occupancy);
}
int main(int argc, char *argv[]) {
if (argc < 2) {
printf("Error: run program with 1 args: array size\n");
return 1;
}
double *array;
int arrayCount = atoi(argv[1]);
hipMalloc((void**)&array, arrayCount * sizeof(double));
launchMyKernel(array, arrayCount);
hipFree(array);
return 0;
}
|
f15cb158d1c4d6b7709cf20b3ff1fc048eb16040.cu
|
#include <stdio.h>
#include <stdlib.h>
__global__ void MyKernel(double *array, int arrayCount)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < arrayCount)
{
array[idx] = (double)idx;
}
}
void launchMyKernel(double *array, int arrayCount)
{
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the
// maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
MyKernel, 0, 0);
// Round up according to array size
gridSize = (arrayCount + blockSize - 1) / blockSize;
MyKernel<<< gridSize, blockSize >>>(array, arrayCount);
cudaDeviceSynchronize();
// calculate theoretical occupancy
int maxActiveBlocks;
cudaOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks,
MyKernel, blockSize,
0);
int device;
cudaDeviceProp props;
cudaGetDevice(&device);
cudaGetDeviceProperties(&props, device);
float occupancy = (maxActiveBlocks * blockSize / props.warpSize) /
(float)(props.maxThreadsPerMultiProcessor /
props.warpSize);
printf("Launched blocks of size %d. Theoretical occupancy: %f\n",
blockSize, occupancy);
}
int main(int argc, char *argv[]) {
if (argc < 2) {
printf("Error: run program with 1 args: array size\n");
return 1;
}
double *array;
int arrayCount = atoi(argv[1]);
cudaMalloc((void**)&array, arrayCount * sizeof(double));
launchMyKernel(array, arrayCount);
cudaFree(array);
return 0;
}
|
00a1430e2df3af970288cacfa36b4379b46c9100.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void Grayscale2 ( unsigned int *dst, int imageW, int imageH)
{
const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if(ix < imageW && iy < imageH){
float4 fnew = tex2D(texImage, ix, iy);
float gray = (fnew.x + fnew.y + fnew.z)/3;
dst[imageW * iy + ix] = make_color(gray, gray, gray, 1.0f);
}
}
__global__ void Grayscale ( unsigned int *dst, int imageW, int imageH, float brightness, float contrast)
{
const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if(ix < imageW && iy < imageH){
float4 fresult = tex2D(texImage, ix, iy);
float4 fnew = adjust_contrast(fresult, contrast);
fnew = adjust_brightness(fnew, brightness);
float gray = (fnew.x + fnew.y + fnew.z)/3;
dst[imageW * iy + ix] = make_color(gray, gray, gray, 1.0f);
}
}
extern "C" float grayImageWrapper (unsigned int *dst, int imageW, int imageH, float brightness, float contrast, int adjust)
{
//for more effective kernel execution
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
unsigned int timer;
float runtime;
cutCreateTimer(&timer);
cutStartTimer(timer);
if(adjust)
hipLaunchKernelGGL(( Grayscale), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH, brightness, contrast);
else
hipLaunchKernelGGL(( Grayscale2), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH);
hipDeviceSynchronize();
cutStopTimer(timer);
runtime = cutGetTimerValue(timer)/1000;
cutDeleteTimer(timer);
return runtime;
}
|
00a1430e2df3af970288cacfa36b4379b46c9100.cu
|
__global__ void Grayscale2 ( unsigned int *dst, int imageW, int imageH)
{
const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if(ix < imageW && iy < imageH){
float4 fnew = tex2D(texImage, ix, iy);
float gray = (fnew.x + fnew.y + fnew.z)/3;
dst[imageW * iy + ix] = make_color(gray, gray, gray, 1.0f);
}
}
__global__ void Grayscale ( unsigned int *dst, int imageW, int imageH, float brightness, float contrast)
{
const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if(ix < imageW && iy < imageH){
float4 fresult = tex2D(texImage, ix, iy);
float4 fnew = adjust_contrast(fresult, contrast);
fnew = adjust_brightness(fnew, brightness);
float gray = (fnew.x + fnew.y + fnew.z)/3;
dst[imageW * iy + ix] = make_color(gray, gray, gray, 1.0f);
}
}
extern "C" float grayImageWrapper (unsigned int *dst, int imageW, int imageH, float brightness, float contrast, int adjust)
{
//for more effective kernel execution
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
unsigned int timer;
float runtime;
cutCreateTimer(&timer);
cutStartTimer(timer);
if(adjust)
Grayscale<<<grid, threads>>>(dst, imageW, imageH, brightness, contrast);
else
Grayscale2<<<grid, threads>>>(dst, imageW, imageH);
cudaThreadSynchronize();
cutStopTimer(timer);
runtime = cutGetTimerValue(timer)/1000;
cutDeleteTimer(timer);
return runtime;
}
|
db9cfa521c75a270d5e5471af8c6fe5e3ddd7e4f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <cstdlib>
#include <ctime>
#include <iostream>
__global__ void matmul(float* matA, float* matB, float* matC, int width){
float pVal = 0;
for(int i=0; i<width; ++i){
float elementMatA = matA[threadIdx.y*width+i];
float elementMatB = matB[i*width+threadIdx.x];
pVal += elementMatA * elementMatB;
}
matC[threadIdx.y*width+threadIdx.x] = pVal;
}
void matriksMul(float* mA, float* mB, float* mC, int width){
//Device pointer
float* a_d, *b_d, *c_d;
//Matrix size in bytes
int size = width * width * sizeof(float);
//allocate and copy matrix a
hipError_t err = hipMalloc((void**)&a_d, size);
if (err != hipSuccess)
{
fprintf(stderr, "Error invoking hipMalloc (ERRCODE %d)\n", (int)err);
}
fprintf(stderr, "hipMalloc (ERRCODE %d)\n", (int)err);
hipMemcpy(a_d, mA, size, hipMemcpyHostToDevice);
//allocate and copy matrix b
hipMalloc((void**)&b_d, size);
hipMemcpy(b_d, mB, size , hipMemcpyHostToDevice );
//allocate memory to device c
hipMalloc((void**)&c_d, size);
dim3 dimGrid(1, 1);
dim3 dimBlock(width, width);
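// Note: a single 1x1 grid with width*width threads per block limits this kernel to
// width <= 32 (at most 1024 threads per block).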
hipLaunchKernelGGL(( matmul), dim3(dimGrid),dim3(dimBlock), 0, 0, a_d,b_d,c_d,width);
hipMemcpy(mC,c_d,size, hipMemcpyDeviceToHost );
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
}
int main(void){
void matriksMul(float *, float *, float *, int);
const int width = 10;
float* M, *N, *P;
size_t size = width * width *sizeof(float);
// allocate arrays on host
M = (float *) malloc(size);
N = (float *) malloc(size);
P = (float *) malloc(size);
// float M[width*width], N[width*width], P[width*width];
for(int i = 0; i < (width*width) ; i++) {
M[i] = i;
N[i] = width*width - i;
P[i] = 0.f;
// printf("%3f %3f %3f\n", M[i], N[i], P[i]);
}
matriksMul(M, N, P, width);
for(int i = 0; i < (width*width) ; i++) {
printf("%f ", P[i]);
if( i%width == width-1){
printf("\n");
}
}
free(M);
free(N);
free(P);
return 0;
}
|
db9cfa521c75a270d5e5471af8c6fe5e3ddd7e4f.cu
|
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <cstdlib>
#include <ctime>
#include <iostream>
__global__ void matmul(float* matA, float* matB, float* matC, int width){
float pVal = 0;
for(int i=0; i<width; ++i){
float elementMatA = matA[threadIdx.y*width+i];
float elementMatB = matB[i*width+threadIdx.x];
pVal += elementMatA * elementMatB;
}
matC[threadIdx.y*width+threadIdx.x] = pVal;
}
void matriksMul(float* mA, float* mB, float* mC, int width){
//Device pointer
float* a_d, *b_d, *c_d;
//Matrix size in bytes
int size = width * width * sizeof(float);
//allocate and copy matrix a
cudaError_t err = cudaMalloc((void**)&a_d, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Error invoking cudaMalloc (ERRCODE %d)\n", (int)err);
}
fprintf(stderr, "cudaMalloc (ERRCODE %d)\n", (int)err);
cudaMemcpy(a_d, mA, size, cudaMemcpyHostToDevice);
//allocate and copy matrix b
cudaMalloc((void**)&b_d, size);
cudaMemcpy(b_d, mB, size , cudaMemcpyHostToDevice );
//allocate memory to device c
cudaMalloc((void**)&c_d, size);
dim3 dimGrid(1, 1);
dim3 dimBlock(width, width);
matmul<<<dimGrid,dimBlock>>>(a_d,b_d,c_d,width);
cudaMemcpy(mC,c_d,size, cudaMemcpyDeviceToHost );
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
}
int main(void){
void matriksMul(float *, float *, float *, int);
const int width = 10;
float* M, *N, *P;
size_t size = width * width *sizeof(float);
// allocate arrays on host
M = (float *) malloc(size);
N = (float *) malloc(size);
P = (float *) malloc(size);
// float M[width*width], N[width*width], P[width*width];
for(int i = 0; i < (width*width) ; i++) {
M[i] = i;
N[i] = width*width - i;
P[i] = 0.f;
// printf("%3f %3f %3f\n", M[i], N[i], P[i]);
}
matriksMul(M, N, P, width);
for(int i = 0; i < (width*width) ; i++) {
printf("%f ", P[i]);
if( i%width == width-1){
printf("\n");
}
}
free(M);
free(N);
free(P);
return 0;
}
|
f4c763bd5e58fef7a385fd7ba4e0224eb5337819.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "darknetadd.h"
#include <iostream>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define CHECK_CUDA(call) do { \
hipError_t status = call; \
if( status != hipSuccess ) { \
return status; \
} \
} while(0)
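// shortcut_kernel implements a darknet-style shortcut (residual) add: the output starts
// as a copy of `in`, and for each batch element the first add_outputs entries also
// receive the corresponding entry of `add`, so the two inputs may have different sizes.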
template <typename Data>
__global__ void shortcut_kernel(int size, int src_outputs, int add_outputs,const Data *in,const Data *add, Data *out)
{
const int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int src_id = id;
const int src_i = src_id % src_outputs;
src_id /= src_outputs;
int src_b = src_id;
Data out_val = in[id];
if (src_i < add_outputs) {
int add_index = add_outputs*src_b + src_i;
out_val += add[add_index];
}
out[id] = out_val;
}
template <typename Data>
inline int addlayer(hipStream_t stream, int n,int input1size,int input2size, const Data* input1,const Data* input2,Data * output)
{
const int blockSize = 1024;
const int gridSize = (n + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( shortcut_kernel), dim3(gridSize), dim3(blockSize), 0, stream, n, input1size,input2size,input1,input2,output);
CHECK_CUDA(hipPeekAtLastError());
return 0;
}
ADDPlugin::ADDPlugin():_initialized(false){
}
int ADDPlugin::initialize() {
if(_initialized) return 0;
_initialized = true;
return 0;
}
void ADDPlugin::terminate() {
if (!_initialized) {
return;
}
_initialized = false;
}
ADDPlugin::~ADDPlugin() {
terminate();
}
nvinfer1::Dims ADDPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) {
assert(index == 0);
assert(inputDims);
assert(nbInputs == 2);
return inputDims[0];
}
size_t ADDPlugin::getWorkspaceSize(int maxBatchSize) const {
return 0;
}
int ADDPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace,
hipStream_t stream) {
nvinfer1::Dims input_dims1 = this->getInputDims(0);
nvinfer1::Dims input_dims2 = this->getInputDims(1);
nvinfer1::DataType type = this->getDataType();
const int size_1 = input_dims1.d[0]*input_dims1.d[1]*input_dims1.d[2];
const int size_2 = input_dims2.d[0]*input_dims2.d[1]*input_dims2.d[2];
const int num = batchSize*size_1;
switch (type)
{
case nvinfer1::DataType::kFLOAT:
{
const float* input_data_1 = static_cast<const float*>(inputs[0]);
const float* input_data_2 = static_cast<const float*>(inputs[1]);
float* out_data= static_cast<float*>(outputs[0]);
addlayer(stream,num,size_1,size_2,input_data_1,input_data_2,out_data);
break;
}
case nvinfer1::DataType::kHALF:
{
const half* input_data_1 = static_cast<const half*>(inputs[0]);
const half* input_data_2 = static_cast<const half*>(inputs[1]);
half* out_data= static_cast<half*>(outputs[0]);
addlayer(stream,num,size_1,size_2,input_data_1,input_data_2,out_data);
break;
}
default: std::cerr << "error data type" << std::endl;
}
return 0;
}
|
f4c763bd5e58fef7a385fd7ba4e0224eb5337819.cu
|
#include "darknetadd.h"
#include <iostream>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define CHECK_CUDA(call) do { \
cudaError_t status = call; \
if( status != cudaSuccess ) { \
return status; \
} \
} while(0)
template <typename Data>
__global__ void shortcut_kernel(int size, int src_outputs, int add_outputs,const Data *in,const Data *add, Data *out)
{
const int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
int src_id = id;
const int src_i = src_id % src_outputs;
src_id /= src_outputs;
int src_b = src_id;
Data out_val = in[id];
if (src_i < add_outputs) {
int add_index = add_outputs*src_b + src_i;
out_val += add[add_index];
}
out[id] = out_val;
}
template <typename Data>
inline int addlayer(cudaStream_t stream, int n,int input1size,int input2size, const Data* input1,const Data* input2,Data * output)
{
const int blockSize = 1024;
const int gridSize = (n + blockSize - 1) / blockSize;
shortcut_kernel<<<gridSize, blockSize, 0, stream>>>(n, input1size,input2size,input1,input2,output);
CHECK_CUDA(cudaPeekAtLastError());
return 0;
}
ADDPlugin::ADDPlugin():_initialized(false){
}
int ADDPlugin::initialize() {
if(_initialized) return 0;
_initialized = true;
return 0;
}
void ADDPlugin::terminate() {
if (!_initialized) {
return;
}
_initialized = false;
}
ADDPlugin::~ADDPlugin() {
terminate();
}
nvinfer1::Dims ADDPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) {
assert(index == 0);
assert(inputDims);
assert(nbInputs == 2);
return inputDims[0];
}
size_t ADDPlugin::getWorkspaceSize(int maxBatchSize) const {
return 0;
}
int ADDPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace,
cudaStream_t stream) {
nvinfer1::Dims input_dims1 = this->getInputDims(0);
nvinfer1::Dims input_dims2 = this->getInputDims(1);
nvinfer1::DataType type = this->getDataType();
const int size_1 = input_dims1.d[0]*input_dims1.d[1]*input_dims1.d[2];
const int size_2 = input_dims2.d[0]*input_dims2.d[1]*input_dims2.d[2];
const int num = batchSize*size_1;
switch (type)
{
case nvinfer1::DataType::kFLOAT:
{
const float* input_data_1 = static_cast<const float*>(inputs[0]);
const float* input_data_2 = static_cast<const float*>(inputs[1]);
float* out_data= static_cast<float*>(outputs[0]);
addlayer(stream,num,size_1,size_2,input_data_1,input_data_2,out_data);
break;
}
case nvinfer1::DataType::kHALF:
{
const half* input_data_1 = static_cast<const half*>(inputs[0]);
const half* input_data_2 = static_cast<const half*>(inputs[1]);
half* out_data= static_cast<half*>(outputs[0]);
addlayer(stream,num,size_1,size_2,input_data_1,input_data_2,out_data);
break;
}
default: std::cerr << "error data type" << std::endl;
}
return 0;
}
|
4466e113b1ec2e41388baa22168eb29470ffc2f2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <cstdint>
__global__ void cube(double* d_out, double* d_in)
{
int idx = threadIdx.x;
double f = d_in[idx];
d_out[idx] = f*f*f;
}
int main()
{
const uint64_t ARRAY_SIZE = 100;
const uint64_t ARRAY_BYTES = ARRAY_SIZE * sizeof(double);
double h_mult[ARRAY_SIZE];
for(auto i{0};i < ARRAY_SIZE;i++)
{
h_mult[i] = double(i);
}
double h_mult_out[ARRAY_SIZE];
double * d_in;
double * d_out;
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
hipMemcpy(d_in, h_mult, ARRAY_BYTES, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE) , 0, 0, d_out,d_in);
hipMemcpy(h_mult_out,d_out,ARRAY_BYTES, hipMemcpyDeviceToHost);
for(auto i{0}; i < ARRAY_SIZE; i++)
{
std::cout << h_mult_out[i] << std::endl;
}
}
|
4466e113b1ec2e41388baa22168eb29470ffc2f2.cu
|
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
__global__ void cube(double* d_out, double* d_in)
{
int idx = threadIdx.x;
double f = d_in[idx];
d_out[idx] = f*f*f;
}
int main()
{
const uint64_t ARRAY_SIZE = 100;
const uint64_t ARRAY_BYTES = ARRAY_SIZE * sizeof(double);
double h_mult[ARRAY_SIZE];
for(auto i{0};i < ARRAY_SIZE;i++)
{
h_mult[i] = double(i);
}
double h_mult_out[ARRAY_SIZE];
double * d_in;
double * d_out;
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
cudaMemcpy(d_in, h_mult, ARRAY_BYTES, cudaMemcpyHostToDevice);
cube<<<1, ARRAY_SIZE >>>(d_out,d_in);
cudaMemcpy(h_mult_out,d_out,ARRAY_BYTES, cudaMemcpyDeviceToHost);
for(auto i{0}; i < ARRAY_SIZE; i++)
{
std::cout << h_mult_out[i] << std::endl;
}
}
|
ffc618e53f5c50a8b99437926a4307adaa84e535.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/common.h"
#include <stdio.h>
// Block dimension
#define BDIMX 16
#define BDIMY 16
// macro for converting 2D indices to a linear index
#define INDEX(rows, cols, stride) (rows * stride + cols)
// function prototypes
void initialData(float*, const int);
void printData(float*, const int);
void checkResult(float*, float*, int, int);
void transposeHost(float*, float*, const int, const int);
__global__ void copyGmem(float*, float*, const int, const int);
__global__ void naiveGmem(float*, float*, const int, const int);
/*
 * Kernel that computes the matrix transpose using shared memory
*/
__global__ void transposeSmem(float *out, float *in, int nrows, int ncols) {
// TODO
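    // A minimal sketch of one possible way to fill in the TODO above: a tiled
    // shared-memory transpose. It assumes a square block (BDIMX == BDIMY) and pads
    // the tile by one column to avoid shared-memory bank conflicts.
    __shared__ float tile[BDIMY][BDIMX + 1];
    unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < nrows && col < ncols)
        tile[threadIdx.y][threadIdx.x] = in[INDEX(row, col, ncols)];
    __syncthreads();
    // Swap the roles of threadIdx.x and threadIdx.y on output so that global
    // writes stay coalesced along the rows of the transposed matrix.
    unsigned int t_row = blockIdx.x * blockDim.x + threadIdx.y;
    unsigned int t_col = blockIdx.y * blockDim.y + threadIdx.x;
    if (t_row < ncols && t_col < nrows)
        out[INDEX(t_row, t_col, nrows)] = tile[threadIdx.x][threadIdx.y];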
}
int main(int argc, char **argv) {
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting transpose at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
bool iprint = 0;
// set up array size
int nrows = 1 << 10;
int ncols = 1 << 10;
if (argc > 1)
iprint = atoi(argv[1]);
if (argc > 2)
nrows = atoi(argv[2]);
if (argc > 3)
ncols = atoi(argv[3]);
printf("\nMatrice con nrows = %d ncols = %d\n", nrows, ncols);
size_t ncells = nrows * ncols;
size_t nBytes = ncells * sizeof(float);
// execution configuration
dim3 block(BDIMX, BDIMY);
dim3 grid((ncols + block.x - 1) / block.x, (nrows + block.y - 1) / block.y);
dim3 grid2((grid.x + 2 - 1) / 2, grid.y);
// allocate host memory
float *h_A = (float *) malloc(nBytes);
float *hostRef = (float *) malloc(nBytes);
float *gpuRef = (float *) malloc(nBytes);
// initialize host array
initialData(h_A, nrows * ncols);
// transpose at host side
transposeHost(hostRef, h_A, nrows, ncols);
// allocate device memory
float *d_A, *d_C;
CHECK(hipMalloc((float** )&d_A, nBytes));
CHECK(hipMalloc((float** )&d_C, nBytes));
// copy data from host to device
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
// transpose gmem
CHECK(hipMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
double iStart = seconds();
hipLaunchKernelGGL(( copyGmem), dim3(grid), dim3(block), 0, 0, d_C, d_A, nrows, ncols);
CHECK(hipDeviceSynchronize());
double iElaps = seconds() - iStart;
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if (iprint)
printData(gpuRef, nrows * ncols);
float ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("copyGmem elapsed %f sec\n <<< grid (%d,%d) block (%d,%d)>>> "
"effective bandwidth %f GB\n", iElaps, grid.x, grid.y, block.x, block.y, ibnd);
// transpose gmem
CHECK(hipMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
hipLaunchKernelGGL(( naiveGmem), dim3(grid), dim3(block), 0, 0, d_C, d_A, nrows, ncols);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if (iprint)
printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("naiveGmem elapsed %f sec\n <<< grid (%d,%d) block (%d,%d)>>> "
"effective bandwidth %f GB\n", iElaps, grid.x, grid.y, block.x,
block.y, ibnd);
// transpose smem
CHECK(hipMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
hipLaunchKernelGGL(( transposeSmem), dim3(grid), dim3(block), 0, 0, d_C, d_A, nrows, ncols);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if (iprint)
printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("transposeSmem elapsed %f sec\n <<< grid (%d,%d) block (%d,%d)>>> "
"effective bandwidth %f GB\n", iElaps, grid.x, grid.y, block.x,
block.y, ibnd);
// free host and device memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_C));
free(h_A);
free(hostRef);
free(gpuRef);
// reset device
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
void initialData(float *in, const int size) {
for (int i = 0; i < size; i++)
in[i] = i; // (float)(rand()/INT_MAX) * 10.0f;
return;
}
void printData(float *in, const int size) {
for (int i = 0; i < size; i++)
printf("%3.0f ", in[i]);
printf("\n");
return;
}
void transposeHost(float *out, float *in, const int nrows, const int ncols) {
for (int iy = 0; iy < nrows; ++iy)
for (int ix = 0; ix < ncols; ++ix)
out[INDEX(ix, iy, nrows)] = in[INDEX(iy, ix, ncols)];
}
__global__ void copyGmem(float *out, float *in, const int nrows,
const int ncols) {
// matrix coordinate (ix,iy)
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
// transpose with boundary test
if (row < nrows && col < ncols)
// NOTE this is a transpose, not a copy
out[INDEX(col, row, nrows)] = in[INDEX(row, col, ncols)];
}
__global__ void naiveGmem(float *out, float *in, const int nrows,
const int ncols) {
// matrix coordinate (ix,iy)
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
// transpose with boundary test
if (row < nrows && col < ncols)
out[INDEX(col, row, nrows)] = in[INDEX(row, col, ncols)];
}
void checkResult(float *hostRef, float *gpuRef, int rows, int cols) {
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
int index = INDEX(i, j, cols);
if (abs(hostRef[index] - gpuRef[index]) > epsilon) {
match = 0;
printf("different on (%d, %d) (offset=%d) element in "
"transposed matrix: host %f gpu %f\n", i, j, index,
hostRef[index], gpuRef[index]);
break;
}
}
if (!match)
break;
}
if (!match)
printf("Arrays do not match.\n\n");
}
|
ffc618e53f5c50a8b99437926a4307adaa84e535.cu
|
#include "../common/common.h"
#include <stdio.h>
// Block dimension
#define BDIMX 16
#define BDIMY 16
// macro for converting 2D indices to a linear index
#define INDEX(rows, cols, stride) (rows * stride + cols)
// function prototypes
void initialData(float*, const int);
void printData(float*, const int);
void checkResult(float*, float*, int, int);
void transposeHost(float*, float*, const int, const int);
__global__ void copyGmem(float*, float*, const int, const int);
__global__ void naiveGmem(float*, float*, const int, const int);
/*
 * Kernel that computes the matrix transpose using shared memory
*/
__global__ void transposeSmem(float *out, float *in, int nrows, int ncols) {
// TODO
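    // A minimal sketch of one possible way to fill in the TODO above: a tiled
    // shared-memory transpose. It assumes a square block (BDIMX == BDIMY) and pads
    // the tile by one column to avoid shared-memory bank conflicts.
    __shared__ float tile[BDIMY][BDIMX + 1];
    unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < nrows && col < ncols)
        tile[threadIdx.y][threadIdx.x] = in[INDEX(row, col, ncols)];
    __syncthreads();
    // Swap the roles of threadIdx.x and threadIdx.y on output so that global
    // writes stay coalesced along the rows of the transposed matrix.
    unsigned int t_row = blockIdx.x * blockDim.x + threadIdx.y;
    unsigned int t_col = blockIdx.y * blockDim.y + threadIdx.x;
    if (t_row < ncols && t_col < nrows)
        out[INDEX(t_row, t_col, nrows)] = tile[threadIdx.x][threadIdx.y];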
}
int main(int argc, char **argv) {
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s starting transpose at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
bool iprint = 0;
// set up array size
int nrows = 1 << 10;
int ncols = 1 << 10;
if (argc > 1)
iprint = atoi(argv[1]);
if (argc > 2)
nrows = atoi(argv[2]);
if (argc > 3)
ncols = atoi(argv[3]);
printf("\nMatrice con nrows = %d ncols = %d\n", nrows, ncols);
size_t ncells = nrows * ncols;
size_t nBytes = ncells * sizeof(float);
// execution configuration
dim3 block(BDIMX, BDIMY);
dim3 grid((ncols + block.x - 1) / block.x, (nrows + block.y - 1) / block.y);
dim3 grid2((grid.x + 2 - 1) / 2, grid.y);
// allocate host memory
float *h_A = (float *) malloc(nBytes);
float *hostRef = (float *) malloc(nBytes);
float *gpuRef = (float *) malloc(nBytes);
// initialize host array
initialData(h_A, nrows * ncols);
// transpose at host side
transposeHost(hostRef, h_A, nrows, ncols);
// allocate device memory
float *d_A, *d_C;
CHECK(cudaMalloc((float** )&d_A, nBytes));
CHECK(cudaMalloc((float** )&d_C, nBytes));
// copy data from host to device
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
// transpose gmem
CHECK(cudaMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
double iStart = seconds();
copyGmem<<<grid, block>>>(d_C, d_A, nrows, ncols);
CHECK(cudaDeviceSynchronize());
double iElaps = seconds() - iStart;
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if (iprint)
printData(gpuRef, nrows * ncols);
float ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("copyGmem elapsed %f sec\n <<< grid (%d,%d) block (%d,%d)>>> "
"effective bandwidth %f GB\n", iElaps, grid.x, grid.y, block.x, block.y, ibnd);
// transpose gmem
CHECK(cudaMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
naiveGmem<<<grid, block>>>(d_C, d_A, nrows, ncols);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if (iprint)
printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("naiveGmem elapsed %f sec\n <<< grid (%d,%d) block (%d,%d)>>> "
"effective bandwidth %f GB\n", iElaps, grid.x, grid.y, block.x,
block.y, ibnd);
// transpose smem
CHECK(cudaMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
transposeSmem<<<grid, block>>>(d_C, d_A, nrows, ncols);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if (iprint)
printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("transposeSmem elapsed %f sec\n <<< grid (%d,%d) block (%d,%d)>>> "
"effective bandwidth %f GB\n", iElaps, grid.x, grid.y, block.x,
block.y, ibnd);
// free host and device memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_C));
free(h_A);
free(hostRef);
free(gpuRef);
// reset device
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
void initialData(float *in, const int size) {
for (int i = 0; i < size; i++)
in[i] = i; // (float)(rand()/INT_MAX) * 10.0f;
return;
}
void printData(float *in, const int size) {
for (int i = 0; i < size; i++)
printf("%3.0f ", in[i]);
printf("\n");
return;
}
void transposeHost(float *out, float *in, const int nrows, const int ncols) {
for (int iy = 0; iy < nrows; ++iy)
for (int ix = 0; ix < ncols; ++ix)
out[INDEX(ix, iy, nrows)] = in[INDEX(iy, ix, ncols)];
}
__global__ void copyGmem(float *out, float *in, const int nrows,
const int ncols) {
// matrix coordinate (ix,iy)
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
// transpose with boundary test
if (row < nrows && col < ncols)
// NOTE this is a transpose, not a copy
out[INDEX(col, row, nrows)] = in[INDEX(row, col, ncols)];
}
__global__ void naiveGmem(float *out, float *in, const int nrows,
const int ncols) {
// matrix coordinate (ix,iy)
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
// transpose with boundary test
if (row < nrows && col < ncols)
out[INDEX(col, row, nrows)] = in[INDEX(row, col, ncols)];
}
void checkResult(float *hostRef, float *gpuRef, int rows, int cols) {
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
int index = INDEX(i, j, cols);
if (abs(hostRef[index] - gpuRef[index]) > epsilon) {
match = 0;
printf("different on (%d, %d) (offset=%d) element in "
"transposed matrix: host %f gpu %f\n", i, j, index,
hostRef[index], gpuRef[index]);
break;
}
}
if (!match)
break;
}
if (!match)
printf("Arrays do not match.\n\n");
}
|
5ea4a43cc5013fa2d374011f97013747ee7cbd66.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Compile: nvcc -g -G -arch=sm_52 -std=c++11 assignment5-p3.cu -o assignment5-p3
#include <cmath>
#include <iostream>
#include <sys/time.h>
#define SIZE 4096
#define THRESHOLD (0.000001)
// Tile dimension must be at most 32 because of the threads-per-block limit
#define TileDimen (1<<5)
using std::cerr;
using std::cout;
using std::endl;
double rtclock() { // Seconds
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday(&Tp, &Tzp);
if (stat != 0) {
cout << "Error return from gettimeofday: " << stat << "\n";
}
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}
__host__ void ATAonCPU(double** M, double** P) {
for (int k = 0; k < SIZE; k++) {
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++)
P[i][j] += M[k][i] * M[k][j];
}
}
}
__host__ void check_result(double** Test, double** Ref) {
double maxdiff = 0, rel_diff = 0;
int numdiffs = 0;
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
rel_diff = (Test[i][j] - Ref[i][j]);
if (fabs(rel_diff) > THRESHOLD) {
numdiffs++;
if (rel_diff > maxdiff)
maxdiff = rel_diff;
}
}
}
if (numdiffs > 0)
cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << " Max Diff = " << maxdiff
<< "\n";
else
cout << "No differences found between base and test versions\n";
}
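// Tiled computation of B = A^T * A: each block stages TileDim x TileDim tiles of A and of
// its transpose in shared memory, accumulates the partial dot products for the upper
// triangle only (iv <= jv), and mirrors each result into the lower triangle at the end.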
__global__ void ATAkernel(double* A, double* B) {
// TODO: Fill in
int TileDim=0;
if(TileDimen>=SIZE){
TileDim = SIZE;
}
else{
TileDim = TileDimen;
}
int iv = blockDim.x*blockIdx.x + threadIdx.x;
int jv = blockDim.y*blockIdx.y + threadIdx.y;
__shared__ double tileA[TileDimen][TileDimen];
__shared__ double tileAT[TileDimen][TileDimen];
int numtiles = SIZE/TileDim;
double temp = 0;
for(int i=0;i<numtiles;i++){
int rowTileA = blockIdx.x;
int colTileA = i;
int rowTileAT = i;
int colTileAT = blockIdx.y;
int r = threadIdx.x, c = threadIdx.y;
tileA[r][c] = A[(rowTileA*TileDim + r)*SIZE + colTileA*TileDim + c];
tileAT[r][c] = A[(colTileAT*TileDim + c)*SIZE + rowTileAT*TileDim + r];
__syncthreads();
int x = threadIdx.x;
int y = threadIdx.y;
if(iv<=jv){
//unrolling below loop
int j=0;
for(;j+3<TileDim;j+=4){
temp+=tileA[x][j]*tileAT[j][y];
temp+= tileA[x][j+1]*tileAT[j+1][y];
temp+= tileA[x][j+2]*tileAT[j+2][y];
temp+= tileA[x][j+3]*tileAT[j+3][y];
}
for(;j<TileDim;j++){
temp+=tileA[x][j]*tileAT[j][y];
}
}
__syncthreads();
}
if(iv<SIZE && jv<SIZE && iv<=jv){
B[iv*SIZE + jv] = temp;
B[jv*SIZE + iv] = temp;
}
}
__global__ void ATAkernel2(double* A, double* B) {
// TODO: Fill in
int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j = (blockDim.y*blockIdx.y + threadIdx.y);
if(i<SIZE && j<SIZE && i<=j){
for(int k=0;k<SIZE;k++){
B[i*SIZE + j] += A[k*SIZE + i]*A[k*SIZE + j];
}
B[j*SIZE + i] = B[i*SIZE + j];
}
}
int main() {
cout << "Matrix Size = " << SIZE << "\n";
double** h_in = new double*[SIZE];
for (int i = 0; i < SIZE; i++) {
h_in[i] = new double[SIZE];
}
double** h_cpu_out = new double*[SIZE];
for (int i = 0; i < SIZE; i++) {
h_cpu_out[i] = new double[SIZE];
}
double** h_dev_out = new double*[SIZE];
double** h_dev_out_2 = new double*[SIZE];
for (int i = 0; i < SIZE; i++) {
h_dev_out[i] = new double[SIZE];
h_dev_out_2[i] = new double[SIZE];
}
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
h_in[i][j] = i * j * 0.25;
h_cpu_out[i][j] = 0;
h_dev_out[i][j] = 0;
h_dev_out_2[i][j] = 0;
}
}
double clkbegin = rtclock();
ATAonCPU(h_in, h_cpu_out);
double clkend = rtclock();
double cpu_time = clkend - clkbegin;
cout << "A^T.A on CPU: " << ((2.0 * SIZE * SIZE * SIZE) / cpu_time)
<< " GFLOPS; Time = " << cpu_time * 1000 << " msec" << endl;
hipError_t status;
hipEvent_t start, end;
double* d_in;
double* d_out;
float kernel_time;
// TODO: Fill in
int matsizes = SIZE*SIZE*sizeof(double);
status = hipMalloc(&d_in,matsizes);
status = hipMalloc(&d_out,matsizes);
if(status!=hipSuccess){
cout<<"Cuda Malloc Failed"<<endl;
}
double* tempin = new double[SIZE*SIZE];
for(int i=0;i<SIZE*SIZE;i++){
tempin[i] = h_in[(i/SIZE)][i%SIZE];
}
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
status = hipMemcpy(d_in, tempin, matsizes, hipMemcpyHostToDevice);
int TileDim = min(TileDimen,min((1<<5),SIZE));
int threadPB = TileDim;
int numPB = (int)ceil(((double)(SIZE))/((double)threadPB));
dim3 GridD(numPB,numPB,1);
dim3 BlockD(threadPB,threadPB,1);
hipLaunchKernelGGL(( ATAkernel), dim3(GridD),dim3(BlockD), 0, 0, d_in,d_out);
double* tempout = new double[SIZE*SIZE];
status = hipMemcpy(tempout, d_out, matsizes, hipMemcpyDeviceToHost);
hipEventRecord(end, 0);
hipEventSynchronize(end);
for(int i=0;i<SIZE*SIZE;i++){
h_dev_out[(i/SIZE)][i%SIZE] = tempout[i];
}
hipEventElapsedTime(&kernel_time, start, end);
hipEventDestroy(start);
hipEventDestroy(end);
cout << "A^T.A on GPU: " << ((2.0 * SIZE * SIZE * SIZE) / (kernel_time * 1.0e-03))
<< " GFLOPS; Time = " << kernel_time << " msec" << endl;
check_result(h_cpu_out, h_dev_out);
hipError_t status2;
hipEvent_t start2, end2;
double* d_in_2;
double* d_out_2;
float kernel_time_2;
// TODO: Fill in
status2 = hipMalloc(&d_in_2,matsizes);
status2 = hipMalloc(&d_out_2,matsizes);
if(status2!=hipSuccess){
cout<<"Cuda Malloc Failed"<<endl;
}
double* tempin_2 = new double[SIZE*SIZE];
for(int i=0;i<SIZE*SIZE;i++){
tempin_2[i] = h_in[(i/SIZE)][i%SIZE];
}
hipEventCreate(&start2);
hipEventCreate(&end2);
hipEventRecord(start2, 0);
status2 = hipMemcpy(d_in_2, tempin_2, matsizes, hipMemcpyHostToDevice);
int threadPB2 = min(32,SIZE);
int numPB2 = (int)ceil(((double)(SIZE))/((double)threadPB2));
dim3 GridD2(numPB2,numPB2,1);
dim3 BlockD2(threadPB2,threadPB2,1);
hipLaunchKernelGGL(( ATAkernel2), dim3(GridD2),dim3(BlockD2), 0, 0, d_in_2,d_out_2);
double* tempout_2 = new double[SIZE*SIZE];
status2 = hipMemcpy(tempout_2, d_out_2, matsizes, hipMemcpyDeviceToHost);
hipEventRecord(end2, 0);
hipEventSynchronize(end2);
for(int i=0;i<SIZE*SIZE;i++){
h_dev_out_2[(i/SIZE)][i%SIZE] = tempout_2[i];
}
hipEventElapsedTime(&kernel_time_2, start2, end2);
hipEventDestroy(start2);
hipEventDestroy(end2);
cout << "A^T.A 2 on GPU: " << ((2.0 * SIZE * SIZE * SIZE) / (kernel_time * 1.0e-03))
<< " GFLOPS; Time = " << kernel_time_2 << " msec" << endl;
check_result(h_cpu_out, h_dev_out_2);
// Free device memory
hipFree(d_in);
hipFree(d_out);
hipFree(d_in_2);
hipFree(d_out_2);
  // the host buffers were allocated with new[], so release them with delete[]
  for (int i = 0; i < SIZE; i++) {
    delete[] h_in[i]; delete[] h_cpu_out[i]; delete[] h_dev_out[i]; delete[] h_dev_out_2[i];
  }
  delete[] h_in; delete[] h_cpu_out; delete[] h_dev_out; delete[] h_dev_out_2;
return EXIT_SUCCESS;
}
|
5ea4a43cc5013fa2d374011f97013747ee7cbd66.cu
|
// Compile: nvcc -g -G -arch=sm_52 -std=c++11 assignment5-p3.cu -o assignment5-p3
#include <cmath>
#include <iostream>
#include <sys/time.h>
#define SIZE 4096
#define THRESHOLD (0.000001)
// Tile dimension must be at most 32 because of the threads-per-block limit
#define TileDimen (1<<5)
using std::cerr;
using std::cout;
using std::endl;
double rtclock() { // Seconds
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday(&Tp, &Tzp);
if (stat != 0) {
cout << "Error return from gettimeofday: " << stat << "\n";
}
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}
__host__ void ATAonCPU(double** M, double** P) {
for (int k = 0; k < SIZE; k++) {
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++)
P[i][j] += M[k][i] * M[k][j];
}
}
}
__host__ void check_result(double** Test, double** Ref) {
double maxdiff = 0, rel_diff = 0;
int numdiffs = 0;
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
rel_diff = (Test[i][j] - Ref[i][j]);
if (fabs(rel_diff) > THRESHOLD) {
numdiffs++;
if (rel_diff > maxdiff)
maxdiff = rel_diff;
}
}
}
if (numdiffs > 0)
cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << " Max Diff = " << maxdiff
<< "\n";
else
cout << "No differences found between base and test versions\n";
}
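// Tiled computation of B = A^T * A: each block stages TileDim x TileDim tiles of A and of
// its transpose in shared memory, accumulates the partial dot products for the upper
// triangle only (iv <= jv), and mirrors each result into the lower triangle at the end.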
__global__ void ATAkernel(double* A, double* B) {
// TODO: Fill in
int TileDim=0;
if(TileDimen>=SIZE){
TileDim = SIZE;
}
else{
TileDim = TileDimen;
}
int iv = blockDim.x*blockIdx.x + threadIdx.x;
int jv = blockDim.y*blockIdx.y + threadIdx.y;
__shared__ double tileA[TileDimen][TileDimen];
__shared__ double tileAT[TileDimen][TileDimen];
int numtiles = SIZE/TileDim;
double temp = 0;
for(int i=0;i<numtiles;i++){
int rowTileA = blockIdx.x;
int colTileA = i;
int rowTileAT = i;
int colTileAT = blockIdx.y;
int r = threadIdx.x, c = threadIdx.y;
tileA[r][c] = A[(rowTileA*TileDim + r)*SIZE + colTileA*TileDim + c];
tileAT[r][c] = A[(colTileAT*TileDim + c)*SIZE + rowTileAT*TileDim + r];
__syncthreads();
int x = threadIdx.x;
int y = threadIdx.y;
if(iv<=jv){
//unrolling below loop
int j=0;
for(;j+3<TileDim;j+=4){
temp+=tileA[x][j]*tileAT[j][y];
temp+= tileA[x][j+1]*tileAT[j+1][y];
temp+= tileA[x][j+2]*tileAT[j+2][y];
temp+= tileA[x][j+3]*tileAT[j+3][y];
}
for(;j<TileDim;j++){
temp+=tileA[x][j]*tileAT[j][y];
}
}
__syncthreads();
}
if(iv<SIZE && jv<SIZE && iv<=jv){
B[iv*SIZE + jv] = temp;
B[jv*SIZE + iv] = temp;
}
}
__global__ void ATAkernel2(double* A, double* B) {
// TODO: Fill in
int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j = (blockDim.y*blockIdx.y + threadIdx.y);
if(i<SIZE && j<SIZE && i<=j){
for(int k=0;k<SIZE;k++){
B[i*SIZE + j] += A[k*SIZE + i]*A[k*SIZE + j];
}
B[j*SIZE + i] = B[i*SIZE + j];
}
}
int main() {
cout << "Matrix Size = " << SIZE << "\n";
double** h_in = new double*[SIZE];
for (int i = 0; i < SIZE; i++) {
h_in[i] = new double[SIZE];
}
double** h_cpu_out = new double*[SIZE];
for (int i = 0; i < SIZE; i++) {
h_cpu_out[i] = new double[SIZE];
}
double** h_dev_out = new double*[SIZE];
double** h_dev_out_2 = new double*[SIZE];
for (int i = 0; i < SIZE; i++) {
h_dev_out[i] = new double[SIZE];
h_dev_out_2[i] = new double[SIZE];
}
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
h_in[i][j] = i * j * 0.25;
h_cpu_out[i][j] = 0;
h_dev_out[i][j] = 0;
h_dev_out_2[i][j] = 0;
}
}
double clkbegin = rtclock();
ATAonCPU(h_in, h_cpu_out);
double clkend = rtclock();
double cpu_time = clkend - clkbegin;
cout << "A^T.A on CPU: " << ((2.0 * SIZE * SIZE * SIZE) / cpu_time)
<< " GFLOPS; Time = " << cpu_time * 1000 << " msec" << endl;
cudaError_t status;
cudaEvent_t start, end;
double* d_in;
double* d_out;
float kernel_time;
// TODO: Fill in
int matsizes = SIZE*SIZE*sizeof(double);
status = cudaMalloc(&d_in,matsizes);
status = cudaMalloc(&d_out,matsizes);
if(status!=cudaSuccess){
cout<<"Cuda Malloc Failed"<<endl;
}
double* tempin = new double[SIZE*SIZE];
for(int i=0;i<SIZE*SIZE;i++){
tempin[i] = h_in[(i/SIZE)][i%SIZE];
}
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_in, tempin, matsizes, cudaMemcpyHostToDevice);
int TileDim = min(TileDimen,min((1<<5),SIZE));
int threadPB = TileDim;
int numPB = (int)ceil(((double)(SIZE))/((double)threadPB));
dim3 GridD(numPB,numPB,1);
dim3 BlockD(threadPB,threadPB,1);
ATAkernel<<<GridD,BlockD>>>(d_in,d_out);
double* tempout = new double[SIZE*SIZE];
status = cudaMemcpy(tempout, d_out, matsizes, cudaMemcpyDeviceToHost);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
for(int i=0;i<SIZE*SIZE;i++){
h_dev_out[(i/SIZE)][i%SIZE] = tempout[i];
}
cudaEventElapsedTime(&kernel_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
cout << "A^T.A on GPU: " << ((2.0 * SIZE * SIZE * SIZE) / (kernel_time * 1.0e-03))
<< " GFLOPS; Time = " << kernel_time << " msec" << endl;
check_result(h_cpu_out, h_dev_out);
cudaError_t status2;
cudaEvent_t start2, end2;
double* d_in_2;
double* d_out_2;
float kernel_time_2;
// TODO: Fill in
status2 = cudaMalloc(&d_in_2,matsizes);
status2 = cudaMalloc(&d_out_2,matsizes);
if(status2!=cudaSuccess){
cout<<"Cuda Malloc Failed"<<endl;
}
double* tempin_2 = new double[SIZE*SIZE];
for(int i=0;i<SIZE*SIZE;i++){
tempin_2[i] = h_in[(i/SIZE)][i%SIZE];
}
cudaEventCreate(&start2);
cudaEventCreate(&end2);
cudaEventRecord(start2, 0);
status2 = cudaMemcpy(d_in_2, tempin_2, matsizes, cudaMemcpyHostToDevice);
int threadPB2 = min(32,SIZE);
int numPB2 = (int)ceil(((double)(SIZE))/((double)threadPB2));
dim3 GridD2(numPB2,numPB2,1);
dim3 BlockD2(threadPB2,threadPB2,1);
ATAkernel2<<<GridD2,BlockD2>>>(d_in_2,d_out_2);
double* tempout_2 = new double[SIZE*SIZE];
status2 = cudaMemcpy(tempout_2, d_out_2, matsizes, cudaMemcpyDeviceToHost);
cudaEventRecord(end2, 0);
cudaEventSynchronize(end2);
for(int i=0;i<SIZE*SIZE;i++){
h_dev_out_2[(i/SIZE)][i%SIZE] = tempout_2[i];
}
cudaEventElapsedTime(&kernel_time_2, start2, end2);
cudaEventDestroy(start2);
cudaEventDestroy(end2);
cout << "A^T.A 2 on GPU: " << ((2.0 * SIZE * SIZE * SIZE) / (kernel_time * 1.0e-03))
<< " GFLOPS; Time = " << kernel_time_2 << " msec" << endl;
check_result(h_cpu_out, h_dev_out_2);
// Free device memory
cudaFree(d_in);
cudaFree(d_out);
cudaFree(d_in_2);
cudaFree(d_out_2);
  // the host buffers were allocated with new[], so release them with delete[]
  for (int i = 0; i < SIZE; i++) {
    delete[] h_in[i]; delete[] h_cpu_out[i]; delete[] h_dev_out[i]; delete[] h_dev_out_2[i];
  }
  delete[] h_in; delete[] h_cpu_out; delete[] h_dev_out; delete[] h_dev_out_2;
return EXIT_SUCCESS;
}
|
0900989a1f22bc23f9c9ae74c8bdc167f9ef3fc7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
__global__ void maxi(int *d_a,int n){
int strid=(n/2);
int idx=threadIdx.x;
    while(strid>0){
        if(idx<strid)
            if(d_a[idx]<d_a[idx+strid])
                d_a[idx]=d_a[idx+strid];
        // make sure every thread has finished this step before the stride is halved
        __syncthreads();
        strid=(strid/2);
    }
}
int main(){
int n=512;
int *a;
a=(int*)malloc(n*sizeof(int));
for(int i=0; i<n; i++)
a[i]=i*i*i;
int *d_a;
hipMalloc((void**)&d_a,n*sizeof(int));
hipMemcpy(d_a,a,n*sizeof(int),hipMemcpyHostToDevice);
dim3 blockD(n,1,1);
dim3 gridD(1,1,1);
hipLaunchKernelGGL(( maxi), dim3(gridD),dim3(blockD), 0, 0, d_a,n);
int *b;
b=(int*)malloc(n*sizeof(int));
hipMemcpy(b,d_a,n*sizeof(int),hipMemcpyDeviceToHost);
printf("%d\n",b[0]);
}
|
0900989a1f22bc23f9c9ae74c8bdc167f9ef3fc7.cu
|
#include<stdio.h>
__global__ void maxi(int *d_a,int n){
int strid=(n/2);
int idx=threadIdx.x;
    while(strid>0){
        if(idx<strid)
            if(d_a[idx]<d_a[idx+strid])
                d_a[idx]=d_a[idx+strid];
        // make sure every thread has finished this step before the stride is halved
        __syncthreads();
        strid=(strid/2);
    }
}
int main(){
int n=512;
int *a;
a=(int*)malloc(n*sizeof(int));
for(int i=0; i<n; i++)
a[i]=i*i*i;
int *d_a;
cudaMalloc((void**)&d_a,n*sizeof(int));
cudaMemcpy(d_a,a,n*sizeof(int),cudaMemcpyHostToDevice);
dim3 blockD(n,1,1);
dim3 gridD(1,1,1);
maxi<<<gridD,blockD>>>(d_a,n);
int *b;
b=(int*)malloc(n*sizeof(int));
cudaMemcpy(b,d_a,n*sizeof(int),cudaMemcpyDeviceToHost);
printf("%d\n",b[0]);
}
|
8adf03cd1bab9c9df1a45114753aa638b5358a2a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
#include <hip/hip_runtime.h>
#include <chrono> // for high_resolution_clock
#define BLOCK_SIZE 32
#define RADIUS 4
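// Percentage median filter: each block stages a (BLOCK_SIZE + 2*RADIUS)^2 halo tile in
// shared memory; every thread then gathers its neighbourhood per colour channel, partially
// sorts it with an insertion sort, and writes the average of the values closest to the
// median, with `percent` controlling how many neighbours around the median are averaged.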
__global__ void denoising(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst, int rows, int cols, int kernelSize, int percent)
{
__shared__ uchar3 temp[BLOCK_SIZE + 2 * RADIUS][BLOCK_SIZE + 2 * RADIUS];
// local indices
int lindex_X = threadIdx.x + RADIUS;
int lindex_Y = threadIdx.y + RADIUS;
int dst_x = blockDim.x * blockIdx.x + lindex_X;
int dst_y = blockDim.y * blockIdx.y + lindex_Y;
if (dst_x < cols && dst_y < rows)
{
// Read input elements into shared memory
temp[lindex_Y][lindex_X] = src(dst_y, dst_x);
if (threadIdx.x < RADIUS) {
temp[lindex_Y][lindex_X - RADIUS] = src(dst_y, dst_x - RADIUS);
if (dst_x + BLOCK_SIZE < cols)
temp[lindex_Y][lindex_X + BLOCK_SIZE] = src(dst_y, dst_x + BLOCK_SIZE);
}
if (threadIdx.y < RADIUS) {
temp[lindex_Y - RADIUS][lindex_X] = src(dst_y - RADIUS, dst_x);
if (dst_y + BLOCK_SIZE < rows)
temp[lindex_Y + BLOCK_SIZE][lindex_X] = src(dst_y + BLOCK_SIZE, dst_x);
}
if (threadIdx.y < RADIUS && threadIdx.x < RADIUS) {
temp[lindex_Y - RADIUS][lindex_X - RADIUS] = src(dst_y - RADIUS, dst_x - RADIUS);
if (dst_y + BLOCK_SIZE < rows && dst_x + BLOCK_SIZE < cols)
temp[lindex_Y + BLOCK_SIZE][lindex_X + BLOCK_SIZE] = src(dst_y + BLOCK_SIZE, dst_x + BLOCK_SIZE);
if (dst_x + BLOCK_SIZE < cols)
temp[lindex_Y - RADIUS][lindex_X + BLOCK_SIZE] = src(dst_y - RADIUS, dst_x + BLOCK_SIZE);
if (dst_y + BLOCK_SIZE < rows)
temp[lindex_Y + BLOCK_SIZE][lindex_X - RADIUS] = src(dst_y + BLOCK_SIZE, dst_x - RADIUS);
}
}
// Synchronize (ensure all the data is available)
__syncthreads();
const int k = (kernelSize - 1.0) / 2.0;
if (dst_x < cols - k && dst_y < rows - k &&
dst_x > k && dst_y > k)
{
float rNeighbors[144];
float gNeighbors[144];
float bNeighbors[144];
float rValue = 0.0;
float gValue = 0.0;
float bValue = 0.0;
int counter = 0;
for (int i = -RADIUS; i <= RADIUS; i++)
{
for (int j = -RADIUS; j <= RADIUS; j++)
{
uchar3 cur = temp[lindex_Y + i][lindex_X + j];
rNeighbors[counter] = (float)cur.z;
gNeighbors[counter] = (float)cur.y;
bNeighbors[counter] = (float)cur.x;
counter++;
}
}
float key; int j;  // the neighbourhood values are floats, so the sort key should be a float as well
for (int i = 1; i < k * k; i++)
{
key = rNeighbors[i];
j = i - 1;
while (j >= 0 && rNeighbors[j] > key)
{
rNeighbors[j + 1] = rNeighbors[j];
j = j - 1;
}
rNeighbors[j + 1] = key;
}
for (int i = 1; i < k * k; i++)
{
key = gNeighbors[i];
j = i - 1;
while (j >= 0 && gNeighbors[j] > key)
{
gNeighbors[j + 1] = gNeighbors[j];
j = j - 1;
}
gNeighbors[j + 1] = key;
}
for (int i = 1; i < k * k; i++)
{
key = bNeighbors[i];
j = i - 1;
while (j >= 0 && bNeighbors[j] > key)
{
bNeighbors[j + 1] = bNeighbors[j];
j = j - 1;
}
bNeighbors[j + 1] = key;
}
int medianIndx = 0;
if (kernelSize % 2 == 0) {
medianIndx = kernelSize * kernelSize / 2;
}
else {
medianIndx = (kernelSize * kernelSize - 1) / 2;
}
int numEl = (kernelSize * kernelSize * int(percent) / 100) / 2;
for (int w = (medianIndx - numEl); w <= (medianIndx + numEl); w++) {
rValue += rNeighbors[w];
gValue += gNeighbors[w];
bValue += bNeighbors[w];
}
if (numEl >= 1) {
rValue /= float(2 * numEl + 1);
gValue /= float(2 * numEl + 1);
bValue /= float(2 * numEl + 1);
}
dst(dst_y, dst_x).z = (unsigned char)(rValue);
dst(dst_y, dst_x).y = (unsigned char)(gValue);
dst(dst_y, dst_x).x = (unsigned char)(bValue);
}
}
int divUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
void denoisingCUDA(cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, int kernelSize, int percent)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
denoising<<<grid, block>>>(src, dst, dst.rows, dst.cols, kernelSize, percent);
}
|
8adf03cd1bab9c9df1a45114753aa638b5358a2a.cu
|
#include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
#include <cuda_runtime.h>
#include <chrono> // for high_resolution_clock
#define BLOCK_SIZE 32
#define RADIUS 4
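// Percentage median filter: each block stages a (BLOCK_SIZE + 2*RADIUS)^2 halo tile in
// shared memory; every thread then gathers its neighbourhood per colour channel, partially
// sorts it with an insertion sort, and writes the average of the values closest to the
// median, with `percent` controlling how many neighbours around the median are averaged.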
__global__ void denoising(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst, int rows, int cols, int kernelSize, int percent)
{
__shared__ uchar3 temp[BLOCK_SIZE + 2 * RADIUS][BLOCK_SIZE + 2 * RADIUS];
// local indices
int lindex_X = threadIdx.x + RADIUS;
int lindex_Y = threadIdx.y + RADIUS;
int dst_x = blockDim.x * blockIdx.x + lindex_X;
int dst_y = blockDim.y * blockIdx.y + lindex_Y;
if (dst_x < cols && dst_y < rows)
{
// Read input elements into shared memory
temp[lindex_Y][lindex_X] = src(dst_y, dst_x);
if (threadIdx.x < RADIUS) {
temp[lindex_Y][lindex_X - RADIUS] = src(dst_y, dst_x - RADIUS);
if (dst_x + BLOCK_SIZE < cols)
temp[lindex_Y][lindex_X + BLOCK_SIZE] = src(dst_y, dst_x + BLOCK_SIZE);
}
if (threadIdx.y < RADIUS) {
temp[lindex_Y - RADIUS][lindex_X] = src(dst_y - RADIUS, dst_x);
if (dst_y + BLOCK_SIZE < rows)
temp[lindex_Y + BLOCK_SIZE][lindex_X] = src(dst_y + BLOCK_SIZE, dst_x);
}
if (threadIdx.y < RADIUS && threadIdx.x < RADIUS) {
temp[lindex_Y - RADIUS][lindex_X - RADIUS] = src(dst_y - RADIUS, dst_x - RADIUS);
if (dst_y + BLOCK_SIZE < rows && dst_x + BLOCK_SIZE < cols)
temp[lindex_Y + BLOCK_SIZE][lindex_X + BLOCK_SIZE] = src(dst_y + BLOCK_SIZE, dst_x + BLOCK_SIZE);
if (dst_x + BLOCK_SIZE < cols)
temp[lindex_Y - RADIUS][lindex_X + BLOCK_SIZE] = src(dst_y - RADIUS, dst_x + BLOCK_SIZE);
if (dst_y + BLOCK_SIZE < rows)
temp[lindex_Y + BLOCK_SIZE][lindex_X - RADIUS] = src(dst_y + BLOCK_SIZE, dst_x - RADIUS);
}
}
// Synchronize (ensure all the data is available)
__syncthreads();
const int k = (kernelSize - 1.0) / 2.0;
if (dst_x < cols - k && dst_y < rows - k &&
dst_x > k && dst_y > k)
{
float rNeighbors[144];
float gNeighbors[144];
float bNeighbors[144];
float rValue = 0.0;
float gValue = 0.0;
float bValue = 0.0;
int counter = 0;
for (int i = -RADIUS; i <= RADIUS; i++)
{
for (int j = -RADIUS; j <= RADIUS; j++)
{
uchar3 cur = temp[lindex_Y + i][lindex_X + j];
rNeighbors[counter] = (float)cur.z;
gNeighbors[counter] = (float)cur.y;
bNeighbors[counter] = (float)cur.x;
counter++;
}
}
float key; int j;  // the neighbourhood values are floats, so the sort key should be a float as well
for (int i = 1; i < k * k; i++)
{
key = rNeighbors[i];
j = i - 1;
while (j >= 0 && rNeighbors[j] > key)
{
rNeighbors[j + 1] = rNeighbors[j];
j = j - 1;
}
rNeighbors[j + 1] = key;
}
for (int i = 1; i < k * k; i++)
{
key = gNeighbors[i];
j = i - 1;
while (j >= 0 && gNeighbors[j] > key)
{
gNeighbors[j + 1] = gNeighbors[j];
j = j - 1;
}
gNeighbors[j + 1] = key;
}
for (int i = 1; i < k * k; i++)
{
key = bNeighbors[i];
j = i - 1;
while (j >= 0 && bNeighbors[j] > key)
{
bNeighbors[j + 1] = bNeighbors[j];
j = j - 1;
}
bNeighbors[j + 1] = key;
}
int medianIndx = 0;
if (kernelSize % 2 == 0) {
medianIndx = kernelSize * kernelSize / 2;
}
else {
medianIndx = (kernelSize * kernelSize - 1) / 2;
}
int numEl = (kernelSize * kernelSize * int(percent) / 100) / 2;
for (int w = (medianIndx - numEl); w <= (medianIndx + numEl); w++) {
rValue += rNeighbors[w];
gValue += gNeighbors[w];
bValue += bNeighbors[w];
}
if (numEl >= 1) {
rValue /= float(2 * numEl + 1);
gValue /= float(2 * numEl + 1);
bValue /= float(2 * numEl + 1);
}
dst(dst_y, dst_x).z = (unsigned char)(rValue);
dst(dst_y, dst_x).y = (unsigned char)(gValue);
dst(dst_y, dst_x).x = (unsigned char)(bValue);
}
}
int divUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
void denoisingCUDA(cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, int kernelSize, int percent)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
denoising<<<grid, block>>>(src, dst, dst.rows, dst.cols, kernelSize, percent);
}
|
1421a8aa0feba1ac981359a45ce17ee928837bb6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2020 by Contributors
*/
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <memory>
#include "xgboost/data.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
#include "xgboost/host_device_vector.h"
#include "../gbm/gbtree_model.h"
#include "../data/ellpack_page.cuh"
#include "../data/device_adapter.cuh"
#include "../common/common.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr) :
d_data{data}, d_row_ptr{row_ptr} {}
};
struct SparsePageLoader {
bool use_shared;
common::Span<const bst_row_t> d_row_ptr;
common::Span<const Entry> d_data;
bst_feature_t num_features;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start)
: use_shared(use_shared),
d_row_ptr(data.d_row_ptr),
d_data(data.d_data),
num_features(num_features),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = d_row_ptr[global_idx];
bst_uint elem_end = d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = d_data[elem_idx - entry_start];
smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetFvalue(int ridx, int fidx) const {
if (use_shared) {
return smem[threadIdx.x * num_features + fidx];
} else {
// Binary search
auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start);
auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start);
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
}
};
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start)
: matrix{m} {}
__device__ __forceinline__ float GetFvalue(int ridx, int fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
struct CuPyAdapterLoader {
data::CupyAdapterBatch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
DEV_INLINE CuPyAdapterLoader(data::CupyAdapterBatch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) :
batch{batch},
columns{num_features},
use_shared{use_shared} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
smem[threadIdx.x * num_features + (i - beg)] = batch.GetElement(i).value;
}
}
}
__syncthreads();
}
DEV_INLINE float GetFvalue(bst_row_t ridx, bst_feature_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
return batch.GetElement(ridx * columns + fidx).value;
}
};
struct CuDFAdapterLoader {
data::CudfAdapterBatch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
DEV_INLINE CuDFAdapterLoader(data::CudfAdapterBatch const batch, bool use_shared,
bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start)
: batch{batch}, columns{num_features}, use_shared{use_shared} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
for (size_t i = 0; i < columns; ++i) {
smem[threadIdx.x * columns + i] = batch.GetValue(global_idx, i);
}
}
}
__syncthreads();
}
DEV_INLINE float GetFvalue(bst_row_t ridx, bst_feature_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
return batch.GetValue(ridx, fidx);
}
};
template <typename Loader>
__device__ float GetLeafWeight(bst_uint ridx, const RegTree::Node* tree,
Loader* loader) {
RegTree::Node n = tree[0];
while (!n.IsLeaf()) {
float fvalue = loader->GetFvalue(ridx, n.SplitIndex());
// Missing value
if (isnan(fvalue)) {
n = tree[n.DefaultChild()];
} else {
if (fvalue < n.SplitCond()) {
n = tree[n.LeftChild()];
} else {
n = tree[n.RightChild()];
}
}
}
return n.LeafValue();
}
template <typename Loader, typename Data>
__global__ void PredictKernel(Data data,
common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t> d_tree_segments,
common::Span<int> d_tree_group,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start,
bool use_shared, int num_group) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const RegTree::Node* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
float leaf = GetLeafWeight(global_idx, d_tree, &loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const RegTree::Node* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, &loader);
}
}
}
class DeviceModel {
public:
dh::device_vector<RegTree::Node> nodes;
dh::device_vector<size_t> tree_segments;
dh::device_vector<int> tree_group;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
void CopyModel(const gbm::GBTreeModel& model,
const thrust::host_vector<size_t>& h_tree_segments,
const thrust::host_vector<RegTree::Node>& h_nodes,
size_t tree_begin, size_t tree_end) {
nodes.resize(h_nodes.size());
dh::safe_cuda(hipMemcpyAsync(nodes.data().get(), h_nodes.data(),
sizeof(RegTree::Node) * h_nodes.size(),
hipMemcpyHostToDevice));
tree_segments.resize(h_tree_segments.size());
dh::safe_cuda(hipMemcpyAsync(tree_segments.data().get(), h_tree_segments.data(),
sizeof(size_t) * h_tree_segments.size(),
hipMemcpyHostToDevice));
tree_group.resize(model.tree_info.size());
dh::safe_cuda(hipMemcpyAsync(tree_group.data().get(), model.tree_info.data(),
sizeof(int) * model.tree_info.size(),
hipMemcpyHostToDevice));
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param_->num_output_group;
}
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(hipSetDevice(gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
thrust::host_vector<size_t> h_tree_segments{};
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
thrust::host_vector<RegTree::Node> h_nodes(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
std::copy(src_nodes.begin(), src_nodes.end(),
h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
}
CopyModel(model, h_tree_segments, h_nodes, tree_begin, tree_end);
}
};
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch, size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset) {
batch.offset.SetDevice(generic_param_->gpu_id);
batch.data.SetDevice(generic_param_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
auto shared_memory_bytes =
static_cast<size_t>(sizeof(float) * num_features * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes_) {
shared_memory_bytes = 0;
use_shared = false;
}
size_t entry_start = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan()};
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<SparsePageLoader, SparsePageView>,
data,
dh::ToSpan(model_.nodes), predictions->DeviceSpan().subspan(batch_offset),
dh::ToSpan(model_.tree_segments), dh::ToSpan(model_.tree_group),
model_.tree_beg_, model_.tree_end_, num_features, num_rows,
entry_start, use_shared, model_.num_group);
}
void PredictInternal(EllpackDeviceAccessor const& batch, HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>,
batch,
dh::ToSpan(model_.nodes), out_preds->DeviceSpan().subspan(batch_offset),
dh::ToSpan(model_.tree_segments), dh::ToSpan(model_.tree_group),
model_.tree_beg_, model_.tree_end_, batch.NumFeatures(), num_rows,
entry_start, use_shared, model_.num_group);
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
if (tree_end - tree_begin == 0) {
return;
}
model_.Init(model, tree_begin, tree_end, generic_param_->gpu_id);
out_preds->SetDevice(generic_param_->gpu_id);
if (dmat->PageExists<EllpackPage>()) {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>()) {
this->PredictInternal(
page.Impl()->GetDeviceAccessor(generic_param_->gpu_id), out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
} else {
size_t batch_offset = 0;
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, model.learner_model_param_->num_feature,
out_preds, batch_offset);
batch_offset += batch.Size() * model.learner_model_param_->num_output_group;
}
}
}
public:
explicit GPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
~GPUPredictor() override {
if (generic_param_->gpu_id >= 0) {
dh::safe_cuda(hipSetDevice(generic_param_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
// This function is duplicated with CPU predictor PredictBatch, see comments in there.
// FIXME(trivialfis): Remove the duplication.
std::lock_guard<std::mutex> const guard(lock_);
int device = generic_param_->gpu_id;
CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data.";
ConfigureDevice(device);
CHECK_EQ(tree_begin, 0);
auto* out_preds = &predts->predictions;
CHECK_GE(predts->version, tree_begin);
if (out_preds->Size() == 0 && dmat->Info().num_row_ != 0) {
CHECK_EQ(predts->version, 0);
}
if (predts->version == 0) {
this->InitOutPredictions(dmat->Info(), out_preds, model);
}
uint32_t const output_groups = model.learner_model_param_->num_output_group;
CHECK_NE(output_groups, 0);
uint32_t real_ntree_limit = ntree_limit * output_groups;
if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) {
real_ntree_limit = static_cast<uint32_t>(model.trees.size());
}
uint32_t const end_version = (tree_begin + real_ntree_limit) / output_groups;
if (predts->version > end_version) {
CHECK_NE(ntree_limit, 0);
this->InitOutPredictions(dmat->Info(), out_preds, model);
predts->version = 0;
}
uint32_t const beg_version = predts->version;
CHECK_LE(beg_version, end_version);
if (beg_version < end_version) {
this->DevicePredictInternal(dmat, out_preds, model,
beg_version * output_groups,
end_version * output_groups);
}
uint32_t delta = end_version - beg_version;
CHECK_LE(delta, model.trees.size());
predts->Update(delta);
CHECK(out_preds->Size() == output_groups * dmat->Info().num_row_ ||
out_preds->Size() == dmat->Info().num_row_);
}
template <typename Adapter, typename Loader, typename Batch>
void DispatchedInplacePredict(dmlc::any const &x,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds,
uint32_t tree_begin, uint32_t tree_end) const {
auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id);
uint32_t const output_groups = model.learner_model_param_->num_output_group;
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, this->generic_param_->gpu_id);
auto m = dmlc::get<Adapter>(x);
CHECK_EQ(m.NumColumns(), model.learner_model_param_->num_feature)
<< "Number of columns in data must equal to trained model.";
CHECK_EQ(this->generic_param_->gpu_id, m.DeviceIdx())
<< "XGBoost is running on device: " << this->generic_param_->gpu_id << ", "
<< "but data is on: " << m.DeviceIdx();
MetaInfo info;
info.num_col_ = m.NumColumns();
info.num_row_ = m.NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(info.num_row_, BLOCK_THREADS));
auto shared_memory_bytes =
static_cast<size_t>(sizeof(float) * m.NumColumns() * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
use_shared = false;
}
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, Batch>,
m.Value(),
dh::ToSpan(d_model.nodes), out_preds->predictions.DeviceSpan(),
dh::ToSpan(d_model.tree_segments), dh::ToSpan(d_model.tree_group),
tree_begin, tree_end, m.NumColumns(), info.num_row_,
entry_start, use_shared, output_groups);
}
void InplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model,
float missing, PredictionCacheEntry *out_preds,
uint32_t tree_begin, unsigned tree_end) const override {
auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id);
if (x.type() == typeid(data::CupyAdapter)) {
this->DispatchedInplacePredict<data::CupyAdapter, CuPyAdapterLoader, data::CupyAdapterBatch>(
x, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(data::CudfAdapter)) {
this->DispatchedInplacePredict<data::CudfAdapter, CuDFAdapterLoader, data::CudfAdapterBatch>(
x, model, missing, out_preds, tree_begin, tree_end);
} else {
LOG(FATAL) << "Only CuPy and CuDF are supported by GPU Predictor.";
}
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.learner_model_param_->num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->SetDevice(generic_param_->gpu_id);
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(base_margin.Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.learner_model_param_->base_score);
}
}
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictContribution(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
std::vector<bst_float>* tree_weights,
bool approximate, int condition,
unsigned condition_feature) override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictInteractionContributions(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
std::vector<bst_float>* tree_weights,
bool approximate) override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
void ConfigureDevice(int device) {
if (device >= 0) {
max_shared_memory_bytes_ = dh::MaxSharedMemory(device);
}
}
std::mutex lock_;
DeviceModel model_;
size_t max_shared_memory_bytes_;
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](GenericParameter const* generic_param) {
return new GPUPredictor(generic_param);
});
} // namespace predictor
} // namespace xgboost
|
1421a8aa0feba1ac981359a45ce17ee928837bb6.cu
|
/*!
* Copyright 2017-2020 by Contributors
*/
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <memory>
#include "xgboost/data.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
#include "xgboost/host_device_vector.h"
#include "../gbm/gbtree_model.h"
#include "../data/ellpack_page.cuh"
#include "../data/device_adapter.cuh"
#include "../common/common.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr) :
d_data{data}, d_row_ptr{row_ptr} {}
};
struct SparsePageLoader {
bool use_shared;
common::Span<const bst_row_t> d_row_ptr;
common::Span<const Entry> d_data;
bst_feature_t num_features;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start)
: use_shared(use_shared),
d_row_ptr(data.d_row_ptr),
d_data(data.d_data),
num_features(num_features),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = d_row_ptr[global_idx];
bst_uint elem_end = d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = d_data[elem_idx - entry_start];
smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetFvalue(int ridx, int fidx) const {
if (use_shared) {
return smem[threadIdx.x * num_features + fidx];
} else {
// Binary search
auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start);
auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start);
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
}
};
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start)
: matrix{m} {}
__device__ __forceinline__ float GetFvalue(int ridx, int fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
struct CuPyAdapterLoader {
data::CupyAdapterBatch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
DEV_INLINE CuPyAdapterLoader(data::CupyAdapterBatch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) :
batch{batch},
columns{num_features},
use_shared{use_shared} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
smem[threadIdx.x * num_features + (i - beg)] = batch.GetElement(i).value;
}
}
}
__syncthreads();
}
DEV_INLINE float GetFvalue(bst_row_t ridx, bst_feature_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
return batch.GetElement(ridx * columns + fidx).value;
}
};
struct CuDFAdapterLoader {
data::CudfAdapterBatch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
DEV_INLINE CuDFAdapterLoader(data::CudfAdapterBatch const batch, bool use_shared,
bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start)
: batch{batch}, columns{num_features}, use_shared{use_shared} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
for (size_t i = 0; i < columns; ++i) {
smem[threadIdx.x * columns + i] = batch.GetValue(global_idx, i);
}
}
}
__syncthreads();
}
DEV_INLINE float GetFvalue(bst_row_t ridx, bst_feature_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
return batch.GetValue(ridx, fidx);
}
};
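// Walks one tree from the root: a missing value follows the default child, otherwise the
// split condition picks the branch; returns the leaf value reached.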
template <typename Loader>
__device__ float GetLeafWeight(bst_uint ridx, const RegTree::Node* tree,
Loader* loader) {
RegTree::Node n = tree[0];
while (!n.IsLeaf()) {
float fvalue = loader->GetFvalue(ridx, n.SplitIndex());
// Missing value
if (isnan(fvalue)) {
n = tree[n.DefaultChild()];
} else {
if (fvalue < n.SplitCond()) {
n = tree[n.LeftChild()];
} else {
n = tree[n.RightChild()];
}
}
}
return n.LeafValue();
}
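// One thread per row: leaf values are accumulated over trees in [tree_begin, tree_end),
// either into a single sum (one output group) or scattered per output group.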
template <typename Loader, typename Data>
__global__ void PredictKernel(Data data,
common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t> d_tree_segments,
common::Span<int> d_tree_group,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start,
bool use_shared, int num_group) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const RegTree::Node* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
float leaf = GetLeafWeight(global_idx, d_tree, &loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const RegTree::Node* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, &loader);
}
}
}
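// Device-resident copy of the ensemble: flattened node array, per-tree offsets into it, and
// the output group of each tree.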
class DeviceModel {
public:
dh::device_vector<RegTree::Node> nodes;
dh::device_vector<size_t> tree_segments;
dh::device_vector<int> tree_group;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
void CopyModel(const gbm::GBTreeModel& model,
const thrust::host_vector<size_t>& h_tree_segments,
const thrust::host_vector<RegTree::Node>& h_nodes,
size_t tree_begin, size_t tree_end) {
nodes.resize(h_nodes.size());
dh::safe_cuda(cudaMemcpyAsync(nodes.data().get(), h_nodes.data(),
sizeof(RegTree::Node) * h_nodes.size(),
cudaMemcpyHostToDevice));
tree_segments.resize(h_tree_segments.size());
dh::safe_cuda(cudaMemcpyAsync(tree_segments.data().get(), h_tree_segments.data(),
sizeof(size_t) * h_tree_segments.size(),
cudaMemcpyHostToDevice));
tree_group.resize(model.tree_info.size());
dh::safe_cuda(cudaMemcpyAsync(tree_group.data().get(), model.tree_info.data(),
sizeof(int) * model.tree_info.size(),
cudaMemcpyHostToDevice));
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param_->num_output_group;
}
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(cudaSetDevice(gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
thrust::host_vector<size_t> h_tree_segments{};
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
thrust::host_vector<RegTree::Node> h_nodes(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
std::copy(src_nodes.begin(), src_nodes.end(),
h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
}
CopyModel(model, h_tree_segments, h_nodes, tree_begin, tree_end);
}
};
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch, size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset) {
batch.offset.SetDevice(generic_param_->gpu_id);
batch.data.SetDevice(generic_param_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
auto shared_memory_bytes =
static_cast<size_t>(sizeof(float) * num_features * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes_) {
shared_memory_bytes = 0;
use_shared = false;
}
size_t entry_start = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan()};
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<SparsePageLoader, SparsePageView>,
data,
dh::ToSpan(model_.nodes), predictions->DeviceSpan().subspan(batch_offset),
dh::ToSpan(model_.tree_segments), dh::ToSpan(model_.tree_group),
model_.tree_beg_, model_.tree_end_, num_features, num_rows,
entry_start, use_shared, model_.num_group);
}
void PredictInternal(EllpackDeviceAccessor const& batch, HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>,
batch,
dh::ToSpan(model_.nodes), out_preds->DeviceSpan().subspan(batch_offset),
dh::ToSpan(model_.tree_segments), dh::ToSpan(model_.tree_group),
model_.tree_beg_, model_.tree_end_, batch.NumFeatures(), num_rows,
entry_start, use_shared, model_.num_group);
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
if (tree_end - tree_begin == 0) {
return;
}
model_.Init(model, tree_begin, tree_end, generic_param_->gpu_id);
out_preds->SetDevice(generic_param_->gpu_id);
if (dmat->PageExists<EllpackPage>()) {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>()) {
this->PredictInternal(
page.Impl()->GetDeviceAccessor(generic_param_->gpu_id), out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
} else {
size_t batch_offset = 0;
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, model.learner_model_param_->num_feature,
out_preds, batch_offset);
batch_offset += batch.Size() * model.learner_model_param_->num_output_group;
}
}
}
public:
explicit GPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
~GPUPredictor() override {
if (generic_param_->gpu_id >= 0) {
dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
// This function is duplicated with CPU predictor PredictBatch, see comments in there.
// FIXME(trivialfis): Remove the duplication.
std::lock_guard<std::mutex> const guard(lock_);
int device = generic_param_->gpu_id;
CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data.";
ConfigureDevice(device);
CHECK_EQ(tree_begin, 0);
auto* out_preds = &predts->predictions;
CHECK_GE(predts->version, tree_begin);
if (out_preds->Size() == 0 && dmat->Info().num_row_ != 0) {
CHECK_EQ(predts->version, 0);
}
if (predts->version == 0) {
this->InitOutPredictions(dmat->Info(), out_preds, model);
}
uint32_t const output_groups = model.learner_model_param_->num_output_group;
CHECK_NE(output_groups, 0);
uint32_t real_ntree_limit = ntree_limit * output_groups;
if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) {
real_ntree_limit = static_cast<uint32_t>(model.trees.size());
}
uint32_t const end_version = (tree_begin + real_ntree_limit) / output_groups;
if (predts->version > end_version) {
CHECK_NE(ntree_limit, 0);
this->InitOutPredictions(dmat->Info(), out_preds, model);
predts->version = 0;
}
uint32_t const beg_version = predts->version;
CHECK_LE(beg_version, end_version);
if (beg_version < end_version) {
this->DevicePredictInternal(dmat, out_preds, model,
beg_version * output_groups,
end_version * output_groups);
}
uint32_t delta = end_version - beg_version;
CHECK_LE(delta, model.trees.size());
predts->Update(delta);
CHECK(out_preds->Size() == output_groups * dmat->Info().num_row_ ||
out_preds->Size() == dmat->Info().num_row_);
}
template <typename Adapter, typename Loader, typename Batch>
void DispatchedInplacePredict(dmlc::any const &x,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds,
uint32_t tree_begin, uint32_t tree_end) const {
auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id);
uint32_t const output_groups = model.learner_model_param_->num_output_group;
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, this->generic_param_->gpu_id);
auto m = dmlc::get<Adapter>(x);
CHECK_EQ(m.NumColumns(), model.learner_model_param_->num_feature)
<< "Number of columns in data must equal to trained model.";
CHECK_EQ(this->generic_param_->gpu_id, m.DeviceIdx())
<< "XGBoost is running on device: " << this->generic_param_->gpu_id << ", "
<< "but data is on: " << m.DeviceIdx();
MetaInfo info;
info.num_col_ = m.NumColumns();
info.num_row_ = m.NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(info.num_row_, BLOCK_THREADS));
auto shared_memory_bytes =
static_cast<size_t>(sizeof(float) * m.NumColumns() * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
use_shared = false;
}
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, Batch>,
m.Value(),
dh::ToSpan(d_model.nodes), out_preds->predictions.DeviceSpan(),
dh::ToSpan(d_model.tree_segments), dh::ToSpan(d_model.tree_group),
tree_begin, tree_end, m.NumColumns(), info.num_row_,
entry_start, use_shared, output_groups);
}
void InplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model,
float missing, PredictionCacheEntry *out_preds,
uint32_t tree_begin, unsigned tree_end) const override {
auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id);
if (x.type() == typeid(data::CupyAdapter)) {
this->DispatchedInplacePredict<data::CupyAdapter, CuPyAdapterLoader, data::CupyAdapterBatch>(
x, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(data::CudfAdapter)) {
this->DispatchedInplacePredict<data::CudfAdapter, CuDFAdapterLoader, data::CudfAdapterBatch>(
x, model, missing, out_preds, tree_begin, tree_end);
} else {
LOG(FATAL) << "Only CuPy and CuDF are supported by GPU Predictor.";
}
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.learner_model_param_->num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->SetDevice(generic_param_->gpu_id);
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(base_margin.Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.learner_model_param_->base_score);
}
}
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictContribution(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
std::vector<bst_float>* tree_weights,
bool approximate, int condition,
unsigned condition_feature) override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictInteractionContributions(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
std::vector<bst_float>* tree_weights,
bool approximate) override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
void ConfigureDevice(int device) {
if (device >= 0) {
max_shared_memory_bytes_ = dh::MaxSharedMemory(device);
}
}
std::mutex lock_;
DeviceModel model_;
size_t max_shared_memory_bytes_;
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](GenericParameter const* generic_param) {
return new GPUPredictor(generic_param);
});
} // namespace predictor
} // namespace xgboost
|
62d1756d6e6f91b934a6846933a9b958592e3bc9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Match8small(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
#define NRX 2
__shared__ float4 buffer1[M7W*NDIM/4];
__shared__ float4 buffer2[M7H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R/NRX)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7H;j+=M7H/M7R/NRX)
buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
float score[M7R][NRX];
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1[NRX];
for (int i=0;i<NRX;i++)
v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (d + (M7W/NRX)*i + ix)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*(NDIM/4) + d];
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
__syncthreads();
}
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
|
62d1756d6e6f91b934a6846933a9b958592e3bc9.cu
|
#include "includes.h"
__global__ void Match8small(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
#define NRX 2
__shared__ float4 buffer1[M7W*NDIM/4];
__shared__ float4 buffer2[M7H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R/NRX)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7H;j+=M7H/M7R/NRX)
buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
float score[M7R][NRX];
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1[NRX];
for (int i=0;i<NRX;i++)
v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (d + (M7W/NRX)*i + ix)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*(NDIM/4) + d];
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
__syncthreads();
}
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
|
4ce6798c1c91ff03154b8bc2ccde07f7c0d76194.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <hip/hip_runtime.h>
int div1(int M, int N) {
return ((M - 1) / N + 1);
}
#define KESTREL_KERNEL_LOOP(i, n) \
for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
#define MAX_THREAD_IN_BLOCK (512)
#define KESTREL_KERNEL_CFG(total) \
((total + MAX_THREAD_IN_BLOCK - 1) / MAX_THREAD_IN_BLOCK), MAX_THREAD_IN_BLOCK
texture<unsigned char, 1, hipReadModeElementType> texture1_;
texture<unsigned char, 2, hipReadModeElementType> texture2_;
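// gray_kernel1-6 convert BGR pixels to grayscale (0.114*B + 0.587*G + 0.299*R) with
// different access patterns: 1D/2D global-memory indexing, a grid-stride loop, 1D and 2D
// texture fetches, and a per-channel variant driven by blockIdx.z.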
void __global__ gray_kernel1(unsigned char* data, float* output, int w, int h , int stride) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < w * h) {
int _w = index % w;
int _h = index / w;
unsigned char* pixel = (unsigned char*)(data + (stride * _h + _w * 3));
output[index] = pixel[0] * 0.114 + pixel[1] * 0.587 + pixel[2] * 0.299;
}
}
__global__ void gray_kernel2(uint8_t *data, float *outdata, int32_t w, int32_t h, int32_t stride)
{
KESTREL_KERNEL_LOOP(index, w * h)
{
int _h = index / w;
int _w = index % w;
const uint8_t *IMAGE = (uint8_t *)(data + stride * _h + 3 * sizeof(uint8_t) * _w);
outdata[index] = IMAGE[0] * 0.114 + IMAGE[1] * 0.587 + IMAGE[2] * 0.299;
}
}
__global__ void gray_kernel3(unsigned char* data, float* output, int w, int h, int stride) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < w && y < h) {
unsigned char* pixel = data + (y * stride + x * 3);
output[y * w + x] = pixel[0] * 0.114 + pixel[1] * 0.587 + pixel[2] * 0.299;
}
}
__global__ void gray_kernel4(unsigned char* data, float* output, int w, int h, int stride) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < w && y < h) {
//unsigned char* pixel = tex1Dfetch(texture1_, stride * y + x * 3);
output[y * w + x] = tex1Dfetch(texture1_, stride * y + x * 3) * 0.114 + tex1Dfetch(texture1_, stride * y + x * 3 + 1) * 0.587 + tex1Dfetch(texture1_, stride * y + x * 3 + 2) * 0.299;
//output[y*w + x] = tex2D(texture1_, x, y) * 0.114 + tex2D(texture1_, x+1, y) * 0.587 + tex2D(texture1_, x+2, y) * 0.299;
}
}
__global__ void gray_kernel5(unsigned char* data, float* output, int w, int h, int stride) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = x * 3;
if (x < w && y < h) {
output[y * w + x] = tex2D(texture2_, index, y) * 0.114 + tex2D(texture2_, index + 1, y) * 0.587 + tex2D(texture2_, index + 2, y) * 0.299;
}
}
__global__ void gray_kernel6(unsigned char* data, float* output, int w, int h, int stride) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
float scalar[] = {0.114f, 0.587f, 0.299f};
int index = x * 3;
if (x < w && y < h) {
//output[y * w + x] = tex2D(texture2_, index, y) * 0.114 + tex2D(texture2_, index + 1, y) * 0.587 + tex2D(texture2_, index + 2, y) * 0.299;
output[y*w +x] += tex2D(texture2_, index + z, y) * scalar[z];
//atomicAdd(output + (y*w + x), tex2D(texture2_, index + z, y) * scalar[z]);
}
}
int main(int argc, char* argv[]) {
const std::string filename(argv[1]);
const int batch_size = atoi(argv[2]);
std::cout << "batch size:" << batch_size << std::endl;
cv::Mat image = cv::imread(filename);
if (!image.isContinuous()) {
std::cout << "read image fail." << std::endl;
return -1;
}
unsigned char* d_image_input = nullptr;
float* d_image_gray = nullptr;
unsigned char* du_image_gray = nullptr;
const int w = image.cols;
const int h = image.rows;
const int stride = image.step;
const int channel = image.channels();
printf("input dims [%d, %d, %d, %d].\n", channel, w, h, stride);
hipMalloc((void**)&d_image_input, w * h * channel * sizeof(char));
hipMalloc((void**)&d_image_gray, w * h * sizeof(float));
hipMemcpy(d_image_input, image.data, w * h * channel * sizeof(char), hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
const int threadsInBlock = 512;
const int block_thread = 16;
for (int i = 0; i < batch_size; ++i) {
/*
dim3 grid(div1(w * h, threadsInBlock), 1, 1);
dim3 block(threadsInBlock, 1, 1);
//gray_kernel1<<<grid, block>>>(d_image_input, d_image_gray, w, h, stride);
gray_kernel2<<<KESTREL_KERNEL_CFG(w * h), 0>>>(d_image_input, d_image_gray, w, h, stride);
*/
//hipChannelFormatDesc channelDesc =
// hipCreateChannelDesc<unsigned char>();
//hipBindTexture(NULL, texture1_, d_image_input, stride * h);
dim3 block(block_thread, block_thread, 1);
dim3 grid(div1(w, block_thread), div1(h, block_thread), 1);
hipLaunchKernelGGL(( gray_kernel3), dim3(grid), dim3(block), 0, 0, d_image_input, d_image_gray, w, h, stride);
//gray_kernel4<<<grid, block>>>(d_image_input, d_image_gray, w, h, stride);
/*
hipChannelFormatDesc channelDesc =
hipCreateChannelDesc<unsigned char>();
hipError_t err = hipBindTexture2D(0, texture2_, d_image_input, channelDesc, w*3, h, stride);
dim3 block(block_thread, block_thread, 1);
dim3 grid(div1(w, block_thread), div1(h, block_thread), 1);
gray_kernel5<<<grid, block>>>(d_image_input, d_image_gray, w, h, stride);
hipError_t err = hipBindTexture2D(0, texture2_, d_image_input, channelDesc, w*3, h, stride);
std::cout << "bindtexture err:" << err << std::endl;
dim3 block(block_thread, block_thread, 1);
dim3 grid(div1(w, block_thread), div1(h, block_thread), 3);
gray_kernel6<<<grid, block>>>(d_image_input, d_image_gray, w, h, stride);
*/
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float used_time = 0.0f;
hipEventElapsedTime(&used_time, start, stop);
int len = w * h;
float *h_buffer = new float[len];
hipMemcpy(h_buffer, d_image_gray, w * h * sizeof(float), hipMemcpyDeviceToHost);
cv::Mat h_img(h, w, CV_32FC1, h_buffer);
cv::Mat gray_img;
h_img.convertTo(gray_img, CV_8U);
cv::imshow("picture", gray_img);
cv::waitKey();
std::cout << "cuda kernel run time:" << used_time << "ms" << std::endl;
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(d_image_input);
hipFree(d_image_gray);
return 0;
}
|
4ce6798c1c91ff03154b8bc2ccde07f7c0d76194.cu
|
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cuda_runtime.h>
int div1(int M, int N) {
return ((M - 1) / N + 1);
}
#define KESTREL_KERNEL_LOOP(i, n) \
for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
#define MAX_THREAD_IN_BLOCK (512)
#define KESTREL_KERNEL_CFG(total) \
((total + MAX_THREAD_IN_BLOCK - 1) / MAX_THREAD_IN_BLOCK), MAX_THREAD_IN_BLOCK
texture<unsigned char, 1, cudaReadModeElementType> texture1_;
texture<unsigned char, 2, cudaReadModeElementType> texture2_;
void __global__ gray_kernel1(unsigned char* data, float* output, int w, int h , int stride) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < w * h) {
int _w = index % w;
int _h = index / w;
unsigned char* pixel = (unsigned char*)(data + (stride * _h + _w * 3));
output[index] = pixel[0] * 0.114 + pixel[1] * 0.587 + pixel[2] * 0.299;
}
}
__global__ void gray_kernel2(uint8_t *data, float *outdata, int32_t w, int32_t h, int32_t stride)
{
KESTREL_KERNEL_LOOP(index, w * h)
{
int _h = index / w;
int _w = index % w;
const uint8_t *IMAGE = (uint8_t *)(data + stride * _h + 3 * sizeof(uint8_t) * _w);
outdata[index] = IMAGE[0] * 0.114 + IMAGE[1] * 0.587 + IMAGE[2] * 0.299;
}
}
__global__ void gray_kernel3(unsigned char* data, float* output, int w, int h, int stride) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < w && y < h) {
unsigned char* pixel = data + (y * stride + x * 3);
output[y * w + x] = pixel[0] * 0.114 + pixel[1] * 0.587 + pixel[2] * 0.299;
}
}
__global__ void gray_kernel4(unsigned char* data, float* output, int w, int h, int stride) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < w && y < h) {
//unsigned char* pixel = tex1Dfetch(texture1_, stride * y + x * 3);
output[y * w + x] = tex1Dfetch(texture1_, stride * y + x * 3) * 0.114 + tex1Dfetch(texture1_, stride * y + x * 3 + 1) * 0.587 + tex1Dfetch(texture1_, stride * y + x * 3 + 2) * 0.299;
//output[y*w + x] = tex2D(texture1_, x, y) * 0.114 + tex2D(texture1_, x+1, y) * 0.587 + tex2D(texture1_, x+2, y) * 0.299;
}
}
__global__ void gray_kernel5(unsigned char* data, float* output, int w, int h, int stride) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = x * 3;
if (x < w && y < h) {
output[y * w + x] = tex2D(texture2_, index, y) * 0.114 + tex2D(texture2_, index + 1, y) * 0.587 + tex2D(texture2_, index + 2, y) * 0.299;
}
}
__global__ void gray_kernel6(unsigned char* data, float* output, int w, int h, int stride) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
float scalar[] = {0.114f, 0.587f, 0.299f};
int index = x * 3;
if (x < w && y < h) {
//output[y * w + x] = tex2D(texture2_, index, y) * 0.114 + tex2D(texture2_, index + 1, y) * 0.587 + tex2D(texture2_, index + 2, y) * 0.299;
output[y*w +x] += tex2D(texture2_, index + z, y) * scalar[z];
//atomicAdd(output + (y*w + x), tex2D(texture2_, index + z, y) * scalar[z]);
}
}
int main(int argc, char* argv[]) {
const std::string filename(argv[1]);
const int batch_size = atoi(argv[2]);
std::cout << "batch size:" << batch_size << std::endl;
cv::Mat image = cv::imread(filename);
if (!image.isContinuous()) {
std::cout << "read image fail." << std::endl;
return -1;
}
unsigned char* d_image_input = nullptr;
float* d_image_gray = nullptr;
unsigned char* du_image_gray = nullptr;
const int w = image.cols;
const int h = image.rows;
const int stride = image.step;
const int channel = image.channels();
printf("input dims [%d, %d, %d, %d].\n", channel, w, h, stride);
cudaMalloc((void**)&d_image_input, w * h * channel * sizeof(char));
cudaMalloc((void**)&d_image_gray, w * h * sizeof(float));
cudaMemcpy(d_image_input, image.data, w * h * channel * sizeof(char), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
const int threadsInBlock = 512;
const int block_thread = 16;
for (int i = 0; i < batch_size; ++i) {
/*
dim3 grid(div1(w * h, threadsInBlock), 1, 1);
dim3 block(threadsInBlock, 1, 1);
//gray_kernel1<<<grid, block>>>(d_image_input, d_image_gray, w, h, stride);
gray_kernel2<<<KESTREL_KERNEL_CFG(w * h), 0>>>(d_image_input, d_image_gray, w, h, stride);
*/
//cudaChannelFormatDesc channelDesc =
// cudaCreateChannelDesc<unsigned char>();
//cudaBindTexture(NULL, texture1_, d_image_input, stride * h);
dim3 block(block_thread, block_thread, 1);
dim3 grid(div1(w, block_thread), div1(h, block_thread), 1);
gray_kernel3<<<grid, block>>>(d_image_input, d_image_gray, w, h, stride);
//gray_kernel4<<<grid, block>>>(d_image_input, d_image_gray, w, h, stride);
/*
cudaChannelFormatDesc channelDesc =
cudaCreateChannelDesc<unsigned char>();
cudaError err = cudaBindTexture2D(0, texture2_, d_image_input, channelDesc, w*3, h, stride);
dim3 block(block_thread, block_thread, 1);
dim3 grid(div1(w, block_thread), div1(h, block_thread), 1);
gray_kernel5<<<grid, block>>>(d_image_input, d_image_gray, w, h, stride);
cudaError err = cudaBindTexture2D(0, texture2_, d_image_input, channelDesc, w*3, h, stride);
std::cout << "bindtexture err:" << err << std::endl;
dim3 block(block_thread, block_thread, 1);
dim3 grid(div1(w, block_thread), div1(h, block_thread), 3);
gray_kernel6<<<grid, block>>>(d_image_input, d_image_gray, w, h, stride);
*/
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float used_time = 0.0f;
cudaEventElapsedTime(&used_time, start, stop);
int len = w * h;
float *h_buffer = new float[len];
cudaMemcpy(h_buffer, d_image_gray, w * h * sizeof(float), cudaMemcpyDeviceToHost);
cv::Mat h_img(h, w, CV_32FC1, h_buffer);
cv::Mat gray_img;
h_img.convertTo(gray_img, CV_8U);
cv::imshow("picture", gray_img);
cv::waitKey();
std::cout << "cuda kernel run time:" << used_time << "ms" << std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(d_image_input);
cudaFree(d_image_gray);
return 0;
}
|
b9932ee138540ac5c03acba1cb3125c376629d08.hip
|
// !!! This is a file automatically generated by hipify!!!
// to avoid highlight problems
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdlib.h> // imported for rand() which generates a number between 0 & RAND_MAX
#include <time.h> // imported for the time() function and also the clock function
#include <limits> // for a large value
#include <cmath> // for exponentiation
using namespace std;
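// One thread per point: scans every other point and records the index of its nearest
// neighbour, using the squared distance in the x/y plane (z is ignored).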
__global__ void FindClosestPoint(float3 *points, int *closestPoint, const int numberPoints)
{
// used to identify the thread that is currently running
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// now find the closest point to each point
// 'i' represents the current point that we are finding the closest point to!
int distanceBetweenPoints = 9999999, tempDistance = 0;
for (int j = 0; j < numberPoints; j++)
if (idx != j) // don't check the distance between a point and itself
{
tempDistance = pow((points[idx].x - points[j].x), 2) + pow((points[idx].y - points[j].y), 2);
if (tempDistance < distanceBetweenPoints)
{
distanceBetweenPoints = tempDistance;
closestPoint[idx] = j;
}
}
}
int main()
{
srand(time(NULL)); // used to initialize the seed for the random number generator
const int numberPoints = 1000;
clock_t startTime, endTime;
float3 *points = new float3[numberPoints];
float3 *pointsDeviceCopy;
int *closestPointDevice, *closestPoint = new int[numberPoints];
// initialize the points with random numbers
for (int i = 0; i < numberPoints; i++)
{
points[i].x = rand() % 1000;
points[i].y = rand() % 1000;
points[i].z = rand() % 1000;
}
// print the points initialized
for (int i = 0; i < numberPoints; i++)
cout << points[i].x << "\t" << points[i].y << "\t" << points[i].z << endl;
cout << endl;
// initialize memory in the GPU for calculation
if (hipMalloc(&pointsDeviceCopy, sizeof(float3) * numberPoints) != hipSuccess)
{
cout << "Couldn't initialize memory in the GPU for pointsDeviceCopy" << endl;
delete[] points;
delete[] closestPoint;
return 0;
}
if (hipMalloc(&closestPointDevice, sizeof(int) * numberPoints) != hipSuccess)
{
cout << "Couldn't initialize memory in the GPU for closestPointDevice" << endl;
hipFree(pointsDeviceCopy);
delete[] points;
delete[] closestPoint;
return 0;
}
if (hipMemcpy(pointsDeviceCopy, points, sizeof(float3) * numberPoints, hipMemcpyHostToDevice) != hipSuccess)
{
cout << "Could not copy points to pointsDeviceCopy" << endl;
hipFree(pointsDeviceCopy);
hipFree(closestPointDevice);
delete[] points;
delete[] closestPoint;
return 0;
}
// now find the distance between all points
startTime = clock();
// since a block can have up to 1024 threads, we can use a single block
hipLaunchKernelGGL(( FindClosestPoint), dim3(1), dim3(numberPoints), 0, 0, pointsDeviceCopy, closestPointDevice, numberPoints);
if (hipMemcpy(closestPoint, closestPointDevice, sizeof(int) * numberPoints, hipMemcpyDeviceToHost) != hipSuccess)
{
cout << "Could not get the output!";
hipFree(pointsDeviceCopy);
hipFree(closestPointDevice);
delete[] points;
delete[] closestPoint;
return 0;
}
endTime = clock() - startTime;
delete[] points;
delete[] closestPoint;
hipFree(closestPointDevice);
hipFree(pointsDeviceCopy);
cout << "Time it took was " << ((float)endTime / CLOCKS_PER_SEC) << endl;
return 0;
}
|
b9932ee138540ac5c03acba1cb3125c376629d08.cu
|
// to avoid highlight problems
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdlib.h> // imported for rand() which generates a number between 0 & RAND_MAX
#include <time.h> // imported for the time() function and also the clock function
#include <limits> // for a large value
#include <cmath> // for exponentiation
using namespace std;
__global__ void FindClosestPoint(float3 *points, int *closestPoint, const int numberPoints)
{
// used to identify the thread that is currently running
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// now find the closest point to each point
// 'i' represents the current point that we are finding the closest point to!
int distanceBetweenPoints = 9999999, tempDistance = 0;
for (int j = 0; j < numberPoints; j++)
if (idx != j) // don't check the distance between a point and itself
{
tempDistance = pow((points[idx].x - points[j].x), 2) + pow((points[idx].y - points[j].y), 2);
if (tempDistance < distanceBetweenPoints)
{
distanceBetweenPoints = tempDistance;
closestPoint[idx] = j;
}
}
}
int main()
{
srand(time(NULL)); // used to initialize the seed for the random number generator
const int numberPoints = 1000;
clock_t startTime, endTime;
float3 *points = new float3[numberPoints];
float3 *pointsDeviceCopy;
int *closestPointDevice, *closestPoint = new int[numberPoints];
// initialize the points with random numbers
for (int i = 0; i < numberPoints; i++)
{
points[i].x = rand() % 1000;
points[i].y = rand() % 1000;
points[i].z = rand() % 1000;
}
// print the points initialized
for (int i = 0; i < numberPoints; i++)
cout << points[i].x << "\t" << points[i].y << "\t" << points[i].z << endl;
cout << endl;
// initialize memory in the GPU for calculation
if (cudaMalloc(&pointsDeviceCopy, sizeof(float3) * numberPoints) != cudaSuccess)
{
cout << "Couldn't initialize memory in the GPU for pointsDeviceCopy" << endl;
delete[] points;
delete[] closestPoint;
return 0;
}
if (cudaMalloc(&closestPointDevice, sizeof(int) * numberPoints) != cudaSuccess)
{
cout << "Couldn't initialize memory in the GPU for closestPointDevice" << endl;
cudaFree(pointsDeviceCopy);
delete[] points;
delete[] closestPoint;
return 0;
}
if (cudaMemcpy(pointsDeviceCopy, points, sizeof(float3) * numberPoints, cudaMemcpyHostToDevice) != cudaSuccess)
{
cout << "Could not copy points to pointsDeviceCopy" << endl;
cudaFree(pointsDeviceCopy);
cudaFree(closestPointDevice);
delete[] points;
delete[] closestPoint;
return 0;
}
// now find the distance between all points
startTime = clock();
// since a block can have up to 1024 threads, we can use a single block
FindClosestPoint<<<1, numberPoints>>>(pointsDeviceCopy, closestPointDevice, numberPoints);
if (cudaMemcpy(closestPoint, closestPointDevice, sizeof(int) * numberPoints, cudaMemcpyDeviceToHost) != cudaSuccess)
{
cout << "Could not get the output!";
cudaFree(pointsDeviceCopy);
cudaFree(closestPointDevice);
delete[] points;
delete[] closestPoint;
return 0;
}
endTime = clock() - startTime;
delete[] points;
delete[] closestPoint;
cudaFree(closestPointDevice);
cudaFree(pointsDeviceCopy);
cout << "Time it took was " << ((float)endTime / CLOCKS_PER_SEC) << endl;
return 0;
}
|
5ba85a733d2994e3023f6d7912c943b92334526b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "VanLeerRadialKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
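// Timing harness: for every matrix size and block shape, the grid is padded to a multiple of
// the block, the kernel is launched once and synchronized, warmed up ten more times, and then
// 1000 back-to-back launches are timed with steady_clock.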
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *Rinf = NULL;
hipMalloc(&Rinf, XSIZE*YSIZE);
double *Rsup = NULL;
hipMalloc(&Rsup, XSIZE*YSIZE);
double *QRStar = NULL;
hipMalloc(&QRStar, XSIZE*YSIZE);
double *DensStar = NULL;
hipMalloc(&DensStar, XSIZE*YSIZE);
double *Vrad = NULL;
hipMalloc(&Vrad, XSIZE*YSIZE);
double *LostByDisk = NULL;
hipMalloc(&LostByDisk, XSIZE*YSIZE);
int nsec = 1;
int nrad = 1;
double dt = 1;
int OpenInner = 1;
double *Qbase = NULL;
hipMalloc(&Qbase, XSIZE*YSIZE);
double *invSurf = NULL;
hipMalloc(&invSurf, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(VanLeerRadialKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, Rinf, Rsup, QRStar, DensStar, Vrad, LostByDisk, nsec, nrad, dt, OpenInner, Qbase, invSurf);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(VanLeerRadialKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, Rinf, Rsup, QRStar, DensStar, Vrad, LostByDisk, nsec, nrad, dt, OpenInner, Qbase, invSurf);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(VanLeerRadialKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, Rinf, Rsup, QRStar, DensStar, Vrad, LostByDisk, nsec, nrad, dt, OpenInner, Qbase, invSurf);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
5ba85a733d2994e3023f6d7912c943b92334526b.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "VanLeerRadialKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *Rinf = NULL;
cudaMalloc(&Rinf, XSIZE*YSIZE);
double *Rsup = NULL;
cudaMalloc(&Rsup, XSIZE*YSIZE);
double *QRStar = NULL;
cudaMalloc(&QRStar, XSIZE*YSIZE);
double *DensStar = NULL;
cudaMalloc(&DensStar, XSIZE*YSIZE);
double *Vrad = NULL;
cudaMalloc(&Vrad, XSIZE*YSIZE);
double *LostByDisk = NULL;
cudaMalloc(&LostByDisk, XSIZE*YSIZE);
int nsec = 1;
int nrad = 1;
double dt = 1;
int OpenInner = 1;
double *Qbase = NULL;
cudaMalloc(&Qbase, XSIZE*YSIZE);
double *invSurf = NULL;
cudaMalloc(&invSurf, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
VanLeerRadialKernel<<<gridBlock,threadBlock>>>(Rinf,Rsup,QRStar,DensStar,Vrad,LostByDisk,nsec,nrad,dt,OpenInner,Qbase,invSurf);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
VanLeerRadialKernel<<<gridBlock,threadBlock>>>(Rinf,Rsup,QRStar,DensStar,Vrad,LostByDisk,nsec,nrad,dt,OpenInner,Qbase,invSurf);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
VanLeerRadialKernel<<<gridBlock,threadBlock>>>(Rinf,Rsup,QRStar,DensStar,Vrad,LostByDisk,nsec,nrad,dt,OpenInner,Qbase,invSurf);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
7441933d5e5761a0f7ef58097a431cfdf91e5aef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "AddVector.h"
#include <iostream>
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructor *|
\*-------------------------------------*/
AddVector::AddVector(float* ptrV1, float* ptrV2, float* ptrW, int n) :
ptrV1(ptrV1), ptrV2(ptrV2), ptrW(ptrW), n(n)
{
this->sizeOctet = n * sizeof(float); // bytes
// MM
{
// MM (malloc Device)
{
HANDLE_ERROR(hipMalloc(&ptrDevV1, sizeOctet));
// TODO ptrV2
// TODO ptrW
}
// MM (memset Device)
{
HANDLE_ERROR(hipMemset(ptrDevW, 0, sizeOctet));
}
// MM (copy Host->Device)
{
HANDLE_ERROR(
hipMemcpy(ptrDevV1, ptrV1, sizeOctet,
hipMemcpyHostToDevice));
// TODO ptrV2
}
Device::lastCudaError("AddVector MM (end allocation)"); // temp debug
}
// Grid
{
this->dg = dim3(16, 2, 1); // say, to be tuned for the GPU
this->db = dim3(32, 4, 1); // say, to be tuned for the GPU
Device::gridHeuristic(dg, db);
}
}
AddVector::~AddVector(void)
{
//MM (device free)
{
HANDLE_ERROR(hipFree(ptrDevV1));
// TODO ptrV2
// TODO ptrW
Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug
}
}
/*--------------------------------------*\
|* Method *|
\*-------------------------------------*/
void AddVector::run()
{
Device::lastCudaError("addVecteur (before)"); // temp debug
hipLaunchKernelGGL(addVector, dim3(dg), dim3(db), 0, 0, ptrDevV1, ptrDevV2, ptrDevW, n); // asynchronous
Device::lastCudaError("addVecteur (after)"); // temp debug
Device::synchronize(); // Temp, only for printf in GPU
// MM (Device -> Host)
{
HANDLE_ERROR(
hipMemcpy(ptrW, ptrDevW, sizeOctet, hipMemcpyDeviceToHost)); // implicit synchronization barrier
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
7441933d5e5761a0f7ef58097a431cfdf91e5aef.cu
|
#include "AddVector.h"
#include <iostream>
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructor *|
\*-------------------------------------*/
AddVector::AddVector(float* ptrV1, float* ptrV2, float* ptrW, int n) :
ptrV1(ptrV1), ptrV2(ptrV2), ptrW(ptrW), n(n)
{
this->sizeOctet = n * sizeof(float); // bytes
// MM
{
// MM (malloc Device)
{
HANDLE_ERROR(cudaMalloc(&ptrDevV1, sizeOctet));
// TODO ptrV2
// TODO ptrW
}
// MM (memset Device)
{
HANDLE_ERROR(cudaMemset(ptrDevW, 0, sizeOctet));
}
// MM (copy Host->Device)
{
HANDLE_ERROR(
cudaMemcpy(ptrDevV1, ptrV1, sizeOctet,
cudaMemcpyHostToDevice));
// TODO ptrV2
}
Device::lastCudaError("AddVector MM (end allocation)"); // temp debug
}
// Grid
{
this->dg = dim3(16, 2, 1); // say, to be tuned for the GPU
this->db = dim3(32, 4, 1); // say, to be tuned for the GPU
Device::gridHeuristic(dg, db);
}
}
AddVector::~AddVector(void)
{
//MM (device free)
{
HANDLE_ERROR(cudaFree(ptrDevV1));
// TODO ptrV2
// TODO ptrW
Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug
}
}
/*--------------------------------------*\
|* Method *|
\*-------------------------------------*/
void AddVector::run()
{
Device::lastCudaError("addVecteur (before)"); // temp debug
addVector<<<dg,db>>>(ptrDevV1, ptrDevV2, ptrDevW, n); // asynchronous
Device::lastCudaError("addVecteur (after)"); // temp debug
Device::synchronize(); // Temp, only for printf in GPU
// MM (Device -> Host)
{
HANDLE_ERROR(
cudaMemcpy(ptrW, ptrDevW, sizeOctet, cudaMemcpyDeviceToHost)); // implicit synchronization barrier
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
e0d159c23ad6ff117c636f1cc39fb9e90e94b483.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include "config.h"
#define TIMER_CREATE(t) \
hipEvent_t t##_start, t##_end; \
hipEventCreate(&t##_start); \
hipEventCreate(&t##_end);
#define TIMER_START(t) \
hipEventRecord(t##_start); \
hipEventSynchronize(t##_start);
#define TIMER_END(t) \
hipEventRecord(t##_end); \
hipEventSynchronize(t##_end); \
hipEventElapsedTime(&t, t##_start, t##_end); \
hipEventDestroy(t##_start); \
hipEventDestroy(t##_end);
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
inline hipError_t checkCuda(hipError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// Add GPU kernel and functions
// HERE!!!
__global__ void probability_func(unsigned char *input,
unsigned char *output,
unsigned int width,
unsigned int height){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
unsigned char value = input[location];
output[location] = x%255;
}
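// Double-buffered Hillis-Steele scan over a single block in shared memory; currently unused,
// since the CDF is accumulated on the host below.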
__global__ void prefix_sum(double *output, double *input, int n)
{
extern __shared__ float temp[];
int thid = threadIdx.x;
int pout = 0, pin = 1;
temp[thid] = (thid > 0) ? input[thid] : 0;
__syncthreads();
for( int offset = 1; offset < n; offset <<= 1 )
{
pout = 1 - pout; // swap double buffer indices
pin = 1 - pout;
if (thid >= offset)
temp[pout*n+thid] += temp[pin*n+thid - offset];
else
temp[pout*n+thid] = temp[pin*n+thid];
__syncthreads();
}
output[thid] = temp[pout*n+thid]; // write output
}
__global__ void probability_function(unsigned int *input, double *output, unsigned int size, int bucket_count)
{
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
//int size = height*width;
int location = y*TILE_SIZE*gridDim.x+x;
if(location<bucket_count)
{
//printf("initial[%d]=%d\n",location,input[location]);
double value = input[location];
output[location] =value/size;
//printf("probability[%d]=%lf\n",location,value/size);
}
}
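// Builds a 256-bin intensity histogram with one atomicAdd per pixel.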
__global__ void frequency_function(unsigned char *input, unsigned int *output,int size, int bucket_size)
{
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
/*
unsigned char value = input[location];
int buck = ((int)value)/bucket_size;
output[buck] += 1;
*/
if (location < (size))
{
atomicAdd(&output[(unsigned int)(input[location])], 1);
//atomicAdd(&output[(unsigned int)(input[location] & 0xFF000000)], 1);
//atomicAdd(&output[(unsigned int)(input[location] & 0x00FF0000)], 1);
//atomicAdd(&output[(unsigned int)(input[location] & 0x0000FF00)], 1);
//atomicAdd(&output[(unsigned int)(input[location] & 0x000000FF)], 1);
}
}
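// Stretches the host-computed CDF to span [0, bucket_count-1]: offset is the first CDF value
// and alpha the reciprocal of its range.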
__global__ void cdf_normalization(double *input,double *output,int count, int bucket_count, double offset, double alpha)
{
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
if (location <bucket_count)
{
double value = input[location];
output[location]=(value-offset)*(bucket_count-1)*alpha;
}
}
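// Maps each input intensity through the normalized CDF to produce the equalized output pixel.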
__global__ void final_output(unsigned char *input,unsigned char *output,double *cdf,int bucket_size)
{
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
unsigned char value = input[location];
int buck =(int)value;
output[location]=cdf[buck];
}
__global__ void warmup(unsigned char *input,unsigned char *output)
{
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*(gridDim.x*TILE_SIZE)+x;
output[location] = 0;
}
// NOTE: The data passed on is already padded
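// Histogram equalization driver: the histogram and per-bin probabilities are computed on the
// GPU, the CDF is accumulated on the host, then the CDF is normalized and every pixel is
// remapped on the GPU.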
void gpu_function(unsigned char *data,unsigned int height,unsigned int width)
{
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
int size = XSize*YSize;
int bucket_count = 256;
int bucket_size = 1;
//int max = 255/size;
unsigned char *input_gpu;
unsigned char *output_gpu;
double *probability_vector;
double *cdf_cpu_test_gpu;
unsigned int *frequency_vector;
double *cdf_vector;
double probability_cpu_double[bucket_count];
double cdf_cpu_test[bucket_count];
unsigned int probability_cpu_int[bucket_count];
double cdf_cpu[bucket_count];
double *cdf_norm;
unsigned int frequency_cpu[bucket_count];
//int length = sizeof(data)/sizeof(data[0]);
//printf("LENGTH == %d\n",length);
// Allocate arrays in GPU memory
checkCuda(hipMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(hipMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(hipMalloc((void**)&probability_vector , bucket_count*sizeof(double)));
checkCuda(hipMalloc((void**)&cdf_cpu_test_gpu , bucket_count*sizeof(double)));
checkCuda(hipMalloc((void**)&cdf_vector , bucket_count*sizeof(double)));
checkCuda(hipMalloc((void**)&frequency_vector , bucket_count*sizeof(unsigned int)));
checkCuda(hipMalloc((void**)&cdf_norm,bucket_count*sizeof(double)));
/*
for(int i=0;i<width*height;i++)
{
printf("DATA[%d]=%s\n",i,data[i]);
}
*/
//Initiliaze probability_cpu to 0
for(int i=0;i<bucket_count;i++)
{
probability_cpu_int[i]=0;
}
for(int i =0;i<bucket_count;i++)
{
probability_cpu_double[i]=0;
}
// Copy data to GPU
checkCuda(hipMemcpy(input_gpu, data,size*sizeof(char), hipMemcpyHostToDevice));
checkCuda(hipMemset(output_gpu , 0 , size*sizeof(unsigned char)));
checkCuda(hipMemcpy(probability_vector,probability_cpu_double,bucket_count*sizeof(double),hipMemcpyHostToDevice));
checkCuda(hipMemcpy(frequency_vector,probability_cpu_int,bucket_count*sizeof(unsigned int),hipMemcpyHostToDevice));
checkCuda(hipDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
// Kernel Call
#ifdef CUDA_TIMING
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// Add more kernels and functions as needed here
//norm_function<<<dimGrid, dimBlock>>>(input_gpu, output_gpu,width,height);
hipLaunchKernelGGL(( frequency_function), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu,frequency_vector,size, bucket_size);
checkCuda(hipMemcpy(frequency_cpu,frequency_vector,bucket_count*sizeof(unsigned int),hipMemcpyDeviceToHost));
int count = 0;
for(int i=0;i<bucket_count;i++)
{
count += frequency_cpu[i];
}
printf("LENGTH = %d\n",count);
hipLaunchKernelGGL(( probability_function), dim3(dimGrid), dim3(dimBlock), 0, 0, frequency_vector,probability_vector,count,bucket_count);
// From here on, no need to change anything
checkCuda(hipPeekAtLastError());
checkCuda(hipDeviceSynchronize());
//prefix_sum<<<dimGrid, dimBlock>>>(probability_vector, cdf_cpu_test_gpu, bucket_count);
//checkCuda(hipMemcpy(cdf_cpu_test, cdf_cpu_test_gpu,bucket_count*sizeof(double),hipMemcpyDeviceToHost));
checkCuda(hipMemcpy(probability_cpu_double,probability_vector,bucket_count*sizeof(double),hipMemcpyDeviceToHost));
/*
int min;
for(int i=0;i<256;i++)
{
if(probability_cpu_double[i]>0)
{
min = i;
}
}
if (max>0 && max <=150)
{
max = max+100;
}
else if(max>150 && max <=200)
{
max = max+50;
}
else if(max>200 && max<255)
{
max = max;
}
printf("MAX = %d",max);
*/
//double count = probability_cpu_double[0];
cdf_cpu[0]= probability_cpu_double[0];
////printf("at 0, cdf = %f, cdf_test = %f\n", cdf_cpu[0], cdf_cpu_test[0]);
printf("at 0, cdf = %f\n", cdf_cpu[0]);
for(int i=1;i<bucket_count;i++)
{
cdf_cpu[i] = probability_cpu_double[i]+cdf_cpu[i-1];
//count = count+ probability_cpu_double[i];
printf("at %d, cdf = %f\n", i, cdf_cpu[i]);
}
double offset, range,alpha;
offset = cdf_cpu[0];
range = cdf_cpu[bucket_count-1]-cdf_cpu[0];
alpha = 1/range;
/*
for(int i= 0;i<256;i++)
{
printf("probability[%d]=%lf \n",i,probability_cpu_double[i]);
}
for(int i= 0;i<256;i++)
{
printf("cdf[%d]=%lf\n",i,cdf_cpu[i]);
}
*/
//printf("COUNT = %lf\n",count);
checkCuda(hipMemcpy(cdf_vector,cdf_cpu,bucket_count*sizeof(double),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( cdf_normalization), dim3(dimGrid), dim3(dimBlock), 0, 0, cdf_vector,cdf_norm,size, bucket_count, offset, alpha);
hipLaunchKernelGGL(( final_output), dim3(dimGrid),dim3(dimBlock), 0, 0, input_gpu,output_gpu, cdf_norm, bucket_size);
#ifdef CUDA_TIMING
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
checkCuda(hipPeekAtLastError());
checkCuda(hipDeviceSynchronize());
// Retrieve results from the GPU
checkCuda(hipMemcpy(data,output_gpu,size*sizeof(unsigned char),hipMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(hipFree(output_gpu));
checkCuda(hipFree(input_gpu));
checkCuda(hipFree(probability_vector));
checkCuda(hipFree(frequency_vector));
checkCuda(hipFree(cdf_vector));
checkCuda(hipFree(cdf_norm));
}
void gpu_warmup(unsigned char *data, unsigned int height,unsigned int width){
unsigned char *input_gpu;
unsigned char *output_gpu;
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(hipMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(hipMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(hipMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(hipMemcpy(input_gpu,
data,
size*sizeof(char),
hipMemcpyHostToDevice));
checkCuda(hipDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
hipLaunchKernelGGL(( warmup), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu,
output_gpu);
checkCuda(hipDeviceSynchronize());
// Retrieve results from the GPU
checkCuda(hipMemcpy(data,
output_gpu,
size*sizeof(unsigned char),
hipMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(hipFree(output_gpu));
checkCuda(hipFree(input_gpu));
}
|
e0d159c23ad6ff117c636f1cc39fb9e90e94b483.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include "config.h"
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start); \
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// Add GPU kernel and functions
// HERE!!!
__global__ void probability_func(unsigned char *input,
unsigned char *output,
unsigned int width,
unsigned int height){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
unsigned char value = input[location];
output[location] = x%255;
}
__global__ void prefix_sum(double *output, double *input, int n)
{
extern __shared__ float temp[];
int thid = threadIdx.x;
int pout = 0, pin = 1;
temp[thid] = (thid > 0) ? input[thid] : 0;
__syncthreads();
for( int offset = 1; offset < n; offset <<= 1 )
{
pout = 1 - pout; // swap double buffer indices
pin = 1 - pout;
if (thid >= offset)
temp[pout*n+thid] += temp[pin*n+thid - offset];
else
temp[pout*n+thid] = temp[pin*n+thid];
__syncthreads();
}
output[thid] = temp[pout*n+thid]; // write output
}
__global__ void probability_function(unsigned int *input, double *output, unsigned int size, int bucket_count)
{
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
//int size = height*width;
int location = y*TILE_SIZE*gridDim.x+x;
if(location<bucket_count)
{
//printf("initial[%d]=%d\n",location,input[location]);
double value = input[location];
output[location] =value/size;
//printf("probability[%d]=%lf\n",location,value/size);
}
}
__global__ void frequency_function(unsigned char *input, unsigned int *output,int size, int bucket_size)
{
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
/*
unsigned char value = input[location];
int buck = ((int)value)/bucket_size;
output[buck] += 1;
*/
if (location < (size))
{
atomicAdd(&output[(unsigned int)(input[location])], 1);
//atomicAdd(&output[(unsigned int)(input[location] & 0xFF000000)], 1);
//atomicAdd(&output[(unsigned int)(input[location] & 0x00FF0000)], 1);
//atomicAdd(&output[(unsigned int)(input[location] & 0x0000FF00)], 1);
//atomicAdd(&output[(unsigned int)(input[location] & 0x000000FF)], 1);
}
}
__global__ void cdf_normalization(double *input,double *output,int count, int bucket_count, double offset, double alpha)
{
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
if (location <bucket_count)
{
double value = input[location];
output[location]=(value-offset)*(bucket_count-1)*alpha;
}
}
__global__ void final_output(unsigned char *input,unsigned char *output,double *cdf,int bucket_size)
{
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
unsigned char value = input[location];
int buck =(int)value;
output[location]=cdf[buck];
}
__global__ void warmup(unsigned char *input,unsigned char *output)
{
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*(gridDim.x*TILE_SIZE)+x;
output[location] = 0;
}
// NOTE: The data passed on is already padded
void gpu_function(unsigned char *data,unsigned int height,unsigned int width)
{
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
int size = XSize*YSize;
int bucket_count = 256;
int bucket_size = 1;
//int max = 255/size;
unsigned char *input_gpu;
unsigned char *output_gpu;
double *probability_vector;
double *cdf_cpu_test_gpu;
unsigned int *frequency_vector;
double *cdf_vector;
double probability_cpu_double[bucket_count];
double cdf_cpu_test[bucket_count];
unsigned int probability_cpu_int[bucket_count];
double cdf_cpu[bucket_count];
double *cdf_norm;
unsigned int frequency_cpu[bucket_count];
//int length = sizeof(data)/sizeof(data[0]);
//printf("LENGTH == %d\n",length);
// Allocate arrays in GPU memory
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&probability_vector , bucket_count*sizeof(double)));
checkCuda(cudaMalloc((void**)&cdf_cpu_test_gpu , bucket_count*sizeof(double)));
checkCuda(cudaMalloc((void**)&cdf_vector , bucket_count*sizeof(double)));
checkCuda(cudaMalloc((void**)&frequency_vector , bucket_count*sizeof(unsigned int)));
checkCuda(cudaMalloc((void**)&cdf_norm,bucket_count*sizeof(double)));
/*
for(int i=0;i<width*height;i++)
{
printf("DATA[%d]=%s\n",i,data[i]);
}
*/
	//Initialize probability_cpu to 0
for(int i=0;i<bucket_count;i++)
{
probability_cpu_int[i]=0;
}
for(int i =0;i<bucket_count;i++)
{
probability_cpu_double[i]=0;
}
// Copy data to GPU
checkCuda(cudaMemcpy(input_gpu, data,size*sizeof(char), cudaMemcpyHostToDevice));
checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
checkCuda(cudaMemcpy(probability_vector,probability_cpu_double,bucket_count*sizeof(double),cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(frequency_vector,probability_cpu_int,bucket_count*sizeof(unsigned int),cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
// Kernel Call
#ifdef CUDA_TIMING
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// Add more kernels and functions as needed here
//norm_function<<<dimGrid, dimBlock>>>(input_gpu, output_gpu,width,height);
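	// Build a 256-bin intensity histogram on the GPU (one atomicAdd per input pixel).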
frequency_function<<<dimGrid, dimBlock>>>(input_gpu,frequency_vector,size, bucket_size);
checkCuda(cudaMemcpy(frequency_cpu,frequency_vector,bucket_count*sizeof(unsigned int),cudaMemcpyDeviceToHost));
int count = 0;
for(int i=0;i<bucket_count;i++)
{
count += frequency_cpu[i];
}
printf("LENGTH = %d\n",count);
probability_function<<<dimGrid, dimBlock>>>(frequency_vector,probability_vector,count,bucket_count);
// From here on, no need to change anything
checkCuda(cudaPeekAtLastError());
checkCuda(cudaDeviceSynchronize());
//prefix_sum<<<dimGrid, dimBlock>>>(probability_vector, cdf_cpu_test_gpu, bucket_count);
//checkCuda(cudaMemcpy(cdf_cpu_test, cdf_cpu_test_gpu,bucket_count*sizeof(double),cudaMemcpyDeviceToHost));
checkCuda(cudaMemcpy(probability_cpu_double,probability_vector,bucket_count*sizeof(double),cudaMemcpyDeviceToHost));
/*
int min;
for(int i=0;i<256;i++)
{
if(probability_cpu_double[i]>0)
{
min = i;
}
}
if (max>0 && max <=150)
{
max = max+100;
}
else if(max>150 && max <=200)
{
max = max+50;
}
else if(max>200 && max<255)
{
max = max;
}
printf("MAX = %d",max);
*/
//double count = probability_cpu_double[0];
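	// Compute the CDF on the host as an inclusive prefix sum over the per-bin probabilities.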
cdf_cpu[0]= probability_cpu_double[0];
////printf("at 0, cdf = %f, cdf_test = %f\n", cdf_cpu[0], cdf_cpu_test[0]);
printf("at 0, cdf = %f\n", cdf_cpu[0]);
for(int i=1;i<bucket_count;i++)
{
cdf_cpu[i] = probability_cpu_double[i]+cdf_cpu[i-1];
//count = count+ probability_cpu_double[i];
printf("at %d, cdf = %f\n", i, cdf_cpu[i]);
}
double offset, range,alpha;
offset = cdf_cpu[0];
range = cdf_cpu[bucket_count-1]-cdf_cpu[0];
alpha = 1/range;
/*
for(int i= 0;i<256;i++)
{
printf("probability[%d]=%lf \n",i,probability_cpu_double[i]);
}
for(int i= 0;i<256;i++)
{
printf("cdf[%d]=%lf\n",i,cdf_cpu[i]);
}
*/
//printf("COUNT = %lf\n",count);
checkCuda(cudaMemcpy(cdf_vector,cdf_cpu,bucket_count*sizeof(double),cudaMemcpyHostToDevice));
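	// Rescale the CDF to the [0, 255] range and remap every input pixel through it (histogram equalization).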
cdf_normalization<<<dimGrid, dimBlock>>>(cdf_vector,cdf_norm,size, bucket_count, offset, alpha);
final_output<<<dimGrid,dimBlock>>>(input_gpu,output_gpu, cdf_norm, bucket_size);
#ifdef CUDA_TIMING
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
checkCuda(cudaPeekAtLastError());
checkCuda(cudaDeviceSynchronize());
// Retrieve results from the GPU
checkCuda(cudaMemcpy(data,output_gpu,size*sizeof(unsigned char),cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
checkCuda(cudaFree(probability_vector));
checkCuda(cudaFree(frequency_vector));
checkCuda(cudaFree(cdf_vector));
checkCuda(cudaFree(cdf_norm));
}
void gpu_warmup(unsigned char *data, unsigned int height,unsigned int width){
unsigned char *input_gpu;
unsigned char *output_gpu;
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(cudaMemcpy(input_gpu,
data,
size*sizeof(char),
cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
warmup<<<dimGrid, dimBlock>>>(input_gpu,
output_gpu);
checkCuda(cudaDeviceSynchronize());
// Retrieve results from the GPU
checkCuda(cudaMemcpy(data,
output_gpu,
size*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
}
|
273652c56997a89e6c2ea511c57ec6ae272a0d70.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from ztrtri_diag_batched.cu normal z -> d, Fri Jan 30 19:00:10 2015
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named dtrtri_diag.cu to avoid name conflict with src/dtrtri.o
in the library. The actual kernels are in dtrtri_lower.cu and dtrtri_upper.cu
*/
#include "common_magma.h"
#include "dtrtri.h"
/**
Inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in dtrsm.
Same as dtrtri_diag, but adds queue argument.
@ingroup magma_dblas3
********************************************************************/
/**
Purpose
-------
dtrtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array DOUBLE_PRECISION array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = 'U', the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = 'L', the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = 'U', the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
dinvA_array DOUBLE_PRECISION array of dimension (NB, ((n+NB-1)/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_dblas3
********************************************************************/
extern "C" void
magmablas_dtrtri_diag_batched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
double const * const *dA_array, magma_int_t ldda,
double **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = (n + IB - 1)/IB;
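    // number of IB x IB blocks along the diagonal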
if(resetozero)
{
magmablas_dlaset_batched(MagmaFull, ((n+NB-1)/NB)*NB, NB, MAGMA_D_ZERO, MAGMA_D_ZERO, dinvA_array, ((n+NB-1)/NB)*NB, batchCount, queue);
//magmablas_dmemset_batched( dinvA_array, ((n+NB-1)/NB)*NB*NB, batchCount, queue);
}
    // Note: using cudaMemset here would require clearing the whole dinvA vectors at their
    // originally allocated size, which would need an extra dinvA_length input parameter;
    // this was tested and turned out to be slower. If the size is not the largest one
    // computed by the high-level getrf_batched API, magmablas_dlaset_batched must be used.
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
hipLaunchKernelGGL(( dtrtri_diag_lower_kernel_batched), dim3(diaggrid), dim3(IB), 0, queue , diag, n, dA_array, ldda, dinvA_array );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = (n + kb - 1)/kb;
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_dgemm16_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm16_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_dgemm32_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm32_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_dgemm64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_dgemm_above64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm_above64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm_above64_part3_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
hipLaunchKernelGGL(( dtrtri_diag_upper_kernel_batched), dim3(diaggrid), dim3(IB), 0, queue , diag, n, dA_array, ldda, dinvA_array );
        // build up the inverses from IB x IB blocks to NB x NB blocks
for( int jb=IB; jb < NB; jb*=2 ) {
int kb = jb*2;
int npages = (n + kb - 1)/kb;
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_dgemm16_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm16_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_dgemm32_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm32_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_dgemm64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_dgemm_above64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm_above64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm_above64_part3_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
|
273652c56997a89e6c2ea511c57ec6ae272a0d70.cu
|
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from ztrtri_diag_batched.cu normal z -> d, Fri Jan 30 19:00:10 2015
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named dtrtri_diag.cu to avoid name conflict with src/dtrtri.o
in the library. The actual kernels are in dtrtri_lower.cu and dtrtri_upper.cu
*/
#include "common_magma.h"
#include "dtrtri.h"
/**
Inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in dtrsm.
Same as dtrtri_diag, but adds queue argument.
@ingroup magma_dblas3
********************************************************************/
/**
Purpose
-------
dtrtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array DOUBLE_PRECISION array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = 'U', the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = 'L', the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = 'U', the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
dinvA_array DOUBLE_PRECISION array of dimension (NB, ((n+NB-1)/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_dblas3
********************************************************************/
extern "C" void
magmablas_dtrtri_diag_batched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
double const * const *dA_array, magma_int_t ldda,
double **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = (n + IB - 1)/IB;
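    // number of IB x IB blocks along the diagonal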
if(resetozero)
{
magmablas_dlaset_batched(MagmaFull, ((n+NB-1)/NB)*NB, NB, MAGMA_D_ZERO, MAGMA_D_ZERO, dinvA_array, ((n+NB-1)/NB)*NB, batchCount, queue);
//magmablas_dmemset_batched( dinvA_array, ((n+NB-1)/NB)*NB*NB, batchCount, queue);
}
    // Note: using cudaMemset here would require clearing the whole dinvA vectors at their
    // originally allocated size, which would need an extra dinvA_length input parameter;
    // this was tested and turned out to be slower. If the size is not the largest one
    // computed by the high-level getrf_batched API, magmablas_dlaset_batched must be used.
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
dtrtri_diag_lower_kernel_batched<<< diaggrid, IB, 0, queue >>>( diag, n, dA_array, ldda, dinvA_array );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = (n + kb - 1)/kb;
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
triple_dgemm16_part1_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm16_part2_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
triple_dgemm32_part1_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm32_part2_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
triple_dgemm64_part1_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm64_part2_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
triple_dgemm_above64_part1_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm_above64_part2_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm_above64_part3_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
dtrtri_diag_upper_kernel_batched<<< diaggrid, IB, 0, queue >>>( diag, n, dA_array, ldda, dinvA_array );
        // build up the inverses from IB x IB blocks to NB x NB blocks
for( int jb=IB; jb < NB; jb*=2 ) {
int kb = jb*2;
int npages = (n + kb - 1)/kb;
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
triple_dgemm16_part1_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm16_part2_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
triple_dgemm32_part1_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm32_part2_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
triple_dgemm64_part1_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm64_part2_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
triple_dgemm_above64_part1_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm_above64_part2_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm_above64_part3_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
|
affb12ab85c2f606b75364e945186e5f1503f565.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void dual(float* p1, float* p2, const float* u_, const double lambda, const double sigma, const int X, const int Y)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
// center point
int c = y*X + x;
float nabla_x = 0.0f;
float nabla_y = 0.0f;
if (x < X-1)
nabla_x = u_[c+1]-u_[c];
if (y < Y-1)
nabla_y = u_[c+X]-u_[c];
//p1[c] = fmaxf(-lambda, fminf(lambda, p1[c] + sigma*nabla_x));
//p2[c] = fmaxf(-lambda, fminf(lambda, p2[c] + sigma*nabla_y));
p1[c] += sigma*nabla_x;
p2[c] += sigma*nabla_y;
float denom = fmaxf(1.0f, sqrt(p1[c]*p1[c] + p2[c]*p2[c])/lambda);
p1[c] /= denom;
p2[c] /= denom;
}
|
affb12ab85c2f606b75364e945186e5f1503f565.cu
|
#include "includes.h"
__global__ void dual(float* p1, float* p2, const float* u_, const double lambda, const double sigma, const int X, const int Y)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
// center point
int c = y*X + x;
float nabla_x = 0.0f;
float nabla_y = 0.0f;
if (x < X-1)
nabla_x = u_[c+1]-u_[c];
if (y < Y-1)
nabla_y = u_[c+X]-u_[c];
//p1[c] = fmaxf(-lambda, fminf(lambda, p1[c] + sigma*nabla_x));
//p2[c] = fmaxf(-lambda, fminf(lambda, p2[c] + sigma*nabla_y));
p1[c] += sigma*nabla_x;
p2[c] += sigma*nabla_y;
float denom = fmaxf(1.0f, sqrt(p1[c]*p1[c] + p2[c]*p2[c])/lambda);
p1[c] /= denom;
p2[c] /= denom;
}
|
e9dde0bf69c79c552301f91e16a583d9beb1ea18.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// Created by songzeceng on 2020/11/11.
//
#include "stdio.h"
#include "hip/hip_runtime.h"
void getOccupancy();
__global__ void MyKernel(int* a, int* b, int* c) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
c[idx] = a[idx] + b[idx];
}
__global__ void MyKernel2(int* a, int size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
a[idx] = a[idx] + 3;
}
}
int main() {
// getOccupancy();
int blockSize, minGridSize, gridSize;
int size;
scanf("%d", &size);
int *h_data = (int *) malloc(size * sizeof(int));
int *d_data;
for (int i = 0; i < size; i++) {
h_data[i] = i * i;
}
hipMalloc(&d_data, size * sizeof(int ));
hipMemcpy(d_data, h_data, size * sizeof(int ), hipMemcpyHostToDevice);
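    // let the runtime pick the block size that maximizes occupancy for MyKernel2, then size the grid from it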
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void *)MyKernel2, 0, size);
gridSize = (size + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( MyKernel2), dim3(gridSize), dim3(blockSize), 0, 0, d_data, size);
hipMemcpy(h_data, d_data, size * sizeof(int ), hipMemcpyDeviceToHost);
for (int i = 0; i < size; i++) {
printf("%d\n", h_data[i]);
}
hipFree(d_data);
free(h_data);
return 0;
}
void getOccupancy() {
int numBlocks;
int blockSize = 32;
int device = 0, activeWraps, maxWarps;
hipDeviceProp_t prop;
hipGetDevice(&device);
hipGetDeviceProperties(&prop, device);
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, MyKernel, blockSize, 0);
activeWraps = numBlocks * blockSize / prop.warpSize;
maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize;
printf("Occupancy: %.3f\n", activeWraps / (double ) maxWarps);
}
|
e9dde0bf69c79c552301f91e16a583d9beb1ea18.cu
|
//
// Created by songzeceng on 2020/11/11.
//
#include "stdio.h"
#include "cuda_runtime.h"
void getOccupancy();
__global__ void MyKernel(int* a, int* b, int* c) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
c[idx] = a[idx] + b[idx];
}
__global__ void MyKernel2(int* a, int size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
a[idx] = a[idx] + 3;
}
}
int main() {
// getOccupancy();
int blockSize, minGridSize, gridSize;
int size;
scanf("%d", &size);
int *h_data = (int *) malloc(size * sizeof(int));
int *d_data;
for (int i = 0; i < size; i++) {
h_data[i] = i * i;
}
cudaMalloc(&d_data, size * sizeof(int ));
cudaMemcpy(d_data, h_data, size * sizeof(int ), cudaMemcpyHostToDevice);
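    // let the runtime pick the block size that maximizes occupancy for MyKernel2, then size the grid from it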
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void *)MyKernel2, 0, size);
gridSize = (size + blockSize - 1) / blockSize;
MyKernel2<<<gridSize, blockSize>>>(d_data, size);
cudaMemcpy(h_data, d_data, size * sizeof(int ), cudaMemcpyDeviceToHost);
for (int i = 0; i < size; i++) {
printf("%d\n", h_data[i]);
}
cudaFree(d_data);
free(h_data);
return 0;
}
void getOccupancy() {
int numBlocks;
int blockSize = 32;
int device = 0, activeWraps, maxWarps;
cudaDeviceProp prop;
cudaGetDevice(&device);
cudaGetDeviceProperties(&prop, device);
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, MyKernel, blockSize, 0);
activeWraps = numBlocks * blockSize / prop.warpSize;
maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize;
printf("Occupancy: %.3f\n", activeWraps / (double ) maxWarps);
}
|
4acd5103e2bb4a581e70f1dacb0a642876b5291a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// rowDeli: use GPU shared memory for the row delimiters
#include "config.h"
__global__ void
spmv_kernel(const float* val,
const int * cols,
const int * rowDelimiters,
const float * vec,
const int dim, float * out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
// shared memory for rowDeli
__shared__ int rowDeli[BLOCK_SIZE/WARP_SIZE+1];
__shared__ volatile float partialSums[BLOCK_SIZE];
// first thread does the copy to shared memory
if(t==0)
{
for(int i=0;i<BLOCK_SIZE/WARP_SIZE+1;i++)
rowDeli[i]= rowDelimiters[myRow+i];
}
// if(t==BLOCK_SIZE-1) rowDeli[t/WARP_SIZE+1]= rowDelimiters[myRow+1];
__syncthreads();
if (myRow < dim)
{ // printf("%d\n",rowDelimiters[myRow]);
int warpStart =rowDeli[t/WARP_SIZE];//rowDelimiters[myRow];
int warpEnd = rowDeli[t/WARP_SIZE+1];//rowDelimiters[myRow+1];
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = cols[j];
mySum += val[j] * vec[col];
}
partialSums[t] = mySum;
// Reduce partial sums
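        // warp-synchronous tree reduction (the 16/8/4/2/1 steps assume WARP_SIZE == 32; partialSums is volatile, so no explicit intra-warp sync is used)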
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
int main(int argc, char **argv) {
hipSetDevice(1);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = MSIZE * (BLOCK_SIZE/WARP_SIZE);
spmv_nItems = spmv_numRows * (spmv_numRows / SFactor); // 2% of entries will be non-zero
float maxval = 200.0;
hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float));
hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int));
hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float)) ;
spmv_refOut = new float[spmv_numRows];
hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice);
// Setup thread configuration
int spmv_grid = (int) ceil(spmv_numRows / (float)(BLOCK_SIZE / WARP_SIZE));
// warm up the GPU
    for (int i=0; i<5; i++) // warm-up: repeat 5 times
{
hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(BLOCK_SIZE), 0, 0,
d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
}
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
    for (int i=0; i<ITERATIONS; i++) // timed runs: repeat ITERATIONS times
{
hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(BLOCK_SIZE), 0, 0,
d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
}
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost);
// spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
return 0;
}
|
4acd5103e2bb4a581e70f1dacb0a642876b5291a.cu
|
// rowDeli: use GPU shared memory for the row delimiters
#include "config.h"
__global__ void
spmv_kernel(const float* val,
const int * cols,
const int * rowDelimiters,
const float * vec,
const int dim, float * out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
// shared memory for rowDeli
__shared__ int rowDeli[BLOCK_SIZE/WARP_SIZE+1];
__shared__ volatile float partialSums[BLOCK_SIZE];
// first thread does the copy to shared memory
if(t==0)
{
for(int i=0;i<BLOCK_SIZE/WARP_SIZE+1;i++)
rowDeli[i]= rowDelimiters[myRow+i];
}
// if(t==BLOCK_SIZE-1) rowDeli[t/WARP_SIZE+1]= rowDelimiters[myRow+1];
__syncthreads();
if (myRow < dim)
{ // printf("%d\n",rowDelimiters[myRow]);
int warpStart =rowDeli[t/WARP_SIZE];//rowDelimiters[myRow];
int warpEnd = rowDeli[t/WARP_SIZE+1];//rowDelimiters[myRow+1];
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = cols[j];
mySum += val[j] * vec[col];
}
partialSums[t] = mySum;
// Reduce partial sums
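        // warp-synchronous tree reduction (the 16/8/4/2/1 steps assume WARP_SIZE == 32; partialSums is volatile, so no explicit intra-warp sync is used)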
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
int main(int argc, char **argv) {
cudaSetDevice(1);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = MSIZE * (BLOCK_SIZE/WARP_SIZE);
spmv_nItems = spmv_numRows * (spmv_numRows / SFactor); // 2% of entries will be non-zero
float maxval = 200.0;
cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float));
cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int));
cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float)) ;
spmv_refOut = new float[spmv_numRows];
cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice);
// Setup thread configuration
int spmv_grid = (int) ceil(spmv_numRows / (float)(BLOCK_SIZE / WARP_SIZE));
// warm up the GPU
    for (int i=0; i<5; i++) // warm-up: repeat 5 times
{
spmv_kernel <<<spmv_grid, BLOCK_SIZE>>>
(d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
}
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
    for (int i=0; i<ITERATIONS; i++) // timed runs: repeat ITERATIONS times
{
spmv_kernel <<<spmv_grid, BLOCK_SIZE>>>
(d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
}
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost);
// spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
return 0;
}
|
39304927decc8dcf45b274bf38a3a80b2d1b1f95.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
author: fredy m
uaem
[email protected] for further comments
*/
#ifdef __HIPCC__
#define cuda_SYNCTHREADS() __syncthreads();
#else
#define cuda_SYNCTHREADS()
#endif
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
#define N 32
/*
computes an approximate value of pi by partitioning the interval (the more partitions, the closer the approximation)
*/
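// integrand f(x) = 4 / (1 + x^2); its integral over [0, 1] equals pi (approximated below with the trapezoidal rule)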
__host__ float func(float valor)
{
return 4 / (1 + powf(valor,2));
}
__global__ void calcula(float *particion, float *funcion, float *sum)
{
	//dynamic allocation of shared memory, sized at kernel launch time
extern __shared__ float temporal[];
float add[N];
//float h = (1 - 0) / N;
int id = threadIdx.x;// +blockIdx.x * blockDim.x;
float xi, xim;
float yi, yim;
//printf("%.2f, \n", particion[id]);
xi = particion[id];
xim = particion[id - 1];
yi = funcion[id];
yim = funcion[id - 1];
add[id] = .5f * ((xi - xim) * (yi + yim));
temporal[id] = add[id];
printf("(%.4f - %.4f) * (%.4f + %.4f): %.4f\n", xi, xim, yi, yim, temporal[id]);
cuda_SYNCTHREADS();
	//parallel reduction
	int salto = N / 2;
	//perform log2(N) iterations
	while (salto)
	{
		//only half of the threads work at each step
if (id < salto)
{
temporal[id] = temporal[id] + temporal[id + salto];
}
//cuda_SYNCTHREADS();
cuda_SYNCTHREADS();
salto = salto / 2;
}
	//thread 0 writes the final result to global memory
if (id == 0)
{
*sum = temporal[id];
//printf("temporal: %.3f\n", *sum);
}
}
int main(int argc, char** argv)
{
float *vector1, *vector2, *resultado;
float *dev_vector1, *dev_vector2, *dev_resultado;
size_t size = N * sizeof(float);
	//allocate host memory
	vector1 = (float*)malloc(size);
	vector2 = (float*)malloc(size);
	resultado = (float*)malloc(size);
	//allocate device memory
	hipMalloc((void**)&dev_vector1, size);
	hipMalloc((void**)&dev_vector2, size);
	hipMalloc((void**)&dev_resultado, size);
	// initialize the vectors
for (int i = 0; i < N; i++) {
vector1[i] = (float)i / (N - 1);
vector2[i] = func(vector1[i]);
//printf("xi: %.2f, f(xi): %.2f \n", vector1[i], vector2[i]);
}
	//copy the data to the device
	hipMemcpy(dev_vector1, vector1, size, hipMemcpyHostToDevice);
	hipMemcpy(dev_vector2, vector2, size, hipMemcpyHostToDevice);
	//hipMemcpy(dev_resultado, resultado, size, hipMemcpyHostToDevice);
	//launch the kernel with dynamically sized shared memory
	hipLaunchKernelGGL(( calcula) , dim3(1), dim3(N), size, 0, dev_vector1, dev_vector2, dev_resultado);
	//copy the result back from the device
hipMemcpy(resultado, dev_resultado, size, hipMemcpyDeviceToHost);
printf("pi = %.5f, \n", resultado[0]);
return 0;
}
|
39304927decc8dcf45b274bf38a3a80b2d1b1f95.cu
|
/*
author: fredy m
uaem
[email protected] for further comments
*/
#ifdef __CUDACC__
#define cuda_SYNCTHREADS() __syncthreads();
#else
#define cuda_SYNCTHREADS()
#endif
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
#define N 32
/*
computes an approximate value of pi by partitioning the interval (the more partitions, the closer the approximation)
*/
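// integrand f(x) = 4 / (1 + x^2); its integral over [0, 1] equals pi (approximated below with the trapezoidal rule)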
__host__ float func(float valor)
{
return 4 / (1 + powf(valor,2));
}
__global__ void calcula(float *particion, float *funcion, float *sum)
{
	//dynamic allocation of shared memory, sized at kernel launch time
extern __shared__ float temporal[];
float add[N];
//float h = (1 - 0) / N;
int id = threadIdx.x;// +blockIdx.x * blockDim.x;
float xi, xim;
float yi, yim;
//printf("%.2f, \n", particion[id]);
xi = particion[id];
xim = particion[id - 1];
yi = funcion[id];
yim = funcion[id - 1];
add[id] = .5f * ((xi - xim) * (yi + yim));
temporal[id] = add[id];
printf("(%.4f - %.4f) * (%.4f + %.4f): %.4f\n", xi, xim, yi, yim, temporal[id]);
cuda_SYNCTHREADS();
	//parallel reduction
	int salto = N / 2;
	//perform log2(N) iterations
	while (salto)
	{
		//only half of the threads work at each step
if (id < salto)
{
temporal[id] = temporal[id] + temporal[id + salto];
}
//cuda_SYNCTHREADS();
cuda_SYNCTHREADS();
salto = salto / 2;
}
	//thread 0 writes the final result to global memory
if (id == 0)
{
*sum = temporal[id];
//printf("temporal: %.3f\n", *sum);
}
}
int main(int argc, char** argv)
{
float *vector1, *vector2, *resultado;
float *dev_vector1, *dev_vector2, *dev_resultado;
size_t size = N * sizeof(float);
	//allocate host memory
	vector1 = (float*)malloc(size);
	vector2 = (float*)malloc(size);
	resultado = (float*)malloc(size);
	//allocate device memory
	cudaMalloc((void**)&dev_vector1, size);
	cudaMalloc((void**)&dev_vector2, size);
	cudaMalloc((void**)&dev_resultado, size);
	// initialize the vectors
for (int i = 0; i < N; i++) {
vector1[i] = (float)i / (N - 1);
vector2[i] = func(vector1[i]);
//printf("xi: %.2f, f(xi): %.2f \n", vector1[i], vector2[i]);
}
	//copy the data to the device
	cudaMemcpy(dev_vector1, vector1, size, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_vector2, vector2, size, cudaMemcpyHostToDevice);
	//cudaMemcpy(dev_resultado, resultado, size, cudaMemcpyHostToDevice);
	//launch the kernel with dynamically sized shared memory
	calcula <<<1, N, size>>>(dev_vector1, dev_vector2, dev_resultado);
	//copy the result back from the device
cudaMemcpy(resultado, dev_resultado, size, cudaMemcpyDeviceToHost);
printf("pi = %.5f, \n", resultado[0]);
return 0;
}
|
e133c3ca4101e2fe8829b130013b8304da0173f9.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* 2DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "../../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define NI 4096 *4
#define NJ 4096 * 4
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void conv2D(DATA_TYPE* A, DATA_TYPE* B)
{
int i, j;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +0.2; c21 = +0.5; c31 = -0.8;
c12 = -0.3; c22 = +0.6; c32 = -0.9;
c13 = +0.4; c23 = +0.7; c33 = +0.10;
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
B[i*NJ + j] = c11 * A[(i - 1)*NJ + (j - 1)] + c12 * A[(i + 0)*NJ + (j - 1)] + c13 * A[(i + 1)*NJ + (j - 1)]
+ c21 * A[(i - 1)*NJ + (j + 0)] + c22 * A[(i + 0)*NJ + (j + 0)] + c23 * A[(i + 1)*NJ + (j + 0)]
+ c31 * A[(i - 1)*NJ + (j + 1)] + c32 * A[(i + 0)*NJ + (j + 1)] + c33 * A[(i + 1)*NJ + (j + 1)];
}
}
}
void init(DATA_TYPE* A, DATA_TYPE* A_gpu)
{
int i, j;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
float temp = (float)rand()/RAND_MAX;
A[i*NJ + j] = temp;
A_gpu[i*NJ + j] = temp;
}
}
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
int i, j, fail;
fail = 0;
// Compare a and b
for (i=1; i < (NI-1); i++)
{
for (j=1; j < (NJ-1); j++)
{
if (percentDiff(B[i*NJ + j], B_outputFromGpu[i*NJ + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
__global__ void Convolution2D_kernel(DATA_TYPE *A, DATA_TYPE *B)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +0.2; c21 = +0.5; c31 = -0.8;
c12 = -0.3; c22 = +0.6; c32 = -0.9;
c13 = +0.4; c23 = +0.7; c33 = +0.10;
if ((i < NI-1) && (j < NJ-1) && (i > 0) && (j > 0))
{
B[i * NJ + j] = c11 * A[(i - 1) * NJ + (j - 1)] + c21 * A[(i - 1) * NJ + (j + 0)] + c31 * A[(i - 1) * NJ + (j + 1)]
+ c12 * A[(i + 0) * NJ + (j - 1)] + c22 * A[(i + 0) * NJ + (j + 0)] + c32 * A[(i + 0) * NJ + (j + 1)]
+ c13 * A[(i + 1) * NJ + (j - 1)] + c23 * A[(i + 1) * NJ + (j + 0)] + c33 * A[(i + 1) * NJ + (j + 1)];
}
}
void convolution2DCuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu)
{
double t_start, t_end;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)ceil( ((float)NI) / ((float)block.x) ), (size_t)ceil( ((float)NJ) / ((float)block.y)) );
t_start = rtclock();
#ifdef PREF
hipStream_t stream1;
hipStream_t stream2;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
for (int i = 0; i < 1; i++)
{
hipMemPrefetchAsync(A_gpu,NI*NJ*sizeof(DATA_TYPE), GPU_DEVICE, stream1 );
hipStreamSynchronize(stream1);
hipMemPrefetchAsync(B_gpu,NI*NJ*sizeof(DATA_TYPE), GPU_DEVICE, stream2 );
hipStreamSynchronize(stream2);
// hipMemset(B_gpu,0 ,NI*NJ*sizeof(DATA_TYPE));
hipLaunchKernelGGL(( Convolution2D_kernel), dim3(grid),dim3(block), 0,stream2, A_gpu,B_gpu);
hipDeviceSynchronize();
}
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);//);
#else
for (int i = 0; i < 1; i++)
{
hipLaunchKernelGGL(( Convolution2D_kernel), dim3(grid),dim3(block), 0, 0, A_gpu,B_gpu);
hipDeviceSynchronize();
}
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);//);
#endif
}
int main(int argc, char *argv[])
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
A = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
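	// A_gpu and B_gpu live in unified (managed) memory, so host-side init() and the GPU kernel can both access them directly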
hipMallocManaged(&A_gpu, sizeof(DATA_TYPE) * NI * NJ);
hipMallocManaged(&B_gpu, sizeof(DATA_TYPE) * NI * NJ);
// B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
//initialize the arrays
init(A, A_gpu);
GPU_argv_init();
convolution2DCuda(A_gpu, B_gpu);
t_start = rtclock();
conv2D(A, B);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);//);
compareResults(B, B_gpu);
free(A);
free(B);
hipFree(A_gpu);
hipFree(B_gpu);
return 0;
}
|
e133c3ca4101e2fe8829b130013b8304da0173f9.cu
|
/**
* 2DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#include "../../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define NI 4096 *4
#define NJ 4096 * 4
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void conv2D(DATA_TYPE* A, DATA_TYPE* B)
{
int i, j;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +0.2; c21 = +0.5; c31 = -0.8;
c12 = -0.3; c22 = +0.6; c32 = -0.9;
c13 = +0.4; c23 = +0.7; c33 = +0.10;
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
B[i*NJ + j] = c11 * A[(i - 1)*NJ + (j - 1)] + c12 * A[(i + 0)*NJ + (j - 1)] + c13 * A[(i + 1)*NJ + (j - 1)]
+ c21 * A[(i - 1)*NJ + (j + 0)] + c22 * A[(i + 0)*NJ + (j + 0)] + c23 * A[(i + 1)*NJ + (j + 0)]
+ c31 * A[(i - 1)*NJ + (j + 1)] + c32 * A[(i + 0)*NJ + (j + 1)] + c33 * A[(i + 1)*NJ + (j + 1)];
}
}
}
void init(DATA_TYPE* A, DATA_TYPE* A_gpu)
{
int i, j;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
float temp = (float)rand()/RAND_MAX;
A[i*NJ + j] = temp;
A_gpu[i*NJ + j] = temp;
}
}
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
int i, j, fail;
fail = 0;
// Compare a and b
for (i=1; i < (NI-1); i++)
{
for (j=1; j < (NJ-1); j++)
{
if (percentDiff(B[i*NJ + j], B_outputFromGpu[i*NJ + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void Convolution2D_kernel(DATA_TYPE *A, DATA_TYPE *B)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +0.2; c21 = +0.5; c31 = -0.8;
c12 = -0.3; c22 = +0.6; c32 = -0.9;
c13 = +0.4; c23 = +0.7; c33 = +0.10;
if ((i < NI-1) && (j < NJ-1) && (i > 0) && (j > 0))
{
B[i * NJ + j] = c11 * A[(i - 1) * NJ + (j - 1)] + c21 * A[(i - 1) * NJ + (j + 0)] + c31 * A[(i - 1) * NJ + (j + 1)]
+ c12 * A[(i + 0) * NJ + (j - 1)] + c22 * A[(i + 0) * NJ + (j + 0)] + c32 * A[(i + 0) * NJ + (j + 1)]
+ c13 * A[(i + 1) * NJ + (j - 1)] + c23 * A[(i + 1) * NJ + (j + 0)] + c33 * A[(i + 1) * NJ + (j + 1)];
}
}
void convolution2DCuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu)
{
double t_start, t_end;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)ceil( ((float)NI) / ((float)block.x) ), (size_t)ceil( ((float)NJ) / ((float)block.y)) );
t_start = rtclock();
#ifdef PREF
cudaStream_t stream1;
cudaStream_t stream2;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
for (int i = 0; i < 1; i++)
{
cudaMemPrefetchAsync(A_gpu,NI*NJ*sizeof(DATA_TYPE), GPU_DEVICE, stream1 );
cudaStreamSynchronize(stream1);
cudaMemPrefetchAsync(B_gpu,NI*NJ*sizeof(DATA_TYPE), GPU_DEVICE, stream2 );
cudaStreamSynchronize(stream2);
// cudaMemset(B_gpu,0 ,NI*NJ*sizeof(DATA_TYPE));
Convolution2D_kernel<<<grid,block, 0,stream2>>>(A_gpu,B_gpu);
cudaDeviceSynchronize();
}
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);//);
#else
for (int i = 0; i < 1; i++)
{
Convolution2D_kernel<<<grid,block>>>(A_gpu,B_gpu);
cudaDeviceSynchronize();
}
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);//);
#endif
}
int main(int argc, char *argv[])
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
A = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
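	// A_gpu and B_gpu live in unified (managed) memory, so host-side init() and the GPU kernel can both access them directly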
cudaMallocManaged(&A_gpu, sizeof(DATA_TYPE) * NI * NJ);
cudaMallocManaged(&B_gpu, sizeof(DATA_TYPE) * NI * NJ);
// B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
//initialize the arrays
init(A, A_gpu);
GPU_argv_init();
convolution2DCuda(A_gpu, B_gpu);
t_start = rtclock();
conv2D(A, B);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);//);
compareResults(B, B_gpu);
free(A);
free(B);
cudaFree(A_gpu);
cudaFree(B_gpu);
return 0;
}
|
893c92954978e24405674e555345bc2cbfafef6d.hip
|
// !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "nccl.h"
#include "core.h"
#include "ring.h"
#include "param.h"
#include "nvmlwrap.h"
#include "rings.h"
#include "bootstrap.h"
#include "transport.h"
#include "common_coll.h"
#include "group.h"
#include "utils.h"
#include "net.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sched.h>
#include <fcntl.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <dlfcn.h>
#define STR2(v) #v
#define STR(v) STR2(v)
int ncclDebugLevel;
uint64_t ncclDebugMask = NCCL_INIT; // Default debug sub-system mask is INIT
pthread_mutex_t ncclDebugOutputLock;
FILE *ncclDebugFile = stdout;
#ifdef ENABLE_TRACE
std::chrono::high_resolution_clock::time_point ncclEpoch;
#endif
#if __CUDACC_VER_MAJOR__ >= 10 || (__CUDACC_VER_MAJOR__ >= 9 && __CUDACC_VER_MINOR__ >= 2)
#define NCCL_GROUP_CUDA_STREAM 0 // CGMD: CUDA 9.2,10.X Don't need to use an internal CUDA stream
#else
#define NCCL_GROUP_CUDA_STREAM 1 // CGMD: CUDA 9.0,9.1 Need to use an internal CUDA stream
#endif
NCCL_PARAM(GroupCudaStream, "GROUP_CUDA_STREAM", NCCL_GROUP_CUDA_STREAM);
NCCL_PARAM(CheckPointers, "CHECK_POINTERS", 0);
ncclNet_t* ncclNet = NULL;
// We define this as weak to let tests redefine their own
#pragma weak ncclCudaCompCap
int ncclCudaCompCap() {
int cudaDev;
if (hipGetDevice(&cudaDev) != hipSuccess) return 0;
int ccMajor;
if (hipDeviceGetAttribute(&ccMajor, hipDeviceAttributeComputeCapabilityMajor, cudaDev) != hipSuccess) return 0;
return ccMajor;
}
int ncclCudaFullCompCap() {
int cudaDev;
if (hipGetDevice(&cudaDev) != hipSuccess) return 0;
int ccMajor, ccMinor;
if (hipDeviceGetAttribute(&ccMajor, hipDeviceAttributeComputeCapabilityMajor, cudaDev) != hipSuccess) return 0;
if (hipDeviceGetAttribute(&ccMinor, hipDeviceAttributeComputeCapabilityMinor, cudaDev) != hipSuccess) return 0;
return ccMajor*10+ccMinor;
}
// Returns ncclInternalError if anything fails, causing that network to be ignored.
ncclResult_t initNet(ncclNet_t* net) {
int ndev;
if (net->init(ncclDebugLog) != ncclSuccess) return ncclInternalError;
if (net->devices(&ndev) != ncclSuccess) return ncclInternalError;
if (ndev <= 0) {
INFO(NCCL_INIT|NCCL_NET, "Net/%s: call to devices() returned 0 devices.", net->name);
return ncclSystemError;
}
return ncclSuccess;
}
ncclResult_t initNetPlugin(ncclNet_t** net) {
void* netPluginLib = dlopen("libnccl-net.so", RTLD_NOW | RTLD_LOCAL);
if (netPluginLib == NULL) {
// dlopen does not guarantee to set errno, but dlerror only gives us a
// string, so checking errno doesn't hurt to try to provide a better
// error message
if (errno == ENOENT) {
INFO(NCCL_INIT|NCCL_NET, "No network plugin found.");
} else {
INFO(NCCL_INIT|NCCL_NET, "Unable to load libnccl-net.so : %s", dlerror());
}
return ncclSuccess;
}
ncclNet_t* extNet = (ncclNet_t*) dlsym(netPluginLib, STR(NCCL_PLUGIN_SYMBOL));
if (extNet == NULL) {
INFO(NCCL_INIT|NCCL_NET, "NetPlugin: could not find " STR(NCCL_PLUGIN_SYMBOL) " symbol");
goto cleanup;
}
if (initNet(extNet) == ncclSuccess) {
*net = extNet;
return ncclSuccess;
}
cleanup:
if (netPluginLib != NULL) dlclose(netPluginLib);
return ncclSuccess;
}
ncclResult_t initNet() {
// Always initialize sockets as we use it for bootstrap
NCCLCHECK(initNet(&ncclNetSocket));
NCCLCHECK(initNetPlugin(&ncclNet));
if (ncclNet != NULL) {
INFO(NCCL_INIT|NCCL_NET, "Using network plugin %s", ncclNetName());
return ncclSuccess;
}
if (initNet(&ncclNetIb) == ncclSuccess) {
ncclNet = &ncclNetIb;
} else {
ncclNet = &ncclNetSocket;
}
INFO(NCCL_INIT|NCCL_NET,"Using network %s", ncclNetName());
return ncclSuccess;
}
NCCL_PARAM(LlThreshold, "LL_THRESHOLD", -2);
NCCL_PARAM(ThreadThreshold, "THREAD_THRESHOLD", -2);
int ncclThreadThreshold(int minCompCap, int multiNode) {
int threshold = ncclParamThreadThreshold();
if (threshold == -2) { // user has not set this env variable
threshold = (minCompCap <= 6) ? NCCL_THREAD_THRESHOLD_PREVOLTA : NCCL_THREAD_THRESHOLD;
// multiply by 2 if running on multiple nodes
if (multiNode) {
threshold *= 2;
}
}
return threshold;
}
pthread_mutex_t initLock = PTHREAD_MUTEX_INITIALIZER;
static bool initialized = false;
static ncclResult_t ncclInit() {
if (initialized) return ncclSuccess;
pthread_mutex_lock(&initLock);
if (!initialized) {
initEnv();
initDebug();
initNet();
initialized = true;
}
pthread_mutex_unlock(&initLock);
return ncclSuccess;
}
NCCL_API(ncclResult_t, ncclGetVersion, int* version);
ncclResult_t ncclGetVersion(int* version) {
if (version == NULL) return ncclInvalidArgument;
*version = NCCL_VERSION_CODE;
return ncclSuccess;
}
NCCL_API(ncclResult_t, ncclGetUniqueId, ncclUniqueId* out);
ncclResult_t ncclGetUniqueId(ncclUniqueId* out) {
NCCLCHECK(ncclInit());
NCCLCHECK(PtrCheck(out, "GetUniqueId", "out"));
return bootstrapGetUniqueId(out);
}
static ncclResult_t commFree(ncclComm_t comm) {
if (comm == NULL)
return ncclSuccess;
CUDACHECK(hipFree(comm->devComm));
for (int ring=0; ring<comm->nRings; ring++)
NCCLCHECK(freeRing(comm->rings+ring));
if (comm->doneEvent != NULL)
CUDACHECK(hipEventDestroy(comm->doneEvent));
if (comm->launchMode == ncclComm::GROUP) {
CUDACHECK(hipStreamDestroy(comm->groupStream));
}
// Last rank frees shared resources between threads
int isLast;
NCCLCHECK(ncclCpuBarrierIn(comm, &isLast));
if (isLast) {
free(comm->intraBarrier);
free(comm->intraParams);
free(comm->intraCudaDevs);
free(comm->intraCGMode);
free(comm->intraCC);
}
free(comm);
return ncclSuccess;
}
static ncclResult_t commAlloc(ncclComm_t* comret, int ndev, int rank) {
if (ndev < 1) {
WARN("invalid device count (%d) requested", ndev);
return ncclInvalidArgument;
}
if (rank >= ndev || rank < 0) {
WARN("rank %d exceeds ndev=%d", rank, ndev);
return ncclInvalidArgument;
}
// Try to create a CUDA object right away. If there is something wrong with
// the device we're on (failure cause #1) , better know it early.
hipEvent_t doneEvent;
CUDACHECK(hipEventCreateWithFlags(&doneEvent, hipEventDisableTiming));
struct ncclComm* comm;
NCCLCHECK(ncclCalloc(&comm, 1));
INFO(NCCL_INIT,"comm %p rank %d nranks %d", comm, rank, ndev);
comm->rank = rank;
comm->nRanks = ndev;
hipGetDevice(&comm->cudaDev);
comm->doneEvent = doneEvent;
comm->llThreshold = ncclParamLlThreshold();
comm->checkPointers = ncclParamCheckPointers() == 1 ? true : false;
#if __CUDACC_VER_MAJOR__ >= 10 || (__CUDACC_VER_MAJOR__ >= 9 && __CUDACC_VER_MINOR__ >= 2)
comm->groupCudaStream = ncclParamGroupCudaStream();
#else
// Don't allow the user to overload the default setting in older CUDA builds
comm->groupCudaStream = NCCL_GROUP_CUDA_STREAM;
#endif
comm->argsptr = &comm->args;
*comret = comm;
return ncclSuccess;
}
static ncclResult_t devCommSetup(ncclComm_t comm) {
// Fully duplicate the comm on the device
NCCLCHECK(ncclCudaCalloc(&comm->devComm, 1));
// Copy the comm on the device
NCCLCHECK(ncclCudaMemcpy(comm->devComm, comm, 1));
// Copy userRanks
for (int r=0; r<comm->nRings; r++) {
NCCLCHECK(ncclCudaMemcpy(comm->rings[r].devUserRanks, comm->rings[r].userRanks, comm->nRanks));
}
return ncclSuccess;
}
// Pre-process the string so that running "strings" on the lib can quickly reveal the version.
#define VERSION_STRING "NCCL version " STR(NCCL_MAJOR) "." STR(NCCL_MINOR) "." STR(NCCL_PATCH) NCCL_SUFFIX "+cuda" STR(CUDA_MAJOR) "." STR(CUDA_MINOR)
static void showVersion() {
static int shown = 0;
if (shown == 0 && ncclDebugLevel >= NCCL_LOG_VERSION) {
printf("%s\n", VERSION_STRING);
fflush(stdout);
if (ncclDebugFile != stdout)
INFO(NCCL_ALL,"%s", VERSION_STRING); // Also log NCCL version in one of the files
shown = 1;
}
}
static ncclResult_t fillInfo(struct ncclInfo* info, int rank) {
for (int t=0; t<NTRANSPORTS; t++) {
NCCLCHECK(ncclTransports[t].fillInfo(info->tinfo+t, rank));
}
return ncclSuccess;
}
template <int type>
static ncclResult_t selectTransport(struct ncclInfo* myInfo, struct ncclInfo* peerInfo, struct ncclConnect* connect, struct ncclTransport** transportRet, struct ncclRing* ring) {
for (int t=0; t<NTRANSPORTS; t++) {
struct ncclTransport *transport = ncclTransports+t;
struct ncclTransportComm* transportComm = type == 1 ? &transport->send : &transport->recv;
ncclTvalue_t ret = 0;
NCCLCHECK(transport->canConnect(&ret, myInfo->tinfo+t, peerInfo->tinfo+t));
if (ret > 0) {
NCCLCHECK(transportComm->setup(myInfo->tinfo+t, peerInfo->tinfo+t, connect, ring));
*transportRet = transport;
return ncclSuccess;
}
}
WARN("No transport found !");
*transportRet = NULL;
return ncclInternalError;
}
static ncclResult_t setupRing(struct ncclComm* comm, int ringid, int rank, int nranks, int* ringRanks, struct ncclInfo* allInfo, struct ncclConnect* connect) {
NCCLCHECK(initRing(comm, ringid));
struct ncclRing* ring = comm->rings+ringid;
// Reorganize ranks to start with rank.
int shift;
for (shift = 0; shift<nranks; shift++) {
if (ringRanks[shift] == rank) {
break;
}
}
for (int i=0; i<nranks; i++) {
ring->userRanks[i] = ringRanks[(i+shift)%nranks];
}
int prev = ring->userRanks[nranks-1];
int next = ring->userRanks[1];
NCCLCHECK(selectTransport<0>(allInfo+rank, allInfo+prev, connect+0, &ring->recv.transport, ring));
NCCLCHECK(selectTransport<1>(allInfo+rank, allInfo+next, connect+1, &ring->send.transport, ring));
NCCLCHECK(transportCreateProxy(0, ring, comm));
NCCLCHECK(transportCreateProxy(1, ring, comm));
return ncclSuccess;
}
static ncclResult_t fillConnect(struct ncclInfo* allInfo, int nranks, int rank, int* connectTransport, ncclTvalue_t* connectValue) {
for (int r=0; r<nranks; r++) {
connectTransport[r] = -1;
for (int t=0; t<NTRANSPORTS; t++) {
NCCLCHECK(ncclTransports[t].canConnect(connectValue+r, allInfo[rank].tinfo+t, allInfo[r].tinfo+t));
if (connectValue[r] > 0) {
connectTransport[r] = t;
break;
}
}
}
return ncclSuccess;
}
static void swap(void* mem1, void* mem2, int size) {
char tmp[size];
memcpy(tmp, mem1, size); memcpy(mem1, mem2, size); memcpy(mem2, tmp, size);
}
#define MAXWIDTH 20
#define PREFIXLEN 15
#define STRLENGTH (PREFIXLEN+5*MAXWIDTH)
void dumpMatrix(int* connectMatrix, int nranks) {
char line[STRLENGTH+1];
line[STRLENGTH] = '\0';
memset(line, ' ', STRLENGTH);
for (int j=0; j<nranks && j<MAXWIDTH; j++) sprintf(4+line+4*j, " %3d", j);
INFO(NCCL_INIT,"%s", line);
for (int i=0; i<nranks; i++) {
memset(line, ' ', STRLENGTH);
sprintf(line, "%3d ", i);
for (int j=0; j<nranks && j<MAXWIDTH; j++) sprintf(4+line+4*j, " %3d", connectMatrix[i*nranks+j]);
INFO(NCCL_INIT,"%s", line);
}
}
void dumpMatrixTvalue(ncclTvalue_t* connectMatrix, int nranks) {
char line[STRLENGTH+1];
line[STRLENGTH] = '\0';
memset(line, ' ', STRLENGTH);
for (int j=0; j<nranks && j<MAXWIDTH; j++) sprintf(4+line+5*j, " %4d", j);
INFO(NCCL_INIT,"%s", line);
for (int i=0; i<nranks; i++) {
memset(line, ' ', STRLENGTH);
sprintf(line, "%3d ", i);
for (int j=0; j<nranks && j<MAXWIDTH; j++) sprintf(4+line+5*j, " %4o", (int)connectMatrix[i*nranks+j]);
INFO(NCCL_INIT,"%s", line);
}
}
void dumpLine(int* values, int nranks, const char* prefix) {
int prefixlen = strlen(prefix);
char line[STRLENGTH+1];
line[STRLENGTH] = '\0';
memset(line, ' ', STRLENGTH);
strncpy(line, prefix, PREFIXLEN);
for (int i=0; i<nranks && i<MAXWIDTH; i++) sprintf(line+prefixlen+4*i, " %3d", values[i]);
INFO(NCCL_INIT,"%s", line);
}
static ncclResult_t buildRings(int nrings, int* rings, int rank, int nranks, int* prev, int* next) {
for (int r=0; r<nrings; r++) {
char prefix[30];
/*sprintf(prefix, "[%d] Ring %d Prev : ", rank, r);
dumpLine(prev+r*nranks, nranks, prefix);
sprintf(prefix, "[%d] Ring %d Next : ", rank, r);
dumpLine(next+r*nranks, nranks, prefix);*/
int current = rank;
for (int i=0; i<nranks; i++) {
rings[r*nranks+i] = current;
current = next[r*nranks+current];
}
sprintf(prefix, "Ring %02d : ", r);
if (rank == 0) dumpLine(rings+r*nranks, nranks, prefix);
if (current != rank) {
WARN("Error : ring %d does not loop back to start (%d != %d)", r, current, rank);
return ncclInternalError;
}
// Check that all ranks are there
for (int i=0; i<nranks; i++) {
int found = 0;
for (int j=0; j<nranks; j++) {
if (rings[r*nranks+j] == i) {
found = 1;
break;
}
}
if (found == 0) {
WARN("Error : ring %d does not contain rank %d", r, i);
return ncclInternalError;
}
}
}
return ncclSuccess;
}
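// Spin until another thread of this process (the intra-process rank 0) has
// published the shared pointer; used below to hand out intra-process state.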
void* waitForNonNullPtr(void* p) {
volatile void** ptr = (volatile void**) p;
while (*ptr == NULL) sched_yield();
return (void*)*ptr;
}
ncclResult_t initParams(struct ncclComm* comm) {
struct cudaLaunchParams* params = comm->myParams = comm->intraParams+comm->intraRank;
params->args = &comm->argsptr;
params->stream = NULL;
params->sharedMem = 0;
params->blockDim.x = 0; params->blockDim.y = params->blockDim.z = 1;
params->gridDim.x = 0; params->gridDim.y = params->gridDim.z = 1;
return ncclSuccess;
}
// Allocate/Set Intra Process Structures and set CG options
ncclResult_t ncclCommSetIntra(struct ncclComm* comm, int rank, int ranks, struct ncclComm* comm0) {
comm->intraRank = rank;
comm->intraRanks = ranks;
comm->intraPhase = 0;
// Alloc shared structures
if (rank == 0) {
assert(comm == comm0);
int* bar;
NCCLCHECK(ncclCalloc(&bar, 2));
bar[0] = bar[1] = 0;
comm->intraBarrier = bar;
NCCLCHECK(ncclCalloc(&comm->intraParams, comm->intraRanks));
NCCLCHECK(ncclCalloc(&comm->intraCudaDevs, comm->intraRanks));
int* CGMode;
NCCLCHECK(ncclCalloc(&CGMode, 1));
*CGMode = 0x11;
comm->intraCGMode = CGMode;
int* CC;
NCCLCHECK(ncclCalloc(&CC, 1));
*CC = ncclCudaFullCompCap();
comm->intraCC = CC;
} else {
comm->intraBarrier = (int*)waitForNonNullPtr(&comm0->intraBarrier);
comm->intraParams = (struct cudaLaunchParams*)waitForNonNullPtr(&comm0->intraParams);
comm->intraCudaDevs = (int*)waitForNonNullPtr(&comm0->intraCudaDevs);
comm->intraCGMode = (int*)waitForNonNullPtr(&comm0->intraCGMode);
comm->intraCC = (int*)waitForNonNullPtr(&comm0->intraCC);
}
comm->intraCudaDevs[comm->intraRank] = comm->cudaDev;
NCCLCHECK(initParams(comm));
int cgMdLaunch = 0;
// Set CG Mode
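// GROUP mode launches the kernels of all ranks in this process together, using
// the internal groupStream created below and, where supported, cooperative
// multi-device launch; PARALLEL launches each rank's kernel independently.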
comm->launchMode = ncclComm::GROUP;
char* str = getenv("NCCL_LAUNCH_MODE");
if (comm->intraRanks == 1 || (str && strcmp(str, "PARALLEL") == 0)) {
comm->launchMode = ncclComm::PARALLEL;
}
if (comm->launchMode == ncclComm::GROUP) {
CUDACHECK(hipStreamCreateWithFlags(&comm->groupStream, hipStreamNonBlocking));
#if __CUDACC_VER_MAJOR__ >= 9
if (*comm->intraCC && (ncclCudaFullCompCap() == *comm->intraCC)) {
// Check whether the GPU supports Cooperative Group Multi Device Launch
(void) hipDeviceGetAttribute(&cgMdLaunch, hipDeviceAttributeCooperativeMultiDeviceLaunch, comm->cudaDev);
}
#endif
}
// Disable cgMdLaunch if any rank does not support it
if (cgMdLaunch == 0) {
*comm->intraCGMode = 0x10;
}
return ncclSuccess;
}
static ncclResult_t initTransportsRank(struct ncclComm* comm, ncclUniqueId* commId) {
int rank = comm->rank;
int nranks = comm->nRanks;
void* commState;
NCCLCHECK(bootstrapInit(commId, rank, nranks, &commState));
struct ncclInfo* allInfo;
NCCLCHECK(ncclCalloc(&allInfo, nranks));
NCCLCHECK(fillInfo(allInfo+rank, rank));
NCCLCHECK(bootstrapAllGather(commState, allInfo, sizeof(struct ncclInfo)));
int* connectTransport;
ncclTvalue_t* connectValue;
NCCLCHECK(ncclCalloc(&connectTransport, nranks*nranks));
NCCLCHECK(ncclCalloc(&connectValue, nranks*nranks));
NCCLCHECK(fillConnect(allInfo, nranks, rank, connectTransport+nranks*rank, connectValue+nranks*rank));
NCCLCHECK(bootstrapAllGather(commState, connectTransport, nranks*(sizeof(int))));
NCCLCHECK(bootstrapAllGather(commState, connectValue, nranks*(sizeof(ncclTvalue_t))));
//if (rank == 0) dumpMatrix(connectTransport, nranks);
//if (rank == 0) dumpMatrixTvalue(connectValue, nranks);
// Get my rings
int nrings;
int* prev, *next;
NCCLCHECK(ncclCalloc(&prev, nranks*MAXRINGS));
NCCLCHECK(ncclCalloc(&next, nranks*MAXRINGS));
comm->nThreads = getDefaultThreads();
NCCLCHECK(ncclGetRings(&nrings, &comm->nThreads, rank, nranks, connectTransport, connectValue, prev, next));
free(connectTransport);
free(connectValue);
// Find max nThreads
int allData[nranks];
allData[rank] = comm->nThreads;
NCCLCHECK(bootstrapAllGather(commState, allData, sizeof(int)));
for (int i=0; i<nranks; i++)
comm->nThreads = ::max(allData[i], comm->nThreads);
if (rank == 0) INFO(NCCL_INIT,"Using %d threads", comm->nThreads);
// Determine the minimum CUDA Compute capability of all GPUs
int myCompCap = ncclCudaCompCap();
int minCompCap = myCompCap;
allData[rank] = myCompCap;
NCCLCHECK(bootstrapAllGather(commState, allData, sizeof(int)));
for (int i=0; i<nranks; i++)
minCompCap = ::min(allData[i], minCompCap);
if (rank == 0) INFO(NCCL_INIT,"Min Comp Cap %d", minCompCap);
// Find min nrings across ranks
allData[rank] = nrings;
NCCLCHECK(bootstrapAllGather(commState, allData, sizeof(int)));
for (int i=0; i<nranks; i++)
nrings = ::min(allData[i], nrings);
// Exchange data with others to build complete rings
comm->nRings = nrings;
for (int r=0; r<nrings; r++) {
NCCLCHECK(bootstrapAllGather(commState, prev+r*nranks, sizeof(int)));
NCCLCHECK(bootstrapAllGather(commState, next+r*nranks, sizeof(int)));
}
int *rings;
NCCLCHECK(ncclCalloc(&rings, nranks*MAXRINGS));
NCCLCHECK(buildRings(nrings, rings, rank, nranks, prev, next));
free(prev);
free(next);
// Connect with prev/next for each ring
struct ncclConnect *connectData;
NCCLCHECK(ncclCalloc(&connectData, 2*nranks));
for (int r=0; r<nrings; r++) {
int* ringRanks = rings+r*nranks;
struct ncclRing *ring = comm->rings+r;
NCCLCHECK(setupRing(comm, r, rank, nranks, ringRanks, allInfo, connectData+2*rank));
int prev_offset = ring->userRanks[nranks-1]*2+1;
int next_offset = ring->userRanks[1]*2;
NCCLCHECK(bootstrapAllGather(commState, connectData, sizeof(struct ncclConnect)*2));
NCCLCHECK(ring->send.transport->send.connect(connectData+next_offset, &ring->send));
NCCLCHECK(ring->recv.transport->recv.connect(connectData+prev_offset, &ring->recv));
}
free(connectData);
free(rings);
free(allInfo);
// Intra-process barrier setup
struct rankInfo {
uint64_t hostHash;
uint64_t pidHash;
struct ncclComm* comm;
} rankInfos[nranks];
rankInfos[rank].hostHash = getHostHash();
rankInfos[rank].pidHash = getPidHash();
rankInfos[rank].comm = comm;
NCCLCHECK(bootstrapAllGather(commState, rankInfos, sizeof(struct rankInfo)));
// Compute intra ranks
int intraRank0 = -1, intraRank = -1, intraRanks = 0;
int multiNode = 0;
for (int r=0; r<nranks; r++) {
if ((rankInfos[r].hostHash == rankInfos[rank].hostHash) &&
(rankInfos[r].pidHash == rankInfos[rank].pidHash)) {
if (intraRanks == 0) intraRank0 = r;
if (r == rank) intraRank = intraRanks;
intraRanks++;
} else if (rankInfos[r].hostHash != rankInfos[rank].hostHash) {
multiNode = 1;
}
}
TRACE(NCCL_INIT,"hostHash[%d] %lx intraRank %d intraRanks %d intraRank0 %d",
rank, rankInfos[rank].hostHash, intraRank, intraRanks, intraRank0);
if (intraRank == -1 || intraRank0 == -1 || rankInfos[intraRank0].comm == NULL) {
WARN("Failed to determine intra ranks hostHash[%d] %lx intraRank %d intraRanks %d intraRank0 %d",
rank, rankInfos[rank].hostHash, intraRank, intraRanks, intraRank0);
return ncclInternalError;
}
NCCLCHECK(ncclCommSetIntra(comm, intraRank, intraRanks, rankInfos[intraRank0].comm));
// Determine thread threshold across all GPUs
comm->threadThreshold = ncclThreadThreshold(minCompCap, multiNode);
// Barrier
bootstrapClose(commState);
return ncclSuccess;
}
bool SetCpuAffinity(int cudaDev, uint32_t* nvmlDevice) {
char busId[NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE];
if (hipDeviceGetPCIBusId(busId, NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE, cudaDev) != hipSuccess) return false;
if (wrapNvmlDeviceGetHandleByPciBusId(busId, nvmlDevice) != ncclSuccess) return false;
if (wrapNvmlDeviceSetCpuAffinity(*nvmlDevice) != ncclSuccess) {
WARN("Failed to set CPU affinity");
return false;
}
return true;
}
ncclResult_t ncclCommInitRankSync(ncclComm_t* newcomm, int nranks, ncclUniqueId commId, int myrank) {
cpu_set_t affinitySave;
sched_getaffinity(0, sizeof(cpu_set_t), &affinitySave);
NCCLCHECK(wrapNvmlSymbols());
NCCLCHECK(wrapNvmlInit());
// Make sure all host memory allocations are close to the GPU
int cudaDev;
uint32_t nvmlDevice;
CUDACHECK(hipGetDevice(&cudaDev));
SetCpuAffinity(cudaDev, &nvmlDevice);
ncclResult_t res;
NCCLCHECKGOTO(commAlloc(newcomm, nranks, myrank), res, cleanup);
NCCLCHECKGOTO(initTransportsRank(*newcomm, &commId), res, cleanup);
NCCLCHECKGOTO(devCommSetup(*newcomm), res, cleanup);
sched_setaffinity(0, sizeof(cpu_set_t), &affinitySave);
NCCLCHECKGOTO(wrapNvmlShutdown(), res, cleanup);
INFO(NCCL_INIT,"comm %p rank %d nranks %d - COMPLETE", *newcomm, myrank, nranks);
return ncclSuccess;
cleanup:
*newcomm = NULL;
sched_setaffinity(0, sizeof(cpu_set_t), &affinitySave);
return res;
}
NCCL_API(ncclResult_t, ncclCommInitRank, ncclComm_t* newcomm, int nranks, ncclUniqueId commId, int myrank);
ncclResult_t ncclCommInitRank(ncclComm_t* newcomm, int nranks, ncclUniqueId commId, int myrank) {
char* env = getenv("NCCL_COMM_ID");
if (env && myrank == 0) {
NCCLCHECK(bootstrapCreateRoot(&commId, true));
}
NCCLCHECK(ncclInit());
if (myrank == 0) showVersion();
INFO(NCCL_INIT,"rank %d nranks %d", myrank, nranks);
// Make sure the CUDA runtime is initialized.
CUDACHECK(hipFree(NULL));
NCCLCHECK(PtrCheck(newcomm, "CommInitRank", "newcomm"));
if (nranks < 1 || myrank < 0 || myrank >= nranks) {
WARN("Invalid rank requested : %d/%d", myrank, nranks);
return ncclInvalidArgument;
}
if (ncclAsyncMode()) {
int cudaDev;
CUDACHECK(hipGetDevice(&cudaDev));
return ncclAsyncInit(ncclCommInitRankSync, cudaDev, newcomm, nranks, commId, myrank);
} else {
return ncclCommInitRankSync(newcomm, nranks, commId, myrank);
}
}
static ncclResult_t initTransportsAll(struct ncclComm** comms, const int* devs, int nranks) {
struct ncclInfo* allInfo;
NCCLCHECK(ncclCalloc(&allInfo, nranks));
for (int rank=0; rank<nranks; rank++) {
CUDACHECK(hipSetDevice(devs[rank]));
NCCLCHECK(fillInfo(allInfo+rank, rank));
}
int* connectTransport;
ncclTvalue_t* connectValue;
NCCLCHECK(ncclCalloc(&connectTransport, nranks*nranks));
NCCLCHECK(ncclCalloc(&connectValue, nranks*nranks));
for (int rank=0; rank<nranks; rank++)
NCCLCHECK(fillConnect(allInfo, nranks, rank, connectTransport+nranks*rank, connectValue+nranks*rank));
int* prev, *prevFinal, *next, *nextFinal;
NCCLCHECK(ncclCalloc(&prev, nranks*MAXRINGS));
NCCLCHECK(ncclCalloc(&prevFinal, nranks*MAXRINGS));
NCCLCHECK(ncclCalloc(&next, nranks*MAXRINGS));
NCCLCHECK(ncclCalloc(&nextFinal, nranks*MAXRINGS));
int nrings = MAXRINGS;
int nthreads=0;
int myCompCap = ncclCudaCompCap();
int minCompCap = myCompCap;
for (int rank=0; rank<nranks; rank++) {
CUDACHECK(hipSetDevice(devs[rank]));
int nringsRank;
int nthreadsRank = getDefaultThreads();
myCompCap = ncclCudaCompCap();
NCCLCHECK(ncclGetRings(&nringsRank, &nthreadsRank, rank, nranks, connectTransport, connectValue, prev, next));
nrings = ::min(nrings, nringsRank);
nthreads = ::max(nthreads, nthreadsRank);
minCompCap = ::min(minCompCap, myCompCap);
for (int ring=0; ring<nrings; ring++) {
int index = ring*nranks+rank;
prevFinal[index] = prev[index];
nextFinal[index] = next[index];
}
}
free(connectTransport);
free(connectValue);
free(prev);
free(next);
INFO(NCCL_INIT,"Using %d threads", nthreads);
INFO(NCCL_INIT,"Min Comp Cap %d", minCompCap);
int* rings;
NCCLCHECK(ncclCalloc(&rings, nranks*MAXRINGS));
NCCLCHECK(buildRings(nrings, rings, 0, nranks, prevFinal, nextFinal));
free(prevFinal);
free(nextFinal);
// Determine thread threshold across all GPUs
int threadThreshold = ncclThreadThreshold(minCompCap, 0);
for (int rank=0; rank<nranks; rank++) {
comms[rank]->nRings = nrings;
comms[rank]->nThreads = nthreads;
comms[rank]->threadThreshold = threadThreshold;
}
for (int r=0; r<nrings; r++) {
struct ncclConnect connect[2*nranks];
int* ringRanks = rings+r*nranks;
for (int rank=0; rank<nranks; rank++) {
CUDACHECK(hipSetDevice(devs[rank]));
NCCLCHECK(setupRing(comms[rank], r, rank, nranks, ringRanks, allInfo, connect+2*rank));
}
// RingExchange connect information
for (int rank=0; rank<nranks; rank++) {
// Swap rank->prev and prevRank->next
struct ncclRing *ring = comms[rank]->rings+r;
int prevRank = ring->userRanks[nranks-1];
struct ncclConnect* prevRankNextConnect = connect+2*prevRank+1;
struct ncclConnect* rankPrevConnect = connect+2*rank;
swap(prevRankNextConnect, rankPrevConnect, sizeof(struct ncclConnect));
}
for (int rank=0; rank<nranks; rank++) {
CUDACHECK(hipSetDevice(devs[rank]));
struct ncclRing *ring = comms[rank]->rings+r;
NCCLCHECK(ring->send.transport->send.connect(connect+2*rank+1, &ring->send));
NCCLCHECK(ring->recv.transport->recv.connect(connect+2*rank+0, &ring->recv));
}
}
free(rings);
free(allInfo);
return ncclSuccess;
}
NCCL_API(ncclResult_t, ncclCommInitAll, ncclComm_t* comms, int ndev, const int* devlist);
ncclResult_t ncclCommInitAll(ncclComm_t* comms, int ndev, const int* devlist) {
NCCLCHECK(ncclInit());
NCCLCHECK(wrapNvmlSymbols());
NCCLCHECK(wrapNvmlInit());
showVersion();
INFO(NCCL_INIT,"nranks %d", ndev);
NCCLCHECK(PtrCheck(comms, "CommInitAll", "comms"));
if (ndev < 1) {
WARN("Invalid device count requested : %d", ndev);
return ncclInvalidArgument;
}
ncclResult_t res;
int savedDevice;
int rank, cudaDev;
ncclComm_t comm = NULL;
uint32_t nvmlDevice;
int ncclDevList[ndev];
for (int i=0; i<ndev; i++) {
ncclDevList[i] = devlist ? devlist[i] : i;
}
hipGetDevice(&savedDevice);
for(rank=0; rank<ndev; ++rank)
comms[rank] = NULL;
cpu_set_t affinitySave;
sched_getaffinity(0, sizeof(cpu_set_t), &affinitySave);
for (rank=0; rank<ndev; ++rank) {
cudaDev = ncclDevList[rank];
CUDACHECKGOTO(hipSetDevice(cudaDev), res, cleanup);
SetCpuAffinity(cudaDev, &nvmlDevice);
NCCLCHECKGOTO(commAlloc(&comm, ndev, rank), res, cleanup);
comms[rank] = comm;
NCCLCHECKGOTO(ncclCommSetIntra(comm, rank, ndev, comms[0]), res, cleanup);
}
sched_setaffinity(0, sizeof(cpu_set_t), &affinitySave);
NCCLCHECKGOTO(initTransportsAll(comms, ncclDevList, ndev), res, cleanup);
for(rank=0; rank<ndev; ++rank) {
cudaDev = ncclDevList[rank];
CUDACHECKGOTO(hipSetDevice(cudaDev), res, cleanup);
NCCLCHECKGOTO(devCommSetup(comms[rank]), res, cleanup);
}
res = ncclSuccess;
goto final;
cleanup:
for(rank=0; rank<ndev; ++rank) {
if(comms[rank] != NULL) {
commFree(comms[rank]);
}
}
final:
if(wrapNvmlShutdown() != ncclSuccess)
INFO(NCCL_INIT,"NCCL did not shutdown nvml properly");
hipSetDevice(savedDevice);
sched_setaffinity(0, sizeof(cpu_set_t), &affinitySave);
return res;
}
NCCL_API(ncclResult_t, ncclCommDestroy, ncclComm_t comm);
ncclResult_t ncclCommDestroy(ncclComm_t comm) {
if (comm == NULL)
return ncclSuccess;
int savedDevice;
CUDACHECK(hipGetDevice(&savedDevice));
int commDevice = comm->cudaDev;
if (savedDevice != commDevice) {
CUDACHECK(hipSetDevice(commDevice));
}
NCCLCHECK(commFree(comm));
if (savedDevice != commDevice)
CUDACHECK(hipSetDevice(savedDevice));
return ncclSuccess;
}
NCCL_API(const char*, ncclGetErrorString, ncclResult_t code);
const char* ncclGetErrorString(ncclResult_t code) {
switch (code) {
case ncclSuccess : return "no error";
case ncclUnhandledCudaError : return "unhandled cuda error";
case ncclSystemError : return "unhandled system error";
case ncclInternalError : return "internal error";
case ncclInvalidArgument : return "invalid argument";
case ncclInvalidUsage : return "invalid usage";
default : return "unknown result code";
}
}
NCCL_API(ncclResult_t, ncclCommCount, const ncclComm_t comm, int* count);
ncclResult_t ncclCommCount(const ncclComm_t comm, int* count) {
NCCLCHECK(PtrCheck(comm, "CommCount", "comm"));
NCCLCHECK(PtrCheck(count, "CommCount", "count"));
*count = comm->nRanks;
return ncclSuccess;
}
NCCL_API(ncclResult_t, ncclCommCuDevice, const ncclComm_t comm, int* devid);
ncclResult_t ncclCommCuDevice(const ncclComm_t comm, int* devid) {
NCCLCHECK(PtrCheck(comm, "CommCuDevice", "comm"));
NCCLCHECK(PtrCheck(devid, "CommCuDevice", "devid"));
*devid = comm->cudaDev;
return ncclSuccess;
}
NCCL_API(ncclResult_t, ncclCommUserRank, const ncclComm_t comm, int* rank);
ncclResult_t ncclCommUserRank(const ncclComm_t comm, int* rank) {
NCCLCHECK(PtrCheck(comm, "CommUserRank", "comm"));
NCCLCHECK(PtrCheck(rank, "CommUserRank", "rank"));
*rank = comm->rank;
return ncclSuccess;
}
|
893c92954978e24405674e555345bc2cbfafef6d.cu
|
/*************************************************************************
* Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "nccl.h"
#include "core.h"
#include "ring.h"
#include "param.h"
#include "nvmlwrap.h"
#include "rings.h"
#include "bootstrap.h"
#include "transport.h"
#include "common_coll.h"
#include "group.h"
#include "utils.h"
#include "net.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sched.h>
#include <fcntl.h>
#include <unistd.h>
#include <cuda_runtime.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <dlfcn.h>
#define STR2(v) #v
#define STR(v) STR2(v)
int ncclDebugLevel;
uint64_t ncclDebugMask = NCCL_INIT; // Default debug sub-system mask is INIT
pthread_mutex_t ncclDebugOutputLock;
FILE *ncclDebugFile = stdout;
#ifdef ENABLE_TRACE
std::chrono::high_resolution_clock::time_point ncclEpoch;
#endif
#if __CUDACC_VER_MAJOR__ >= 10 || (__CUDACC_VER_MAJOR__ >= 9 && __CUDACC_VER_MINOR__ >= 2)
#define NCCL_GROUP_CUDA_STREAM 0 // CGMD: CUDA 9.2,10.X Don't need to use an internal CUDA stream
#else
#define NCCL_GROUP_CUDA_STREAM 1 // CGMD: CUDA 9.0,9.1 Need to use an internal CUDA stream
#endif
NCCL_PARAM(GroupCudaStream, "GROUP_CUDA_STREAM", NCCL_GROUP_CUDA_STREAM);
NCCL_PARAM(CheckPointers, "CHECK_POINTERS", 0);
ncclNet_t* ncclNet = NULL;
// We define this as weak to let tests redefine their own
#pragma weak ncclCudaCompCap
int ncclCudaCompCap() {
int cudaDev;
if (cudaGetDevice(&cudaDev) != cudaSuccess) return 0;
int ccMajor;
if (cudaDeviceGetAttribute(&ccMajor, cudaDevAttrComputeCapabilityMajor, cudaDev) != cudaSuccess) return 0;
return ccMajor;
}
int ncclCudaFullCompCap() {
int cudaDev;
if (cudaGetDevice(&cudaDev) != cudaSuccess) return 0;
int ccMajor, ccMinor;
if (cudaDeviceGetAttribute(&ccMajor, cudaDevAttrComputeCapabilityMajor, cudaDev) != cudaSuccess) return 0;
if (cudaDeviceGetAttribute(&ccMinor, cudaDevAttrComputeCapabilityMinor, cudaDev) != cudaSuccess) return 0;
return ccMajor*10+ccMinor;
}
// Returns ncclInternalError if anything fails, causing that network to be ignored.
ncclResult_t initNet(ncclNet_t* net) {
int ndev;
if (net->init(ncclDebugLog) != ncclSuccess) return ncclInternalError;
if (net->devices(&ndev) != ncclSuccess) return ncclInternalError;
if (ndev <= 0) {
INFO(NCCL_INIT|NCCL_NET, "Net/%s: call to devices() returned 0 devices.", net->name);
return ncclSystemError;
}
return ncclSuccess;
}
ncclResult_t initNetPlugin(ncclNet_t** net) {
void* netPluginLib = dlopen("libnccl-net.so", RTLD_NOW | RTLD_LOCAL);
if (netPluginLib == NULL) {
// dlopen does not guarantee to set errno, but dlerror only gives us a
// string, so checking errno doesn't hurt to try to provide a better
// error message
if (errno == ENOENT) {
INFO(NCCL_INIT|NCCL_NET, "No network plugin found.");
} else {
INFO(NCCL_INIT|NCCL_NET, "Unable to load libnccl-net.so : %s", dlerror());
}
return ncclSuccess;
}
ncclNet_t* extNet = (ncclNet_t*) dlsym(netPluginLib, STR(NCCL_PLUGIN_SYMBOL));
if (extNet == NULL) {
INFO(NCCL_INIT|NCCL_NET, "NetPlugin: could not find " STR(NCCL_PLUGIN_SYMBOL) " symbol");
goto cleanup;
}
if (initNet(extNet) == ncclSuccess) {
*net = extNet;
return ncclSuccess;
}
cleanup:
if (netPluginLib != NULL) dlclose(netPluginLib);
return ncclSuccess;
}
ncclResult_t initNet() {
// Always initialize sockets as we use it for bootstrap
NCCLCHECK(initNet(&ncclNetSocket));
NCCLCHECK(initNetPlugin(&ncclNet));
if (ncclNet != NULL) {
INFO(NCCL_INIT|NCCL_NET, "Using network plugin %s", ncclNetName());
return ncclSuccess;
}
if (initNet(&ncclNetIb) == ncclSuccess) {
ncclNet = &ncclNetIb;
} else {
ncclNet = &ncclNetSocket;
}
INFO(NCCL_INIT|NCCL_NET,"Using network %s", ncclNetName());
return ncclSuccess;
}
NCCL_PARAM(LlThreshold, "LL_THRESHOLD", -2);
NCCL_PARAM(ThreadThreshold, "THREAD_THRESHOLD", -2);
int ncclThreadThreshold(int minCompCap, int multiNode) {
int threshold = ncclParamThreadThreshold();
if (threshold == -2) { // user has not set this env variable
threshold = (minCompCap <= 6) ? NCCL_THREAD_THRESHOLD_PREVOLTA : NCCL_THREAD_THRESHOLD;
// multiply by 2 if running on multiple nodes
if (multiNode) {
threshold *= 2;
}
}
return threshold;
}
pthread_mutex_t initLock = PTHREAD_MUTEX_INITIALIZER;
static bool initialized = false;
static ncclResult_t ncclInit() {
if (initialized) return ncclSuccess;
pthread_mutex_lock(&initLock);
if (!initialized) {
initEnv();
initDebug();
initNet();
initialized = true;
}
pthread_mutex_unlock(&initLock);
return ncclSuccess;
}
NCCL_API(ncclResult_t, ncclGetVersion, int* version);
ncclResult_t ncclGetVersion(int* version) {
if (version == NULL) return ncclInvalidArgument;
*version = NCCL_VERSION_CODE;
return ncclSuccess;
}
NCCL_API(ncclResult_t, ncclGetUniqueId, ncclUniqueId* out);
ncclResult_t ncclGetUniqueId(ncclUniqueId* out) {
NCCLCHECK(ncclInit());
NCCLCHECK(PtrCheck(out, "GetUniqueId", "out"));
return bootstrapGetUniqueId(out);
}
static ncclResult_t commFree(ncclComm_t comm) {
if (comm == NULL)
return ncclSuccess;
CUDACHECK(cudaFree(comm->devComm));
for (int ring=0; ring<comm->nRings; ring++)
NCCLCHECK(freeRing(comm->rings+ring));
if (comm->doneEvent != NULL)
CUDACHECK(cudaEventDestroy(comm->doneEvent));
if (comm->launchMode == ncclComm::GROUP) {
CUDACHECK(cudaStreamDestroy(comm->groupStream));
}
// Last rank frees shared resources between threads
int isLast;
NCCLCHECK(ncclCpuBarrierIn(comm, &isLast));
if (isLast) {
free(comm->intraBarrier);
free(comm->intraParams);
free(comm->intraCudaDevs);
free(comm->intraCGMode);
free(comm->intraCC);
}
free(comm);
return ncclSuccess;
}
static ncclResult_t commAlloc(ncclComm_t* comret, int ndev, int rank) {
if (ndev < 1) {
WARN("invalid device count (%d) requested", ndev);
return ncclInvalidArgument;
}
if (rank >= ndev || rank < 0) {
WARN("rank %d exceeds ndev=%d", rank, ndev);
return ncclInvalidArgument;
}
// Try to create a CUDA object right away. If there is something wrong with
// the device we're on (failure cause #1) , better know it early.
cudaEvent_t doneEvent;
CUDACHECK(cudaEventCreateWithFlags(&doneEvent, cudaEventDisableTiming));
struct ncclComm* comm;
NCCLCHECK(ncclCalloc(&comm, 1));
INFO(NCCL_INIT,"comm %p rank %d nranks %d", comm, rank, ndev);
comm->rank = rank;
comm->nRanks = ndev;
cudaGetDevice(&comm->cudaDev);
comm->doneEvent = doneEvent;
comm->llThreshold = ncclParamLlThreshold();
comm->checkPointers = ncclParamCheckPointers() == 1 ? true : false;
#if __CUDACC_VER_MAJOR__ >= 10 || (__CUDACC_VER_MAJOR__ >= 9 && __CUDACC_VER_MINOR__ >= 2)
comm->groupCudaStream = ncclParamGroupCudaStream();
#else
// Don't allow the user to overload the default setting in older CUDA builds
comm->groupCudaStream = NCCL_GROUP_CUDA_STREAM;
#endif
comm->argsptr = &comm->args;
*comret = comm;
return ncclSuccess;
}
static ncclResult_t devCommSetup(ncclComm_t comm) {
// Fully duplicate the comm on the device
NCCLCHECK(ncclCudaCalloc(&comm->devComm, 1));
// Copy the comm on the device
NCCLCHECK(ncclCudaMemcpy(comm->devComm, comm, 1));
// Copy userRanks
for (int r=0; r<comm->nRings; r++) {
NCCLCHECK(ncclCudaMemcpy(comm->rings[r].devUserRanks, comm->rings[r].userRanks, comm->nRanks));
}
return ncclSuccess;
}
// Pre-process the string so that running "strings" on the lib can quickly reveal the version.
#define VERSION_STRING "NCCL version " STR(NCCL_MAJOR) "." STR(NCCL_MINOR) "." STR(NCCL_PATCH) NCCL_SUFFIX "+cuda" STR(CUDA_MAJOR) "." STR(CUDA_MINOR)
static void showVersion() {
static int shown = 0;
if (shown == 0 && ncclDebugLevel >= NCCL_LOG_VERSION) {
printf("%s\n", VERSION_STRING);
fflush(stdout);
if (ncclDebugFile != stdout)
INFO(NCCL_ALL,"%s", VERSION_STRING); // Also log NCCL version in one of the files
shown = 1;
}
}
static ncclResult_t fillInfo(struct ncclInfo* info, int rank) {
for (int t=0; t<NTRANSPORTS; t++) {
NCCLCHECK(ncclTransports[t].fillInfo(info->tinfo+t, rank));
}
return ncclSuccess;
}
template <int type>
static ncclResult_t selectTransport(struct ncclInfo* myInfo, struct ncclInfo* peerInfo, struct ncclConnect* connect, struct ncclTransport** transportRet, struct ncclRing* ring) {
for (int t=0; t<NTRANSPORTS; t++) {
struct ncclTransport *transport = ncclTransports+t;
struct ncclTransportComm* transportComm = type == 1 ? &transport->send : &transport->recv;
ncclTvalue_t ret = 0;
NCCLCHECK(transport->canConnect(&ret, myInfo->tinfo+t, peerInfo->tinfo+t));
if (ret > 0) {
NCCLCHECK(transportComm->setup(myInfo->tinfo+t, peerInfo->tinfo+t, connect, ring));
*transportRet = transport;
return ncclSuccess;
}
}
WARN("No transport found !");
*transportRet = NULL;
return ncclInternalError;
}
static ncclResult_t setupRing(struct ncclComm* comm, int ringid, int rank, int nranks, int* ringRanks, struct ncclInfo* allInfo, struct ncclConnect* connect) {
NCCLCHECK(initRing(comm, ringid));
struct ncclRing* ring = comm->rings+ringid;
// Reorganize ranks to start with rank.
int shift;
for (shift = 0; shift<nranks; shift++) {
if (ringRanks[shift] == rank) {
break;
}
}
for (int i=0; i<nranks; i++) {
ring->userRanks[i] = ringRanks[(i+shift)%nranks];
}
int prev = ring->userRanks[nranks-1];
int next = ring->userRanks[1];
NCCLCHECK(selectTransport<0>(allInfo+rank, allInfo+prev, connect+0, &ring->recv.transport, ring));
NCCLCHECK(selectTransport<1>(allInfo+rank, allInfo+next, connect+1, &ring->send.transport, ring));
NCCLCHECK(transportCreateProxy(0, ring, comm));
NCCLCHECK(transportCreateProxy(1, ring, comm));
return ncclSuccess;
}
static ncclResult_t fillConnect(struct ncclInfo* allInfo, int nranks, int rank, int* connectTransport, ncclTvalue_t* connectValue) {
for (int r=0; r<nranks; r++) {
connectTransport[r] = -1;
for (int t=0; t<NTRANSPORTS; t++) {
NCCLCHECK(ncclTransports[t].canConnect(connectValue+r, allInfo[rank].tinfo+t, allInfo[r].tinfo+t));
if (connectValue[r] > 0) {
connectTransport[r] = t;
break;
}
}
}
return ncclSuccess;
}
static void swap(void* mem1, void* mem2, int size) {
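// Byte-wise swap of two equally sized buffers through a stack VLA; used by
// initTransportsAll below to exchange ncclConnect blobs between in-process ranks.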
char tmp[size];
memcpy(tmp, mem1, size); memcpy(mem1, mem2, size); memcpy(mem2, tmp, size);
}
#define MAXWIDTH 20
#define PREFIXLEN 15
#define STRLENGTH (PREFIXLEN+5*MAXWIDTH)
void dumpMatrix(int* connectMatrix, int nranks) {
char line[STRLENGTH+1];
line[STRLENGTH] = '\0';
memset(line, ' ', STRLENGTH);
for (int j=0; j<nranks && j<MAXWIDTH; j++) sprintf(4+line+4*j, " %3d", j);
INFO(NCCL_INIT,"%s", line);
for (int i=0; i<nranks; i++) {
memset(line, ' ', STRLENGTH);
sprintf(line, "%3d ", i);
for (int j=0; j<nranks && j<MAXWIDTH; j++) sprintf(4+line+4*j, " %3d", connectMatrix[i*nranks+j]);
INFO(NCCL_INIT,"%s", line);
}
}
void dumpMatrixTvalue(ncclTvalue_t* connectMatrix, int nranks) {
char line[STRLENGTH+1];
line[STRLENGTH] = '\0';
memset(line, ' ', STRLENGTH);
for (int j=0; j<nranks && j<MAXWIDTH; j++) sprintf(4+line+5*j, " %4d", j);
INFO(NCCL_INIT,"%s", line);
for (int i=0; i<nranks; i++) {
memset(line, ' ', STRLENGTH);
sprintf(line, "%3d ", i);
for (int j=0; j<nranks && j<MAXWIDTH; j++) sprintf(4+line+5*j, " %4o", (int)connectMatrix[i*nranks+j]);
INFO(NCCL_INIT,"%s", line);
}
}
void dumpLine(int* values, int nranks, const char* prefix) {
int prefixlen = strlen(prefix);
char line[STRLENGTH+1];
line[STRLENGTH] = '\0';
memset(line, ' ', STRLENGTH);
strncpy(line, prefix, PREFIXLEN);
for (int i=0; i<nranks && i<MAXWIDTH; i++) sprintf(line+prefixlen+4*i, " %3d", values[i]);
INFO(NCCL_INIT,"%s", line);
}
static ncclResult_t buildRings(int nrings, int* rings, int rank, int nranks, int* prev, int* next) {
for (int r=0; r<nrings; r++) {
char prefix[30];
/*sprintf(prefix, "[%d] Ring %d Prev : ", rank, r);
dumpLine(prev+r*nranks, nranks, prefix);
sprintf(prefix, "[%d] Ring %d Next : ", rank, r);
dumpLine(next+r*nranks, nranks, prefix);*/
int current = rank;
for (int i=0; i<nranks; i++) {
rings[r*nranks+i] = current;
current = next[r*nranks+current];
}
sprintf(prefix, "Ring %02d : ", r);
if (rank == 0) dumpLine(rings+r*nranks, nranks, prefix);
if (current != rank) {
WARN("Error : ring %d does not loop back to start (%d != %d)", r, current, rank);
return ncclInternalError;
}
// Check that all ranks are there
for (int i=0; i<nranks; i++) {
int found = 0;
for (int j=0; j<nranks; j++) {
if (rings[r*nranks+j] == i) {
found = 1;
break;
}
}
if (found == 0) {
WARN("Error : ring %d does not contain rank %d", r, i);
return ncclInternalError;
}
}
}
return ncclSuccess;
}
void* waitForNonNullPtr(void* p) {
volatile void** ptr = (volatile void**) p;
while (*ptr == NULL) sched_yield();
return (void*)*ptr;
}
ncclResult_t initParams(struct ncclComm* comm) {
struct cudaLaunchParams* params = comm->myParams = comm->intraParams+comm->intraRank;
params->args = &comm->argsptr;
params->stream = NULL;
params->sharedMem = 0;
params->blockDim.x = 0; params->blockDim.y = params->blockDim.z = 1;
params->gridDim.x = 0; params->gridDim.y = params->gridDim.z = 1;
return ncclSuccess;
}
// Allocate/Set Intra Process Structures and set CG options
ncclResult_t ncclCommSetIntra(struct ncclComm* comm, int rank, int ranks, struct ncclComm* comm0) {
comm->intraRank = rank;
comm->intraRanks = ranks;
comm->intraPhase = 0;
// Alloc shared structures
if (rank == 0) {
assert(comm == comm0);
int* bar;
NCCLCHECK(ncclCalloc(&bar, 2));
bar[0] = bar[1] = 0;
comm->intraBarrier = bar;
NCCLCHECK(ncclCalloc(&comm->intraParams, comm->intraRanks));
NCCLCHECK(ncclCalloc(&comm->intraCudaDevs, comm->intraRanks));
int* CGMode;
NCCLCHECK(ncclCalloc(&CGMode, 1));
*CGMode = 0x11;
comm->intraCGMode = CGMode;
int* CC;
NCCLCHECK(ncclCalloc(&CC, 1));
*CC = ncclCudaFullCompCap();
comm->intraCC = CC;
} else {
comm->intraBarrier = (int*)waitForNonNullPtr(&comm0->intraBarrier);
comm->intraParams = (struct cudaLaunchParams*)waitForNonNullPtr(&comm0->intraParams);
comm->intraCudaDevs = (int*)waitForNonNullPtr(&comm0->intraCudaDevs);
comm->intraCGMode = (int*)waitForNonNullPtr(&comm0->intraCGMode);
comm->intraCC = (int*)waitForNonNullPtr(&comm0->intraCC);
}
comm->intraCudaDevs[comm->intraRank] = comm->cudaDev;
NCCLCHECK(initParams(comm));
int cgMdLaunch = 0;
// Set CG Mode
comm->launchMode = ncclComm::GROUP;
char* str = getenv("NCCL_LAUNCH_MODE");
if (comm->intraRanks == 1 || (str && strcmp(str, "PARALLEL") == 0)) {
comm->launchMode = ncclComm::PARALLEL;
}
if (comm->launchMode == ncclComm::GROUP) {
CUDACHECK(cudaStreamCreateWithFlags(&comm->groupStream, cudaStreamNonBlocking));
#if __CUDACC_VER_MAJOR__ >= 9
if (*comm->intraCC && (ncclCudaFullCompCap() == *comm->intraCC)) {
// Check whether the GPU supports Cooperative Group Multi Device Launch
(void) cudaDeviceGetAttribute(&cgMdLaunch, cudaDevAttrCooperativeMultiDeviceLaunch, comm->cudaDev);
}
#endif
}
// Disable cgMdLaunch if any rank does not support it
if (cgMdLaunch == 0) {
*comm->intraCGMode = 0x10;
}
return ncclSuccess;
}
static ncclResult_t initTransportsRank(struct ncclComm* comm, ncclUniqueId* commId) {
int rank = comm->rank;
int nranks = comm->nRanks;
void* commState;
NCCLCHECK(bootstrapInit(commId, rank, nranks, &commState));
struct ncclInfo* allInfo;
NCCLCHECK(ncclCalloc(&allInfo, nranks));
NCCLCHECK(fillInfo(allInfo+rank, rank));
NCCLCHECK(bootstrapAllGather(commState, allInfo, sizeof(struct ncclInfo)));
int* connectTransport;
ncclTvalue_t* connectValue;
NCCLCHECK(ncclCalloc(&connectTransport, nranks*nranks));
NCCLCHECK(ncclCalloc(&connectValue, nranks*nranks));
NCCLCHECK(fillConnect(allInfo, nranks, rank, connectTransport+nranks*rank, connectValue+nranks*rank));
NCCLCHECK(bootstrapAllGather(commState, connectTransport, nranks*(sizeof(int))));
NCCLCHECK(bootstrapAllGather(commState, connectValue, nranks*(sizeof(ncclTvalue_t))));
//if (rank == 0) dumpMatrix(connectTransport, nranks);
//if (rank == 0) dumpMatrixTvalue(connectValue, nranks);
// Get my rings
int nrings;
int* prev, *next;
NCCLCHECK(ncclCalloc(&prev, nranks*MAXRINGS));
NCCLCHECK(ncclCalloc(&next, nranks*MAXRINGS));
comm->nThreads = getDefaultThreads();
NCCLCHECK(ncclGetRings(&nrings, &comm->nThreads, rank, nranks, connectTransport, connectValue, prev, next));
free(connectTransport);
free(connectValue);
// Find max nThreads
int allData[nranks];
allData[rank] = comm->nThreads;
NCCLCHECK(bootstrapAllGather(commState, allData, sizeof(int)));
for (int i=0; i<nranks; i++)
comm->nThreads = std::max(allData[i], comm->nThreads);
if (rank == 0) INFO(NCCL_INIT,"Using %d threads", comm->nThreads);
// Determine the minimum CUDA Compute capability of all GPUs
int myCompCap = ncclCudaCompCap();
int minCompCap = myCompCap;
allData[rank] = myCompCap;
NCCLCHECK(bootstrapAllGather(commState, allData, sizeof(int)));
for (int i=0; i<nranks; i++)
minCompCap = std::min(allData[i], minCompCap);
if (rank == 0) INFO(NCCL_INIT,"Min Comp Cap %d", minCompCap);
// Find min nrings across ranks
allData[rank] = nrings;
NCCLCHECK(bootstrapAllGather(commState, allData, sizeof(int)));
for (int i=0; i<nranks; i++)
nrings = std::min(allData[i], nrings);
// Exchange data with others to build complete rings
comm->nRings = nrings;
for (int r=0; r<nrings; r++) {
NCCLCHECK(bootstrapAllGather(commState, prev+r*nranks, sizeof(int)));
NCCLCHECK(bootstrapAllGather(commState, next+r*nranks, sizeof(int)));
}
int *rings;
NCCLCHECK(ncclCalloc(&rings, nranks*MAXRINGS));
NCCLCHECK(buildRings(nrings, rings, rank, nranks, prev, next));
free(prev);
free(next);
// Connect with prev/next for each ring
struct ncclConnect *connectData;
NCCLCHECK(ncclCalloc(&connectData, 2*nranks));
for (int r=0; r<nrings; r++) {
int* ringRanks = rings+r*nranks;
struct ncclRing *ring = comm->rings+r;
NCCLCHECK(setupRing(comm, r, rank, nranks, ringRanks, allInfo, connectData+2*rank));
int prev_offset = ring->userRanks[nranks-1]*2+1;
int next_offset = ring->userRanks[1]*2;
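// connectData holds two entries per rank: recv connect info at 2*rank and send
// info at 2*rank+1. After the allgather we connect our send side to the next
// rank's recv entry and our recv side to the previous rank's send entry.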
NCCLCHECK(bootstrapAllGather(commState, connectData, sizeof(struct ncclConnect)*2));
NCCLCHECK(ring->send.transport->send.connect(connectData+next_offset, &ring->send));
NCCLCHECK(ring->recv.transport->recv.connect(connectData+prev_offset, &ring->recv));
}
free(connectData);
free(rings);
free(allInfo);
// Intra-process barrier setup
struct rankInfo {
uint64_t hostHash;
uint64_t pidHash;
struct ncclComm* comm;
} rankInfos[nranks];
rankInfos[rank].hostHash = getHostHash();
rankInfos[rank].pidHash = getPidHash();
rankInfos[rank].comm = comm;
NCCLCHECK(bootstrapAllGather(commState, rankInfos, sizeof(struct rankInfo)));
// Compute intra ranks
int intraRank0 = -1, intraRank = -1, intraRanks = 0;
int multiNode = 0;
for (int r=0; r<nranks; r++) {
if ((rankInfos[r].hostHash == rankInfos[rank].hostHash) &&
(rankInfos[r].pidHash == rankInfos[rank].pidHash)) {
if (intraRanks == 0) intraRank0 = r;
if (r == rank) intraRank = intraRanks;
intraRanks++;
} else if (rankInfos[r].hostHash != rankInfos[rank].hostHash) {
multiNode = 1;
}
}
TRACE(NCCL_INIT,"hostHash[%d] %lx intraRank %d intraRanks %d intraRank0 %d",
rank, rankInfos[rank].hostHash, intraRank, intraRanks, intraRank0);
if (intraRank == -1 || intraRank0 == -1 || rankInfos[intraRank0].comm == NULL) {
WARN("Failed to determine intra ranks hostHash[%d] %lx intraRank %d intraRanks %d intraRank0 %d",
rank, rankInfos[rank].hostHash, intraRank, intraRanks, intraRank0);
return ncclInternalError;
}
NCCLCHECK(ncclCommSetIntra(comm, intraRank, intraRanks, rankInfos[intraRank0].comm));
// Determine thread threshold across all GPUs
comm->threadThreshold = ncclThreadThreshold(minCompCap, multiNode);
// Barrier
bootstrapClose(commState);
return ncclSuccess;
}
bool SetCpuAffinity(int cudaDev, nvmlDevice_t* nvmlDevice) {
char busId[NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE];
if (cudaDeviceGetPCIBusId(busId, NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE, cudaDev) != cudaSuccess) return false;
if (wrapNvmlDeviceGetHandleByPciBusId(busId, nvmlDevice) != ncclSuccess) return false;
if (wrapNvmlDeviceSetCpuAffinity(*nvmlDevice) != ncclSuccess) {
WARN("Failed to set CPU affinity");
return false;
}
return true;
}
ncclResult_t ncclCommInitRankSync(ncclComm_t* newcomm, int nranks, ncclUniqueId commId, int myrank) {
cpu_set_t affinitySave;
sched_getaffinity(0, sizeof(cpu_set_t), &affinitySave);
NCCLCHECK(wrapNvmlSymbols());
NCCLCHECK(wrapNvmlInit());
// Make sure all host memory allocations are close to the GPU
int cudaDev;
nvmlDevice_t nvmlDevice;
CUDACHECK(cudaGetDevice(&cudaDev));
SetCpuAffinity(cudaDev, &nvmlDevice);
ncclResult_t res;
NCCLCHECKGOTO(commAlloc(newcomm, nranks, myrank), res, cleanup);
NCCLCHECKGOTO(initTransportsRank(*newcomm, &commId), res, cleanup);
NCCLCHECKGOTO(devCommSetup(*newcomm), res, cleanup);
sched_setaffinity(0, sizeof(cpu_set_t), &affinitySave);
NCCLCHECKGOTO(wrapNvmlShutdown(), res, cleanup);
INFO(NCCL_INIT,"comm %p rank %d nranks %d - COMPLETE", *newcomm, myrank, nranks);
return ncclSuccess;
cleanup:
*newcomm = NULL;
sched_setaffinity(0, sizeof(cpu_set_t), &affinitySave);
return res;
}
NCCL_API(ncclResult_t, ncclCommInitRank, ncclComm_t* newcomm, int nranks, ncclUniqueId commId, int myrank);
ncclResult_t ncclCommInitRank(ncclComm_t* newcomm, int nranks, ncclUniqueId commId, int myrank) {
char* env = getenv("NCCL_COMM_ID");
if (env && myrank == 0) {
NCCLCHECK(bootstrapCreateRoot(&commId, true));
}
NCCLCHECK(ncclInit());
if (myrank == 0) showVersion();
INFO(NCCL_INIT,"rank %d nranks %d", myrank, nranks);
// Make sure the CUDA runtime is initialized.
CUDACHECK(cudaFree(NULL));
NCCLCHECK(PtrCheck(newcomm, "CommInitRank", "newcomm"));
if (nranks < 1 || myrank < 0 || myrank >= nranks) {
WARN("Invalid rank requested : %d/%d", myrank, nranks);
return ncclInvalidArgument;
}
if (ncclAsyncMode()) {
int cudaDev;
CUDACHECK(cudaGetDevice(&cudaDev));
return ncclAsyncInit(ncclCommInitRankSync, cudaDev, newcomm, nranks, commId, myrank);
} else {
return ncclCommInitRankSync(newcomm, nranks, commId, myrank);
}
}
static ncclResult_t initTransportsAll(struct ncclComm** comms, const int* devs, int nranks) {
struct ncclInfo* allInfo;
NCCLCHECK(ncclCalloc(&allInfo, nranks));
for (int rank=0; rank<nranks; rank++) {
CUDACHECK(cudaSetDevice(devs[rank]));
NCCLCHECK(fillInfo(allInfo+rank, rank));
}
int* connectTransport;
ncclTvalue_t* connectValue;
NCCLCHECK(ncclCalloc(&connectTransport, nranks*nranks));
NCCLCHECK(ncclCalloc(&connectValue, nranks*nranks));
for (int rank=0; rank<nranks; rank++)
NCCLCHECK(fillConnect(allInfo, nranks, rank, connectTransport+nranks*rank, connectValue+nranks*rank));
int* prev, *prevFinal, *next, *nextFinal;
NCCLCHECK(ncclCalloc(&prev, nranks*MAXRINGS));
NCCLCHECK(ncclCalloc(&prevFinal, nranks*MAXRINGS));
NCCLCHECK(ncclCalloc(&next, nranks*MAXRINGS));
NCCLCHECK(ncclCalloc(&nextFinal, nranks*MAXRINGS));
int nrings = MAXRINGS;
int nthreads=0;
int myCompCap = ncclCudaCompCap();
int minCompCap = myCompCap;
for (int rank=0; rank<nranks; rank++) {
CUDACHECK(cudaSetDevice(devs[rank]));
int nringsRank;
int nthreadsRank = getDefaultThreads();
myCompCap = ncclCudaCompCap();
NCCLCHECK(ncclGetRings(&nringsRank, &nthreadsRank, rank, nranks, connectTransport, connectValue, prev, next));
nrings = std::min(nrings, nringsRank);
nthreads = std::max(nthreads, nthreadsRank);
minCompCap = std::min(minCompCap, myCompCap);
for (int ring=0; ring<nrings; ring++) {
int index = ring*nranks+rank;
prevFinal[index] = prev[index];
nextFinal[index] = next[index];
}
}
free(connectTransport);
free(connectValue);
free(prev);
free(next);
INFO(NCCL_INIT,"Using %d threads", nthreads);
INFO(NCCL_INIT,"Min Comp Cap %d", minCompCap);
int* rings;
NCCLCHECK(ncclCalloc(&rings, nranks*MAXRINGS));
NCCLCHECK(buildRings(nrings, rings, 0, nranks, prevFinal, nextFinal));
free(prevFinal);
free(nextFinal);
// Determine thread threshold across all GPUs
int threadThreshold = ncclThreadThreshold(minCompCap, 0);
for (int rank=0; rank<nranks; rank++) {
comms[rank]->nRings = nrings;
comms[rank]->nThreads = nthreads;
comms[rank]->threadThreshold = threadThreshold;
}
for (int r=0; r<nrings; r++) {
struct ncclConnect connect[2*nranks];
int* ringRanks = rings+r*nranks;
for (int rank=0; rank<nranks; rank++) {
CUDACHECK(cudaSetDevice(devs[rank]));
NCCLCHECK(setupRing(comms[rank], r, rank, nranks, ringRanks, allInfo, connect+2*rank));
}
// RingExchange connect information
for (int rank=0; rank<nranks; rank++) {
// Swap rank->prev and prevRank->next
struct ncclRing *ring = comms[rank]->rings+r;
int prevRank = ring->userRanks[nranks-1];
struct ncclConnect* prevRankNextConnect = connect+2*prevRank+1;
struct ncclConnect* rankPrevConnect = connect+2*rank;
swap(prevRankNextConnect, rankPrevConnect, sizeof(struct ncclConnect));
}
for (int rank=0; rank<nranks; rank++) {
CUDACHECK(cudaSetDevice(devs[rank]));
struct ncclRing *ring = comms[rank]->rings+r;
NCCLCHECK(ring->send.transport->send.connect(connect+2*rank+1, &ring->send));
NCCLCHECK(ring->recv.transport->recv.connect(connect+2*rank+0, &ring->recv));
}
}
free(rings);
free(allInfo);
return ncclSuccess;
}
NCCL_API(ncclResult_t, ncclCommInitAll, ncclComm_t* comms, int ndev, const int* devlist);
ncclResult_t ncclCommInitAll(ncclComm_t* comms, int ndev, const int* devlist) {
NCCLCHECK(ncclInit());
NCCLCHECK(wrapNvmlSymbols());
NCCLCHECK(wrapNvmlInit());
showVersion();
INFO(NCCL_INIT,"nranks %d", ndev);
NCCLCHECK(PtrCheck(comms, "CommInitAll", "comms"));
if (ndev < 1) {
WARN("Invalid device count requested : %d", ndev);
return ncclInvalidArgument;
}
ncclResult_t res;
int savedDevice;
int rank, cudaDev;
ncclComm_t comm = NULL;
nvmlDevice_t nvmlDevice;
int ncclDevList[ndev];
for (int i=0; i<ndev; i++) {
ncclDevList[i] = devlist ? devlist[i] : i;
}
cudaGetDevice(&savedDevice);
for(rank=0; rank<ndev; ++rank)
comms[rank] = NULL;
cpu_set_t affinitySave;
sched_getaffinity(0, sizeof(cpu_set_t), &affinitySave);
for (rank=0; rank<ndev; ++rank) {
cudaDev = ncclDevList[rank];
CUDACHECKGOTO(cudaSetDevice(cudaDev), res, cleanup);
SetCpuAffinity(cudaDev, &nvmlDevice);
NCCLCHECKGOTO(commAlloc(&comm, ndev, rank), res, cleanup);
comms[rank] = comm;
NCCLCHECKGOTO(ncclCommSetIntra(comm, rank, ndev, comms[0]), res, cleanup);
}
sched_setaffinity(0, sizeof(cpu_set_t), &affinitySave);
NCCLCHECKGOTO(initTransportsAll(comms, ncclDevList, ndev), res, cleanup);
for(rank=0; rank<ndev; ++rank) {
cudaDev = ncclDevList[rank];
CUDACHECKGOTO(cudaSetDevice(cudaDev), res, cleanup);
NCCLCHECKGOTO(devCommSetup(comms[rank]), res, cleanup);
}
res = ncclSuccess;
goto final;
cleanup:
for(rank=0; rank<ndev; ++rank) {
if(comms[rank] != NULL) {
commFree(comms[rank]);
}
}
final:
if(wrapNvmlShutdown() != ncclSuccess)
INFO(NCCL_INIT,"NCCL did not shutdown nvml properly");
cudaSetDevice(savedDevice);
sched_setaffinity(0, sizeof(cpu_set_t), &affinitySave);
return res;
}
NCCL_API(ncclResult_t, ncclCommDestroy, ncclComm_t comm);
ncclResult_t ncclCommDestroy(ncclComm_t comm) {
if (comm == NULL)
return ncclSuccess;
int savedDevice;
CUDACHECK(cudaGetDevice(&savedDevice));
int commDevice = comm->cudaDev;
if (savedDevice != commDevice) {
CUDACHECK(cudaSetDevice(commDevice));
}
NCCLCHECK(commFree(comm));
if (savedDevice != commDevice)
CUDACHECK(cudaSetDevice(savedDevice));
return ncclSuccess;
}
NCCL_API(const char*, ncclGetErrorString, ncclResult_t code);
const char* ncclGetErrorString(ncclResult_t code) {
switch (code) {
case ncclSuccess : return "no error";
case ncclUnhandledCudaError : return "unhandled cuda error";
case ncclSystemError : return "unhandled system error";
case ncclInternalError : return "internal error";
case ncclInvalidArgument : return "invalid argument";
case ncclInvalidUsage : return "invalid usage";
default : return "unknown result code";
}
}
NCCL_API(ncclResult_t, ncclCommCount, const ncclComm_t comm, int* count);
ncclResult_t ncclCommCount(const ncclComm_t comm, int* count) {
NCCLCHECK(PtrCheck(comm, "CommCount", "comm"));
NCCLCHECK(PtrCheck(count, "CommCount", "count"));
*count = comm->nRanks;
return ncclSuccess;
}
NCCL_API(ncclResult_t, ncclCommCuDevice, const ncclComm_t comm, int* devid);
ncclResult_t ncclCommCuDevice(const ncclComm_t comm, int* devid) {
NCCLCHECK(PtrCheck(comm, "CommCuDevice", "comm"));
NCCLCHECK(PtrCheck(devid, "CommCuDevice", "devid"));
*devid = comm->cudaDev;
return ncclSuccess;
}
NCCL_API(ncclResult_t, ncclCommUserRank, const ncclComm_t comm, int* rank);
ncclResult_t ncclCommUserRank(const ncclComm_t comm, int* rank) {
NCCLCHECK(PtrCheck(comm, "CommUserRank", "comm"));
NCCLCHECK(PtrCheck(rank, "CommUserRank", "rank"));
*rank = comm->rank;
return ncclSuccess;
}
|
99efbbb5129971996282309f02272971d1cd14cb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "..\Prerequisites.h"
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
char const * const errId = "GTOM:Projection:ForwardProjRaytrace:InvalidInput";
mxInitGPU();
if (nrhs != 4)
mexErrMsgIdAndTxt(errId, "Wrong parameter count (4 expected).");
mxArrayAdapter volume(prhs[0]);
int3 dimsvolume = MWDimsToInt3(mxGetNumberOfDimensions(volume.underlyingarray), mxGetDimensions(volume.underlyingarray));
tfloat* d_volume = volume.GetAsManagedDeviceTFloat();
mxArrayAdapter angles(prhs[1]);
int3 dimsangles = MWDimsToInt3(mxGetNumberOfDimensions(angles.underlyingarray), mxGetDimensions(angles.underlyingarray));
if (dimsangles.x != 3)
mexErrMsgIdAndTxt(errId, "3 values per column expected for angles.");
tfloat3* h_angles = (tfloat3*)angles.GetAsManagedTFloat();
int batch = dimsangles.y;
mxArrayAdapter shifts(prhs[2]);
int3 dimsshifts = MWDimsToInt3(mxGetNumberOfDimensions(shifts.underlyingarray), mxGetDimensions(shifts.underlyingarray));
if (dimsshifts.x != 2 || dimsshifts.y != batch)
mexErrMsgIdAndTxt(errId, "2 values per column expected for shifts.");
tfloat2* h_shifts = (tfloat2*)shifts.GetAsManagedTFloat();
mxArrayAdapter scales(prhs[3]);
int3 dimsscales = MWDimsToInt3(mxGetNumberOfDimensions(scales.underlyingarray), mxGetDimensions(scales.underlyingarray));
if (dimsscales.x != 2 || dimsscales.y != batch)
mexErrMsgIdAndTxt(errId, "2 values per column expected for scales.");
tfloat2* h_scales = (tfloat2*)scales.GetAsManagedTFloat();
int2 dimsproj = toInt2(dimsvolume.x, dimsvolume.x);
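// One square projection of side dimsvolume.x is produced per angle; the batch
// of dimsangles.y projections is written contiguously into d_proj.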
tfloat* d_proj;
hipMalloc((void**)&d_proj, Elements2(dimsproj) * dimsangles.y * sizeof(tfloat));
d_ProjForwardRaytrace(d_volume, dimsvolume, tfloat3(0), d_proj, dimsproj, h_angles, h_shifts, h_scales, T_INTERP_CUBIC, 1, batch);
mwSize outputdims[3];
outputdims[0] = dimsproj.x;
outputdims[1] = dimsproj.y;
outputdims[2] = batch;
mxArrayAdapter A(mxCreateNumericArray(3,
outputdims,
mxGetClassID(volume.underlyingarray),
mxREAL));
A.SetFromDeviceTFloat(d_proj);
hipFree(d_proj);
plhs[0] = A.underlyingarray;
}
|
99efbbb5129971996282309f02272971d1cd14cb.cu
|
#include "..\Prerequisites.h"
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
char const * const errId = "GTOM:Projection:ForwardProjRaytrace:InvalidInput";
mxInitGPU();
if (nrhs != 4)
mexErrMsgIdAndTxt(errId, "Wrong parameter count (4 expected).");
mxArrayAdapter volume(prhs[0]);
int3 dimsvolume = MWDimsToInt3(mxGetNumberOfDimensions(volume.underlyingarray), mxGetDimensions(volume.underlyingarray));
tfloat* d_volume = volume.GetAsManagedDeviceTFloat();
mxArrayAdapter angles(prhs[1]);
int3 dimsangles = MWDimsToInt3(mxGetNumberOfDimensions(angles.underlyingarray), mxGetDimensions(angles.underlyingarray));
if (dimsangles.x != 3)
mexErrMsgIdAndTxt(errId, "3 values per column expected for angles.");
tfloat3* h_angles = (tfloat3*)angles.GetAsManagedTFloat();
int batch = dimsangles.y;
mxArrayAdapter shifts(prhs[2]);
int3 dimsshifts = MWDimsToInt3(mxGetNumberOfDimensions(shifts.underlyingarray), mxGetDimensions(shifts.underlyingarray));
if (dimsshifts.x != 2 || dimsshifts.y != batch)
mexErrMsgIdAndTxt(errId, "2 values per column expected for shifts.");
tfloat2* h_shifts = (tfloat2*)shifts.GetAsManagedTFloat();
mxArrayAdapter scales(prhs[3]);
int3 dimsscales = MWDimsToInt3(mxGetNumberOfDimensions(scales.underlyingarray), mxGetDimensions(scales.underlyingarray));
if (dimsscales.x != 2 || dimsscales.y != batch)
mexErrMsgIdAndTxt(errId, "2 values per column expected for scales.");
tfloat2* h_scales = (tfloat2*)scales.GetAsManagedTFloat();
int2 dimsproj = toInt2(dimsvolume.x, dimsvolume.x);
tfloat* d_proj;
cudaMalloc((void**)&d_proj, Elements2(dimsproj) * dimsangles.y * sizeof(tfloat));
d_ProjForwardRaytrace(d_volume, dimsvolume, tfloat3(0), d_proj, dimsproj, h_angles, h_shifts, h_scales, T_INTERP_CUBIC, 1, batch);
mwSize outputdims[3];
outputdims[0] = dimsproj.x;
outputdims[1] = dimsproj.y;
outputdims[2] = batch;
mxArrayAdapter A(mxCreateNumericArray(3,
outputdims,
mxGetClassID(volume.underlyingarray),
mxREAL));
A.SetFromDeviceTFloat(d_proj);
cudaFree(d_proj);
plhs[0] = A.underlyingarray;
}
|
53eb04d58f4c5c03b642d5ad2dd8f694b3178727.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void BFS_kernel_multi_block_spill( volatile unsigned int *frontier, volatile unsigned int *frontier2, unsigned int frontier_len, volatile unsigned int *cost, volatile int *visited, unsigned int *edgeArray, unsigned int *edgeArrayAux, unsigned int numVertices, unsigned int numEdges, volatile unsigned int *frontier_length, const unsigned int max_local_mem)
{
extern volatile __shared__ unsigned int b_q[];
volatile __shared__ unsigned int b_q_length[1];
volatile __shared__ unsigned int b_offset[1];
//get the threadId
unsigned int tid=threadIdx.x + blockDim.x * blockIdx.x;
unsigned int lid=threadIdx.x;
//initialize the block queue length and the block queue's offset into the global frontier
if (lid == 0 )
{
b_q_length[0]=0;
b_offset[0]=0;
}
__syncthreads();
//Each thread processes one node from the current frontier
if(tid<frontier_len)
{
//get the nodes to traverse from block queue
unsigned int node_to_process=frontier[tid];
visited[node_to_process]=0;
//get the offsets of the vertex in the edge list
unsigned int offset=edgeArray[node_to_process];
unsigned int next=edgeArray[node_to_process+1];
//Iterate through the neighbors of the vertex
while(offset<next)
{
//get neighbor
unsigned int nid=edgeArrayAux[offset];
//get its cost
unsigned int v=atomicMin((unsigned int *)&cost[nid],
cost[node_to_process]+1);
//if cost is less than previously set add to frontier
if(v>cost[node_to_process]+1)
{
int is_in_frontier=atomicExch((int *)&visited[nid],1);
//if node already in frontier do nothing
if(is_in_frontier==0)
{
//increment the warp queue size
unsigned int t=atomicAdd((unsigned int *)&b_q_length[0],
1);
if(t<max_local_mem)
{
b_q[t]=nid;
}
//write to global memory if shared memory full
else
{
int off=atomicAdd((unsigned int *)frontier_length,
1);
frontier2[off]=nid;
}
}
}
offset++;
}
}
__syncthreads();
//get block queue offset in global queue
if(lid==0)
{
if(b_q_length[0] > max_local_mem)
{
b_q_length[0]=max_local_mem;
}
b_offset[0]=atomicAdd((unsigned int *)frontier_length,b_q_length[0]);
}
__syncthreads();
//copy block queue to frontier
if(lid < b_q_length[0])
frontier2[lid+b_offset[0]]=b_q[lid];
}
|
53eb04d58f4c5c03b642d5ad2dd8f694b3178727.cu
|
#include "includes.h"
__global__ void BFS_kernel_multi_block_spill( volatile unsigned int *frontier, volatile unsigned int *frontier2, unsigned int frontier_len, volatile unsigned int *cost, volatile int *visited, unsigned int *edgeArray, unsigned int *edgeArrayAux, unsigned int numVertices, unsigned int numEdges, volatile unsigned int *frontier_length, const unsigned int max_local_mem)
{
extern volatile __shared__ unsigned int b_q[];
volatile __shared__ unsigned int b_q_length[1];
volatile __shared__ unsigned int b_offset[1];
//get the threadId
unsigned int tid=threadIdx.x + blockDim.x * blockIdx.x;
unsigned int lid=threadIdx.x;
//initialize the block queue length and the block queue's offset into the global frontier
if (lid == 0 )
{
b_q_length[0]=0;
b_offset[0]=0;
}
__syncthreads();
//Each thread processes one node from the current frontier
if(tid<frontier_len)
{
//get the nodes to traverse from block queue
unsigned int node_to_process=frontier[tid];
visited[node_to_process]=0;
//get the offsets of the vertex in the edge list
unsigned int offset=edgeArray[node_to_process];
unsigned int next=edgeArray[node_to_process+1];
//Iterate through the neighbors of the vertex
while(offset<next)
{
//get neighbor
unsigned int nid=edgeArrayAux[offset];
//get its cost
unsigned int v=atomicMin((unsigned int *)&cost[nid],
cost[node_to_process]+1);
//if cost is less than previously set add to frontier
if(v>cost[node_to_process]+1)
{
int is_in_frontier=atomicExch((int *)&visited[nid],1);
//if node already in frontier do nothing
if(is_in_frontier==0)
{
//increment the warp queue size
unsigned int t=atomicAdd((unsigned int *)&b_q_length[0],
1);
if(t<max_local_mem)
{
b_q[t]=nid;
}
//write to global memory if shared memory full
else
{
int off=atomicAdd((unsigned int *)frontier_length,
1);
frontier2[off]=nid;
}
}
}
offset++;
}
}
__syncthreads();
//get block queue offset in global queue
if(lid==0)
{
if(b_q_length[0] > max_local_mem)
{
b_q_length[0]=max_local_mem;
}
b_offset[0]=atomicAdd((unsigned int *)frontier_length,b_q_length[0]);
}
__syncthreads();
//copy block queue to frontier
if(lid < b_q_length[0])
frontier2[lid+b_offset[0]]=b_q[lid];
}
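/*
 * Hypothetical launch sketch (variable names are illustrative, not from the
 * original benchmark): one thread per frontier entry, with the block-level
 * queue kept in dynamic shared memory sized to max_local_mem entries.
 *
 *   unsigned int block = 256;
 *   unsigned int grid  = (frontier_len + block - 1) / block;
 *   size_t shmem = max_local_mem * sizeof(unsigned int);
 *   BFS_kernel_multi_block_spill<<<grid, block, shmem>>>(
 *       d_frontier, d_frontier2, frontier_len, d_cost, d_visited,
 *       d_edgeArray, d_edgeArrayAux, numVertices, numEdges,
 *       d_frontier_length, max_local_mem);
 */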
|
8f0e23f4742374c00263adc18f465a890e214618.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "soa.h"
#include <stdlib.h>
struct VectorNArray2DSoA makeGPUArray2DSoA(int Nx, int Ny) {
struct VectorNArray2DSoA vecarray;
vecarray.Nx = Nx;
vecarray.Ny = Ny;
for (int i = 0; i < N; i++) {
hipMalloc(&vecarray.data[i], sizeof(double) * Nx * Ny);
}
return vecarray;
}
struct VectorNArray2DSoA makeCPUArray2DSoA(int Nx, int Ny) {
struct VectorNArray2DSoA vecarray;
vecarray.Nx = Nx;
vecarray.Ny = Ny;
for (int i = 0; i < N; i++) {
vecarray.data[i] = (double *)malloc(sizeof(double) * Nx * Ny);
}
return vecarray;
}
void copyArray2DSoA(struct VectorNArray2DSoA *src, struct VectorNArray2DSoA *dst,
hipMemcpyKind kind){
for (int i = 0; i < N; i++) {
hipMemcpy(dst->data[i],src->data[i],sizeof(double)*src->Nx*src->Ny,kind);
}
}
void copyArray2DAoS(struct VectorN* src, struct VectorN *dst, int Nx, int Ny,
hipMemcpyKind kind){
hipMemcpy(dst,src,sizeof(struct VectorN)* Nx*Ny,kind);
}
void DeleteGPUArray2DSoA(struct VectorNArray2DSoA *vecarray) {
for (int i = 0; i < N; i++) {
hipFree(vecarray->data[i]);
}
}
void DeleteCPUArray2DSoA(struct VectorNArray2DSoA *vecarray) {
for (int i = 0; i < N; i++) {
free(vecarray->data[i]);
}
}
struct VectorN *makeArray2DAoS(int Nx, int Ny) {
struct VectorN *ptr;
hipMalloc(&ptr, sizeof(struct VectorN) * Nx * Ny);
return ptr;
}
void DeleteArray2DAoS(struct VectorN *ptr) { hipFree(ptr); }
|
8f0e23f4742374c00263adc18f465a890e214618.cu
|
#include "soa.h"
#include <stdlib.h>
struct VectorNArray2DSoA makeGPUArray2DSoA(int Nx, int Ny) {
struct VectorNArray2DSoA vecarray;
vecarray.Nx = Nx;
vecarray.Ny = Ny;
for (int i = 0; i < N; i++) {
cudaMalloc(&vecarray.data[i], sizeof(double) * Nx * Ny);
}
return vecarray;
}
struct VectorNArray2DSoA makeCPUArray2DSoA(int Nx, int Ny) {
struct VectorNArray2DSoA vecarray;
vecarray.Nx = Nx;
vecarray.Ny = Ny;
for (int i = 0; i < N; i++) {
vecarray.data[i] = (double *)malloc(sizeof(double) * Nx * Ny);
}
return vecarray;
}
void copyArray2DSoA(struct VectorNArray2DSoA *src, struct VectorNArray2DSoA *dst,
cudaMemcpyKind kind){
for (int i = 0; i < N; i++) {
cudaMemcpy(dst->data[i],src->data[i],sizeof(double)*src->Nx*src->Ny,kind);
}
}
void copyArray2DAoS(struct VectorN* src, struct VectorN *dst, int Nx, int Ny,
cudaMemcpyKind kind){
cudaMemcpy(dst,src,sizeof(struct VectorN)* Nx*Ny,kind);
}
void DeleteGPUArray2DSoA(struct VectorNArray2DSoA *vecarray) {
for (int i = 0; i < N; i++) {
cudaFree(vecarray->data[i]);
}
}
void DeleteCPUArray2DSoA(struct VectorNArray2DSoA *vecarray) {
for (int i = 0; i < N; i++) {
free(vecarray->data[i]);
}
}
struct VectorN *makeArray2DAoS(int Nx, int Ny) {
struct VectorN *ptr;
cudaMalloc(&ptr, sizeof(struct VectorN) * Nx * Ny);
return ptr;
}
void DeleteArray2DAoS(struct VectorN *ptr) { cudaFree(ptr); }
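/*
 * Round-trip sketch for the helpers above (not part of the original file),
 * assuming N and struct VectorNArray2DSoA come from soa.h as used here;
 * sizes are illustrative and error checking is omitted:
 *
 *   struct VectorNArray2DSoA h = makeCPUArray2DSoA(64, 64);
 *   struct VectorNArray2DSoA d = makeGPUArray2DSoA(64, 64);
 *   // ... fill h.data[0..N-1] on the host ...
 *   copyArray2DSoA(&h, &d, cudaMemcpyHostToDevice);   // host -> device
 *   // ... run kernels that read/write d.data[i] ...
 *   copyArray2DSoA(&d, &h, cudaMemcpyDeviceToHost);   // device -> host
 *   DeleteGPUArray2DSoA(&d);
 *   DeleteCPUArray2DSoA(&h);
 */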
|
cf8801d24536267f0462e2b34bcbdeb35bf0e389.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Implementing parallell plus reduce in CUDA.
*/
#include <stdio.h>
#define NUM_THREADS 16
#define NUM_BLOCKS 8
unsigned int serial_reduce(unsigned int* array, const unsigned int size){
unsigned int sum = 0;
for(int i = 0; i < size; i++){
sum += array[i];
}
return sum;
}
__global__ void reduce(unsigned int* d_in, unsigned int* d_out){
unsigned int local_idx = threadIdx.x;
unsigned int global_idx = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int num_threads = blockDim.x;
extern __shared__ unsigned int shared_array [];
shared_array[local_idx] = d_in[global_idx];
__syncthreads();
for(unsigned int i = 1; i < num_threads; i *= 2){
        if(local_idx % (2 * i) == 0){ // only threads whose index is a multiple of 2*i accumulate in this step
shared_array[local_idx] = shared_array[local_idx] + shared_array[local_idx + i];
}
__syncthreads();
}
d_out[blockIdx.x] = shared_array[0];
}
int main(){
const unsigned int NUM_ELEMENTS = NUM_THREADS * NUM_BLOCKS;
const unsigned int IN_BYTES = NUM_ELEMENTS * sizeof(int);
const unsigned int OUT_BYTES = NUM_BLOCKS * sizeof(int);
unsigned int h_in [NUM_ELEMENTS];
for(int i = 0; i < NUM_ELEMENTS; i++){
h_in[i] = i;
}
unsigned int h_out [NUM_BLOCKS];
unsigned int* d_in;
unsigned int* d_out;
hipMalloc((void **) &d_in, IN_BYTES);
hipMalloc((void **) &d_out, OUT_BYTES);
hipMemcpy(d_in, h_in, IN_BYTES, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( reduce), dim3(NUM_BLOCKS), dim3(NUM_THREADS), NUM_THREADS * sizeof(int), 0, d_in, d_out);
hipMemcpy(h_out, d_out, OUT_BYTES, hipMemcpyDeviceToHost);
printf("True: %d \n", serial_reduce(h_in, NUM_ELEMENTS));
// Doing a final serial reduce since output is of size NUM_BLOCKS
printf("Output: %d", serial_reduce(h_out, NUM_BLOCKS));
hipFree(d_in);
hipFree(d_out);
return 0;
}
|
cf8801d24536267f0462e2b34bcbdeb35bf0e389.cu
|
/*
Implementing parallell plus reduce in CUDA.
*/
#include <stdio.h>
#define NUM_THREADS 16
#define NUM_BLOCKS 8
unsigned int serial_reduce(unsigned int* array, const unsigned int size){
unsigned int sum = 0;
for(int i = 0; i < size; i++){
sum += array[i];
}
return sum;
}
__global__ void reduce(unsigned int* d_in, unsigned int* d_out){
unsigned int local_idx = threadIdx.x;
unsigned int global_idx = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int num_threads = blockDim.x;
extern __shared__ unsigned int shared_array [];
shared_array[local_idx] = d_in[global_idx];
__syncthreads();
for(unsigned int i = 1; i < num_threads; i *= 2){
        if(local_idx % (2 * i) == 0){ // only threads whose index is a multiple of 2*i accumulate in this step
shared_array[local_idx] = shared_array[local_idx] + shared_array[local_idx + i];
}
__syncthreads();
}
d_out[blockIdx.x] = shared_array[0];
}
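/*
 * A commonly used alternative formulation (not part of the original file):
 * sequential addressing keeps the active threads contiguous and avoids the
 * divergent modulo test above. Like reduce(), it assumes the grid exactly
 * covers the input and that blockDim.x is a power of two, and it needs the
 * same amount of dynamic shared memory at launch.
 */
__global__ void reduce_seq(unsigned int* d_in, unsigned int* d_out){
  unsigned int local_idx = threadIdx.x;
  unsigned int global_idx = threadIdx.x + blockIdx.x * blockDim.x;
  extern __shared__ unsigned int shared_array [];
  shared_array[local_idx] = d_in[global_idx];
  __syncthreads();
  // Halve the number of active threads each step; each active thread adds the
  // element one stride away.
  for(unsigned int stride = blockDim.x / 2; stride > 0; stride /= 2){
    if(local_idx < stride){
      shared_array[local_idx] += shared_array[local_idx + stride];
    }
    __syncthreads();
  }
  if(local_idx == 0){
    d_out[blockIdx.x] = shared_array[0];
  }
}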
int main(){
const unsigned int NUM_ELEMENTS = NUM_THREADS * NUM_BLOCKS;
const unsigned int IN_BYTES = NUM_ELEMENTS * sizeof(int);
const unsigned int OUT_BYTES = NUM_BLOCKS * sizeof(int);
unsigned int h_in [NUM_ELEMENTS];
for(int i = 0; i < NUM_ELEMENTS; i++){
h_in[i] = i;
}
unsigned int h_out [NUM_BLOCKS];
unsigned int* d_in;
unsigned int* d_out;
cudaMalloc((void **) &d_in, IN_BYTES);
cudaMalloc((void **) &d_out, OUT_BYTES);
cudaMemcpy(d_in, h_in, IN_BYTES, cudaMemcpyHostToDevice);
reduce<<<NUM_BLOCKS, NUM_THREADS, NUM_THREADS * sizeof(int)>>>(d_in, d_out);
cudaMemcpy(h_out, d_out, OUT_BYTES, cudaMemcpyDeviceToHost);
printf("True: %d \n", serial_reduce(h_in, NUM_ELEMENTS));
// Doing a final serial reduce since output is of size NUM_BLOCKS
printf("Output: %d", serial_reduce(h_out, NUM_BLOCKS));
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
a10c6e36c4964c5183f22946288fb390d800b674.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Block size X: 32 */
__global__ void fct_ale_a3(const int maxLevels, const int maxElements, const int * __restrict__ nLevels, const int * __restrict__ elements_in_node, const int * __restrict__ number_elements_in_node, const double2 * __restrict__ UV_rhs, double * __restrict__ fct_ttf_max, double * __restrict__ fct_ttf_min, const double * __restrict__ fct_lo)
{
int item = 0;
extern __shared__ double sharedBuffer[];
double * tvert_max = (double *)(sharedBuffer);
double * tvert_min = (double *)(&sharedBuffer[maxLevels]);
/* Compute tvert_max and tvert_min per level */
for ( int level = threadIdx.x; level < nLevels[blockIdx.x] - 1; level += 32 )
{
double tvert_max_temp = 0.0;
double tvert_min_temp = 0.0;
item = ((elements_in_node[(blockIdx.x * maxElements)] - 1) * maxLevels) + (level);
tvert_max_temp = (UV_rhs[item]).x;
tvert_min_temp = (UV_rhs[item]).y;
for ( int element = 1; element < number_elements_in_node[blockIdx.x]; element++ )
{
item = ((elements_in_node[(blockIdx.x * maxElements) + element] - 1) * maxLevels) + (level);
tvert_max_temp = fmax(tvert_max_temp, (UV_rhs[item]).x);
tvert_min_temp = fmin(tvert_min_temp, (UV_rhs[item]).y);
}
tvert_max[level] = tvert_max_temp;
tvert_min[level] = tvert_min_temp;
}
__syncthreads();
/* Update fct_ttf_max and fct_ttf_min per level */
item = blockIdx.x * maxLevels;
for ( int level = threadIdx.x + 1; level < nLevels[blockIdx.x] - 2; level += 32 )
{
double temp = 0.0;
temp = fmax(tvert_max[(level) - 1], tvert_max[level]);
temp = fmax(temp, tvert_max[(level) + 1]);
fct_ttf_max[item + level] = temp - fct_lo[item + level];
temp = fmin(tvert_min[(level) - 1], tvert_min[level]);
temp = fmin(temp, tvert_min[(level) + 1]);
fct_ttf_min[item + level] = temp - fct_lo[item + level];
}
if ( threadIdx.x == 0 )
{
fct_ttf_max[item] = tvert_max[0] - fct_lo[item];
fct_ttf_min[item] = tvert_min[0] - fct_lo[item];
fct_ttf_max[item + (nLevels[blockIdx.x] - 2)] = tvert_max[nLevels[blockIdx.x] - 2] - fct_lo[item + (nLevels[blockIdx.x] - 2)];
fct_ttf_min[item + (nLevels[blockIdx.x] - 2)] = tvert_min[nLevels[blockIdx.x] - 2] - fct_lo[item + (nLevels[blockIdx.x] - 2)];
}
}
|
a10c6e36c4964c5183f22946288fb390d800b674.cu
|
/* Block size X: 32 */
__global__ void fct_ale_a3(const int maxLevels, const int maxElements, const int * __restrict__ nLevels, const int * __restrict__ elements_in_node, const int * __restrict__ number_elements_in_node, const double2 * __restrict__ UV_rhs, double * __restrict__ fct_ttf_max, double * __restrict__ fct_ttf_min, const double * __restrict__ fct_lo)
{
int item = 0;
extern __shared__ double sharedBuffer[];
double * tvert_max = (double *)(sharedBuffer);
double * tvert_min = (double *)(&sharedBuffer[maxLevels]);
/* Compute tvert_max and tvert_min per level */
for ( int level = threadIdx.x; level < nLevels[blockIdx.x] - 1; level += 32 )
{
double tvert_max_temp = 0.0;
double tvert_min_temp = 0.0;
item = ((elements_in_node[(blockIdx.x * maxElements)] - 1) * maxLevels) + (level);
tvert_max_temp = (UV_rhs[item]).x;
tvert_min_temp = (UV_rhs[item]).y;
for ( int element = 1; element < number_elements_in_node[blockIdx.x]; element++ )
{
item = ((elements_in_node[(blockIdx.x * maxElements) + element] - 1) * maxLevels) + (level);
tvert_max_temp = fmax(tvert_max_temp, (UV_rhs[item]).x);
tvert_min_temp = fmin(tvert_min_temp, (UV_rhs[item]).y);
}
tvert_max[level] = tvert_max_temp;
tvert_min[level] = tvert_min_temp;
}
__syncthreads();
/* Update fct_ttf_max and fct_ttf_min per level */
item = blockIdx.x * maxLevels;
for ( int level = threadIdx.x + 1; level < nLevels[blockIdx.x] - 2; level += 32 )
{
double temp = 0.0;
temp = fmax(tvert_max[(level) - 1], tvert_max[level]);
temp = fmax(temp, tvert_max[(level) + 1]);
fct_ttf_max[item + level] = temp - fct_lo[item + level];
temp = fmin(tvert_min[(level) - 1], tvert_min[level]);
temp = fmin(temp, tvert_min[(level) + 1]);
fct_ttf_min[item + level] = temp - fct_lo[item + level];
}
if ( threadIdx.x == 0 )
{
fct_ttf_max[item] = tvert_max[0] - fct_lo[item];
fct_ttf_min[item] = tvert_min[0] - fct_lo[item];
fct_ttf_max[item + (nLevels[blockIdx.x] - 2)] = tvert_max[nLevels[blockIdx.x] - 2] - fct_lo[item + (nLevels[blockIdx.x] - 2)];
fct_ttf_min[item + (nLevels[blockIdx.x] - 2)] = tvert_min[nLevels[blockIdx.x] - 2] - fct_lo[item + (nLevels[blockIdx.x] - 2)];
}
}
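/*
 * Hypothetical launch sketch (names are illustrative): one 32-thread block per
 * node, with the dynamic shared memory holding tvert_max and tvert_min, i.e.
 * 2 * maxLevels doubles:
 *
 *   dim3 block(32);
 *   dim3 grid(numNodes);
 *   size_t shmem = 2 * maxLevels * sizeof(double);
 *   fct_ale_a3<<<grid, block, shmem>>>(maxLevels, maxElements, d_nLevels,
 *       d_elements_in_node, d_number_elements_in_node, d_UV_rhs,
 *       d_fct_ttf_max, d_fct_ttf_min, d_fct_lo);
 */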
|
045a2f555eff7c415629dff5450313f2a871b5ae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <vector>
#include<cuda.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include <hip/hip_runtime_api.h>
#include <hip/device_functions.h>
#include<stdio.h>
using std::cout;
using std::generate;
using std::vector;
//pull out matrix and shared memory tile size
const int N = 1 << 10;
const int SHMEM_SIZE = 1 << 10;
__global__ void matrixTiledMultiplication(const int* a, const int* b, int* c) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Statically allocated shared memory
__shared__ int s_a[SHMEM_SIZE];
__shared__ int s_b[SHMEM_SIZE];
int tmp = 0;
	//Load in the elements for this tile
	for (int i = 0; i < N; i += blockDim.x) {
		s_a[threadIdx.y * blockDim.x + threadIdx.x] = a[row * N + i + threadIdx.x];
		s_b[threadIdx.y * blockDim.x + threadIdx.x] = b[i * N + threadIdx.y * N + col];
		__syncthreads();
		for (int j = 0; j < blockDim.x; j++) {
			tmp += s_a[threadIdx.y * blockDim.x + j] * s_b[j * blockDim.x + threadIdx.x];
		}
		// Wait for all threads to finish using current tiles before loading in new
		// ones
		__syncthreads();
	}
	// Write the accumulated dot product once, after all tiles have been processed
	c[row * N + col] = tmp;
}
void verify_result(vector<int>&a , vector<int>&b , vector<int>&c) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
int tmp = 0;
for (int k = 0; k < N; k++) {
tmp += a[i * N + k] * b[k * N + j];
}
assert(tmp == c[i * N + j]);
}
}
}
int main()
{
size_t bytes = N * N * sizeof(int);
vector<int>h_a(N * N);
vector<int>h_b(N * N);
vector<int>h_c(N * N);
generate(h_a.begin(), h_a.end(), []() { return rand() % 100; });
generate(h_b.begin(), h_b.end(), []() { return rand() % 100; });
int* d_a, * d_b, * d_c;
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
hipMemcpy(d_a, h_a.data(), bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b.data(), bytes, hipMemcpyHostToDevice);
int THREADS = 32;
int BLOCKS = N / THREADS;
dim3 threads(THREADS, THREADS);
dim3 blocks(BLOCKS, BLOCKS);
	hipLaunchKernelGGL(( matrixTiledMultiplication), dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_c);
hipMemcpy(h_c.data(), d_c, bytes, hipMemcpyDeviceToHost);
verify_result(h_a, h_b, h_c);
cout << "COMPLETED SUCCESSFULLY\n";
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
|
045a2f555eff7c415629dff5450313f2a871b5ae.cu
|
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <vector>
#include<cuda.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include <cuda_runtime_api.h>
#include <device_functions.h>
#include<stdio.h>
using std::cout;
using std::generate;
using std::vector;
//pull out matrix and shared memory tile size
const int N = 1 << 10;
const int SHMEM_SIZE = 1 << 10;
__global__ void matrixTiledMultiplication(const int* a, const int* b, int* c) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Statically allocated shared memory
__shared__ int s_a[SHMEM_SIZE];
__shared__ int s_b[SHMEM_SIZE];
int tmp = 0;
	//Load in the elements for this tile
	for (int i = 0; i < N; i += blockDim.x) {
		s_a[threadIdx.y * blockDim.x + threadIdx.x] = a[row * N + i + threadIdx.x];
		s_b[threadIdx.y * blockDim.x + threadIdx.x] = b[i * N + threadIdx.y * N + col];
		__syncthreads();
		for (int j = 0; j < blockDim.x; j++) {
			tmp += s_a[threadIdx.y * blockDim.x + j] * s_b[j * blockDim.x + threadIdx.x];
		}
		// Wait for all threads to finish using current tiles before loading in new
		// ones
		__syncthreads();
	}
	// Write the accumulated dot product once, after all tiles have been processed
	c[row * N + col] = tmp;
}
void verify_result(vector<int>&a , vector<int>&b , vector<int>&c) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
int tmp = 0;
for (int k = 0; k < N; k++) {
tmp += a[i * N + k] * b[k * N + j];
}
assert(tmp == c[i * N + j]);
}
}
}
int main()
{
size_t bytes = N * N * sizeof(int);
vector<int>h_a(N * N);
vector<int>h_b(N * N);
vector<int>h_c(N * N);
generate(h_a.begin(), h_a.end(), []() { return rand() % 100; });
generate(h_b.begin(), h_b.end(), []() { return rand() % 100; });
int* d_a, * d_b, * d_c;
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
cudaMemcpy(d_a, h_a.data(), bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b.data(), bytes, cudaMemcpyHostToDevice);
int THREADS = 32;
int BLOCKS = N / THREADS;
dim3 threads(THREADS, THREADS);
dim3 blocks(BLOCKS, BLOCKS);
	matrixTiledMultiplication<<<blocks, threads>>>(d_a, d_b, d_c);
cudaMemcpy(h_c.data(), d_c, bytes, cudaMemcpyDeviceToHost);
verify_result(h_a, h_b, h_c);
cout << "COMPLETED SUCCESSFULLY\n";
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
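/*
 * Configuration note (a sketch, not part of the original): the shared-memory
 * tile must match the thread block, i.e. SHMEM_SIZE >= THREADS * THREADS, and
 * N must be a multiple of THREADS, otherwise the tile indexing in the kernel
 * reads out of bounds. A cheap guard before the launch could look like:
 *
 *   assert(SHMEM_SIZE >= THREADS * THREADS);
 *   assert(N % THREADS == 0);
 */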
|
96c11dbdd5dd62112bc64b8cf05b1dcedc38b6cc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zmergeqmr.cu, normal z -> d, Wed Jan 2 14:18:53 2019
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
// These routines merge multiple kernels from qmr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_dqmr_1_kernel(
int num_rows,
int num_cols,
double rho,
double psi,
double *y,
double *z,
double *v,
double *w )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double ytmp = y[ i+j*num_rows ] / rho;
y[ i+j*num_rows ] = ytmp;
v[ i+j*num_rows ] = ytmp;
double ztmp = z[ i+j*num_rows ] / psi;
z[ i+j*num_rows ] = ztmp;
w[ i+j*num_rows ] = ztmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
    v = y / rho
    y = y / rho
    w = z / psi
    z = z / psi
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
rho double
scalar
@param[in]
psi double
scalar
@param[in,out]
y magmaDouble_ptr
vector
@param[in,out]
z magmaDouble_ptr
vector
@param[in,out]
v magmaDouble_ptr
vector
@param[in,out]
w magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_1(
magma_int_t num_rows,
magma_int_t num_cols,
double rho,
double psi,
magmaDouble_ptr y,
magmaDouble_ptr z,
magmaDouble_ptr v,
magmaDouble_ptr w,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dqmr_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, rho, psi,
y, z, v, w );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_2_kernel(
int num_rows,
int num_cols,
double pde,
double rde,
magmaDouble_ptr y,
magmaDouble_ptr z,
magmaDouble_ptr p,
magmaDouble_ptr q )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
p[ i+j*num_rows ] = y[ i+j*num_rows ] - pde * p[ i+j*num_rows ];
q[ i+j*num_rows ] = z[ i+j*num_rows ] - rde * q[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
p = y - pde * p
q = z - rde * q
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
pde double
scalar
@param[in]
rde double
scalar
@param[in]
y magmaDouble_ptr
vector
@param[in]
z magmaDouble_ptr
vector
@param[in,out]
p magmaDouble_ptr
vector
@param[in,out]
q magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_2(
magma_int_t num_rows,
magma_int_t num_cols,
double pde,
double rde,
magmaDouble_ptr y,
magmaDouble_ptr z,
magmaDouble_ptr p,
magmaDouble_ptr q,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dqmr_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, pde, rde, y, z, p, q );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_3_kernel(
int num_rows,
int num_cols,
double beta,
double *pt,
double *v,
double *y )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double tmp = pt[ i+j*num_rows ] - beta * v[ i+j*num_rows ];
v[ i+j*num_rows ] = tmp;
y[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
v = pt - beta * v
y = v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta double
scalar
@param[in]
pt magmaDouble_ptr
vector
@param[in,out]
v magmaDouble_ptr
vector
@param[in,out]
y magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_3(
magma_int_t num_rows,
magma_int_t num_cols,
double beta,
magmaDouble_ptr pt,
magmaDouble_ptr v,
magmaDouble_ptr y,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dqmr_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, pt, v, y );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_4_kernel(
int num_rows,
int num_cols,
double eta,
double *p,
double *pt,
double *d,
double *s,
double *x,
double *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double tmpd = eta * p[ i+j*num_rows ];
d[ i+j*num_rows ] = tmpd;
x[ i+j*num_rows ] = x[ i+j*num_rows ] + tmpd;
double tmps = eta * pt[ i+j*num_rows ];
s[ i+j*num_rows ] = tmps;
r[ i+j*num_rows ] = r[ i+j*num_rows ] - tmps;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
d = eta * p;
s = eta * pt;
x = x + d;
r = r - s;
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
eta double
scalar
@param[in]
p magmaDouble_ptr
vector
@param[in]
pt magmaDouble_ptr
vector
@param[in,out]
d magmaDouble_ptr
vector
@param[in,out]
s magmaDouble_ptr
vector
@param[in,out]
x magmaDouble_ptr
vector
@param[in,out]
r magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_4(
magma_int_t num_rows,
magma_int_t num_cols,
double eta,
magmaDouble_ptr p,
magmaDouble_ptr pt,
magmaDouble_ptr d,
magmaDouble_ptr s,
magmaDouble_ptr x,
magmaDouble_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dqmr_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, eta, p, pt, d, s, x, r );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_5_kernel(
int num_rows,
int num_cols,
double eta,
double pds,
double *p,
double *pt,
double *d,
double *s,
double *x,
double *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double tmpd = eta * p[ i+j*num_rows ] + pds * d[ i+j*num_rows ];
d[ i+j*num_rows ] = tmpd;
x[ i+j*num_rows ] = x[ i+j*num_rows ] + tmpd;
double tmps = eta * pt[ i+j*num_rows ] + pds * s[ i+j*num_rows ];
s[ i+j*num_rows ] = tmps;
r[ i+j*num_rows ] = r[ i+j*num_rows ] - tmps;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
d = eta * p + pds * d;
s = eta * pt + pds * s;
x = x + d;
r = r - s;
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
eta double
scalar
@param[in]
pds double
scalar
@param[in]
p magmaDouble_ptr
vector
@param[in]
pt magmaDouble_ptr
vector
@param[in,out]
d magmaDouble_ptr
vector
@param[in,out]
s magmaDouble_ptr
vector
@param[in,out]
x magmaDouble_ptr
vector
@param[in,out]
r magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_5(
magma_int_t num_rows,
magma_int_t num_cols,
double eta,
double pds,
magmaDouble_ptr p,
magmaDouble_ptr pt,
magmaDouble_ptr d,
magmaDouble_ptr s,
magmaDouble_ptr x,
magmaDouble_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dqmr_5_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, eta, pds, p, pt, d, s, x, r );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_6_kernel(
int num_rows,
int num_cols,
double beta,
double rho,
double psi,
double *y,
double *z,
double *v,
double *w,
double *wt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double wttmp = wt[ i+j*num_rows ]
- MAGMA_D_CONJ( beta ) * w[ i+j*num_rows ];
wt[ i+j*num_rows ] = wttmp;
double ztmp = wttmp / psi;
z[ i+j*num_rows ] = ztmp;
w[ i+j*num_rows ] = ztmp;
double ytmp = y[ i+j*num_rows ] / rho;
y[ i+j*num_rows ] = ytmp;
v[ i+j*num_rows ] = ytmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
wt = wt - conj(beta) * w
v = y / rho
y = y / rho
w = wt / psi
z = wt / psi
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta double
scalar
@param[in]
rho double
scalar
@param[in]
psi double
scalar
@param[in,out]
y magmaDouble_ptr
vector
@param[in,out]
z magmaDouble_ptr
vector
@param[in,out]
v magmaDouble_ptr
vector
@param[in,out]
w magmaDouble_ptr
vector
@param[in,out]
wt magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_6(
magma_int_t num_rows,
magma_int_t num_cols,
double beta,
double rho,
double psi,
magmaDouble_ptr y,
magmaDouble_ptr z,
magmaDouble_ptr v,
magmaDouble_ptr w,
magmaDouble_ptr wt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dqmr_6_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, rho, psi,
y, z, v, w, wt );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_7_kernel(
int num_rows,
int num_cols,
double beta,
double *pt,
double *v,
double *vt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double tmp = pt[ i+j*num_rows ] - beta * v[ i+j*num_rows ];
vt[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
vt = pt - beta * v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta double
scalar
@param[in]
pt magmaDouble_ptr
vector
@param[in,out]
v magmaDouble_ptr
vector
@param[in,out]
vt magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_7(
magma_int_t num_rows,
magma_int_t num_cols,
double beta,
magmaDouble_ptr pt,
magmaDouble_ptr v,
magmaDouble_ptr vt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dqmr_7_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, pt, v, vt );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_8_kernel(
int num_rows,
int num_cols,
double rho,
double psi,
double *vt,
double *wt,
double *y,
double *z,
double *v,
double *w )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
y[ i+j*num_rows ] = y[ i+j*num_rows ] / rho;
v[ i+j*num_rows ] = vt[ i+j*num_rows ] / rho;
z[ i+j*num_rows ] = z[ i+j*num_rows ] / psi;
w[ i+j*num_rows ] = wt[ i+j*num_rows ] / psi;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
    y = y / rho
    v = vt / rho
    z = z / psi
    w = wt / psi
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
rho double
scalar
@param[in]
psi double
scalar
@param[in]
vt magmaDouble_ptr
vector
@param[in]
wt magmaDouble_ptr
vector
@param[in,out]
y magmaDouble_ptr
vector
@param[in,out]
z magmaDouble_ptr
vector
@param[in,out]
v magmaDouble_ptr
vector
@param[in,out]
w magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_8(
magma_int_t num_rows,
magma_int_t num_cols,
double rho,
double psi,
magmaDouble_ptr vt,
magmaDouble_ptr wt,
magmaDouble_ptr y,
magmaDouble_ptr z,
magmaDouble_ptr v,
magmaDouble_ptr w,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dqmr_8_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, rho, psi,
vt, wt, y, z, v, w );
return MAGMA_SUCCESS;
}
|
96c11dbdd5dd62112bc64b8cf05b1dcedc38b6cc.cu
|
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zmergeqmr.cu, normal z -> d, Wed Jan 2 14:18:53 2019
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
// These routines merge multiple kernels from qmr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_dqmr_1_kernel(
int num_rows,
int num_cols,
double rho,
double psi,
double *y,
double *z,
double *v,
double *w )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double ytmp = y[ i+j*num_rows ] / rho;
y[ i+j*num_rows ] = ytmp;
v[ i+j*num_rows ] = ytmp;
double ztmp = z[ i+j*num_rows ] / psi;
z[ i+j*num_rows ] = ztmp;
w[ i+j*num_rows ] = ztmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
    v = y / rho
    y = y / rho
    w = z / psi
    z = z / psi
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
rho double
scalar
@param[in]
psi double
scalar
@param[in,out]
y magmaDouble_ptr
vector
@param[in,out]
z magmaDouble_ptr
vector
@param[in,out]
v magmaDouble_ptr
vector
@param[in,out]
w magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_1(
magma_int_t num_rows,
magma_int_t num_cols,
double rho,
double psi,
magmaDouble_ptr y,
magmaDouble_ptr z,
magmaDouble_ptr v,
magmaDouble_ptr w,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_dqmr_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, rho, psi,
y, z, v, w );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_2_kernel(
int num_rows,
int num_cols,
double pde,
double rde,
magmaDouble_ptr y,
magmaDouble_ptr z,
magmaDouble_ptr p,
magmaDouble_ptr q )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
p[ i+j*num_rows ] = y[ i+j*num_rows ] - pde * p[ i+j*num_rows ];
q[ i+j*num_rows ] = z[ i+j*num_rows ] - rde * q[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
p = y - pde * p
q = z - rde * q
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
pde double
scalar
@param[in]
rde double
scalar
@param[in]
y magmaDouble_ptr
vector
@param[in]
z magmaDouble_ptr
vector
@param[in,out]
p magmaDouble_ptr
vector
@param[in,out]
q magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_2(
magma_int_t num_rows,
magma_int_t num_cols,
double pde,
double rde,
magmaDouble_ptr y,
magmaDouble_ptr z,
magmaDouble_ptr p,
magmaDouble_ptr q,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_dqmr_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, pde, rde, y, z, p, q );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_3_kernel(
int num_rows,
int num_cols,
double beta,
double *pt,
double *v,
double *y )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double tmp = pt[ i+j*num_rows ] - beta * v[ i+j*num_rows ];
v[ i+j*num_rows ] = tmp;
y[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
v = pt - beta * v
y = v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta double
scalar
@param[in]
pt magmaDouble_ptr
vector
@param[in,out]
v magmaDouble_ptr
vector
@param[in,out]
y magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_3(
magma_int_t num_rows,
magma_int_t num_cols,
double beta,
magmaDouble_ptr pt,
magmaDouble_ptr v,
magmaDouble_ptr y,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_dqmr_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, pt, v, y );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_4_kernel(
int num_rows,
int num_cols,
double eta,
double *p,
double *pt,
double *d,
double *s,
double *x,
double *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double tmpd = eta * p[ i+j*num_rows ];
d[ i+j*num_rows ] = tmpd;
x[ i+j*num_rows ] = x[ i+j*num_rows ] + tmpd;
double tmps = eta * pt[ i+j*num_rows ];
s[ i+j*num_rows ] = tmps;
r[ i+j*num_rows ] = r[ i+j*num_rows ] - tmps;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
d = eta * p;
s = eta * pt;
x = x + d;
r = r - s;
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
eta double
scalar
@param[in]
p magmaDouble_ptr
vector
@param[in]
pt magmaDouble_ptr
vector
@param[in,out]
d magmaDouble_ptr
vector
@param[in,out]
s magmaDouble_ptr
vector
@param[in,out]
x magmaDouble_ptr
vector
@param[in,out]
r magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_4(
magma_int_t num_rows,
magma_int_t num_cols,
double eta,
magmaDouble_ptr p,
magmaDouble_ptr pt,
magmaDouble_ptr d,
magmaDouble_ptr s,
magmaDouble_ptr x,
magmaDouble_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_dqmr_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, eta, p, pt, d, s, x, r );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_5_kernel(
int num_rows,
int num_cols,
double eta,
double pds,
double *p,
double *pt,
double *d,
double *s,
double *x,
double *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double tmpd = eta * p[ i+j*num_rows ] + pds * d[ i+j*num_rows ];
d[ i+j*num_rows ] = tmpd;
x[ i+j*num_rows ] = x[ i+j*num_rows ] + tmpd;
double tmps = eta * pt[ i+j*num_rows ] + pds * s[ i+j*num_rows ];
s[ i+j*num_rows ] = tmps;
r[ i+j*num_rows ] = r[ i+j*num_rows ] - tmps;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
d = eta * p + pds * d;
s = eta * pt + pds * s;
x = x + d;
r = r - s;
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
eta double
scalar
@param[in]
pds double
scalar
@param[in]
p magmaDouble_ptr
vector
@param[in]
pt magmaDouble_ptr
vector
@param[in,out]
d magmaDouble_ptr
vector
@param[in,out]
s magmaDouble_ptr
vector
@param[in,out]
x magmaDouble_ptr
vector
@param[in,out]
r magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_5(
magma_int_t num_rows,
magma_int_t num_cols,
double eta,
double pds,
magmaDouble_ptr p,
magmaDouble_ptr pt,
magmaDouble_ptr d,
magmaDouble_ptr s,
magmaDouble_ptr x,
magmaDouble_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_dqmr_5_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, eta, pds, p, pt, d, s, x, r );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_6_kernel(
int num_rows,
int num_cols,
double beta,
double rho,
double psi,
double *y,
double *z,
double *v,
double *w,
double *wt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double wttmp = wt[ i+j*num_rows ]
- MAGMA_D_CONJ( beta ) * w[ i+j*num_rows ];
wt[ i+j*num_rows ] = wttmp;
double ztmp = wttmp / psi;
z[ i+j*num_rows ] = ztmp;
w[ i+j*num_rows ] = ztmp;
double ytmp = y[ i+j*num_rows ] / rho;
y[ i+j*num_rows ] = ytmp;
v[ i+j*num_rows ] = ytmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
wt = wt - conj(beta) * w
v = y / rho
y = y / rho
w = wt / psi
z = wt / psi
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta double
scalar
@param[in]
rho double
scalar
@param[in]
psi double
scalar
@param[in,out]
y magmaDouble_ptr
vector
@param[in,out]
z magmaDouble_ptr
vector
@param[in,out]
v magmaDouble_ptr
vector
@param[in,out]
w magmaDouble_ptr
vector
@param[in,out]
wt magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_6(
magma_int_t num_rows,
magma_int_t num_cols,
double beta,
double rho,
double psi,
magmaDouble_ptr y,
magmaDouble_ptr z,
magmaDouble_ptr v,
magmaDouble_ptr w,
magmaDouble_ptr wt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_dqmr_6_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, rho, psi,
y, z, v, w, wt );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_7_kernel(
int num_rows,
int num_cols,
double beta,
double *pt,
double *v,
double *vt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double tmp = pt[ i+j*num_rows ] - beta * v[ i+j*num_rows ];
vt[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
vt = pt - beta * v
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta double
scalar
@param[in]
pt magmaDouble_ptr
vector
@param[in,out]
v magmaDouble_ptr
vector
@param[in,out]
vt magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_7(
magma_int_t num_rows,
magma_int_t num_cols,
double beta,
magmaDouble_ptr pt,
magmaDouble_ptr v,
magmaDouble_ptr vt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_dqmr_7_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, pt, v, vt );
return MAGMA_SUCCESS;
}
__global__ void
magma_dqmr_8_kernel(
int num_rows,
int num_cols,
double rho,
double psi,
double *vt,
double *wt,
double *y,
double *z,
double *v,
double *w )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
y[ i+j*num_rows ] = y[ i+j*num_rows ] / rho;
v[ i+j*num_rows ] = vt[ i+j*num_rows ] / rho;
z[ i+j*num_rows ] = z[ i+j*num_rows ] / psi;
w[ i+j*num_rows ] = wt[ i+j*num_rows ] / psi;
}
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
    y = y / rho
    v = vt / rho
    z = z / psi
    w = wt / psi
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
rho double
scalar
@param[in]
psi double
scalar
@param[in]
vt magmaDouble_ptr
vector
@param[in]
wt magmaDouble_ptr
vector
@param[in,out]
y magmaDouble_ptr
vector
@param[in,out]
z magmaDouble_ptr
vector
@param[in,out]
v magmaDouble_ptr
vector
@param[in,out]
w magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dqmr_8(
magma_int_t num_rows,
magma_int_t num_cols,
double rho,
double psi,
magmaDouble_ptr vt,
magmaDouble_ptr wt,
magmaDouble_ptr y,
magmaDouble_ptr z,
magmaDouble_ptr v,
magmaDouble_ptr w,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_dqmr_8_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, rho, psi,
vt, wt, y, z, v, w );
return MAGMA_SUCCESS;
}
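/*
 * Minimal call sketch (not part of the original file), assuming device arrays
 * of length num_rows*num_cols allocated with magma_dmalloc and an existing
 * magma_queue_t; names are illustrative and error handling is omitted:
 *
 *   magma_dqmr_1( num_rows, num_cols, rho, psi, d_y, d_z, d_v, d_w, queue );
 *   magma_dqmr_8( num_rows, num_cols, rho, psi, d_vt, d_wt, d_y, d_z, d_v, d_w, queue );
 */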
|
540a478433831eb8e03b50a7bba667fee1dae30d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
#include "../include/iobparams.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_cd1.cuh"
#include "../include/dervfields_cd1.cuh"
#include "../include/usersource_cd1.cuh"
__device__ __host__
int divflux1(real *dw, real *wd, real *w, struct params *p,int *ii,int field,int dir) {
int direction;
int status=0;
real divflux=0;
dw[fencode3_cd1(p,ii,field)]+= grad3dn_cd1(wd,wd,p,ii,flux,dir);
return ( status);
}
__device__ __host__
real transportflux (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(w[fencode3_cd1(p,ii,mom1+direction)]*w[fencode3_cd1(p,ii,field)]/(w[fencode3_cd1(p,ii,rho)]+w[fencode3_cd1(p,ii,rhob)]));
#else
return(w[fencode3_cd1(p,ii,mom1+direction)]*w[fencode3_cd1(p,ii,field)]/w[fencode3_cd1(p,ii,rho)]);
#endif
}
__device__ __host__
real fluxmom1 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return( -(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom10 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(direction==0?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom11 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(direction==1?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom12 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(direction==2?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
int computefluxrho (real *dw, real *wd, real *w, struct params *p,int *ii,int direction) {
int field;
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#if defined USE_SAC || defined USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,rho,direction)+(w[fencode3_cd1(p,ii,rhob)]*w[fencode3_cd1(p,ii,mom1+direction)])/(w[fencode3_cd1(p,ii,rhob)]+w[fencode3_cd1(p,ii,rho)]);
#else
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,rho,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom3 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]=0.0;
wd[fencode3_cd1(p,ii,flux)]+=transportflux(dw,wd,w,p,ii,field,direction)+fluxmom12(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom2 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#ifdef USE_SAC
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom11(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom11(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom1 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#ifdef ADIABHYDRO
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom10(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom10(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
//rho, mom1, mom2, mom3, energy, b1, b2, b3
__device__ __host__
void computeflux (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int dir) {
switch(field)
{
case rho:
computefluxrho(dw,wd,w,p,ii,dir);
break;
case mom1:
computefluxmom1(dw,wd,w,p,ii,field,dir);
break;
case mom2:
computefluxmom2(dw,wd,w,p,ii,field,dir);
break;
#ifdef USE_SAC_3D
case mom3:
computefluxmom3(dw,wd,w,p,ii,field,dir);
break;
#endif
}
}
__global__ void centdiff1init_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd1(p,ii,f)]=0.0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
}
__syncthreads();
}
__global__ void centdiff1_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2))
#endif
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,0);
break;
case 1:
#ifdef USE_SAC_3D
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2))
#endif
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,1);
break;
#ifdef USE_SAC_3D
case 2:
if(ii[2]<p->n[2] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[1]>1 && ii[1]<(p->n[1]-2))
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,2);
break;
#endif
}
__syncthreads();
}
__global__ void centdiff1a_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if(ii[0]>1 && ii[1] >1 && ii[0]<(ni-2) && ii[1]<(nj-2))
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1] >1 && ii[2] >1 && ii[0]<(ni-2) && ii[1]<(nj-2) && ii[2]<(nk-2))
#endif
divflux1(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,dir);
__syncthreads();
}
__global__ void centdiff1af_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
switch(dir)
{
case 0:
#ifdef USE_SAC
if(ii[1]>1 && ii[1] <(nj-2) && ii[0]<(ni) )
#endif
#ifdef USE_SAC_3D
if(ii[1]>1 && ii[1] <(nj-2) && ii[0]<(ni) && ii[2]>1 && ii[2] <(nk-2) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
break;
case 1:
#ifdef USE_SAC
if(ii[0]>1 && ii[1] <(nj) && ii[0]<(ni-2) )
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1] <(nj) && ii[0]<(ni-2) && ii[2]>1 && ii[2] <(nk-2) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
break;
case 2:
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[0]<(ni-2) && ii[1]>1 && ii[1]<(nj-2) && ii[2] <(nk) )
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
#endif
break;
}
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==0 && ii[0]==124 && (p->it)==2)
{
wmod[fencode3_cd1(p,ii,rho)]=0.225;
w[fencode3_cd1(p,ii,rho)]=0.225;
}*/
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==3 && ii[1]==3 && (p->it)==2)
{
wmod[fencode3_cd1(p,ii,rho)]=0.22114;
w[fencode3_cd1(p,ii,rho)]=0.22114;
}*/
__syncthreads();
}
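// zero dwn1 before the source-term stage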
__global__ void centdiff1binit_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd1(p,ii,f)]=0.0;
}
__syncthreads();
}
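// evaluate the user-defined source terms (addsourceterms1_cd1) when USE_USERSOURCE is defined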
__global__ void centdiff1b_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#if(defined(USE_USERSOURCE))
{
ii[0]=ip;
ii[1]=jp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
ii[2]=kp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
if(ii[0]<((p->n[0])) && ii[1]<((p->n[1])) && ii[2]<((p->n[2])) )
#endif
#if(defined(USE_SAC) && defined(USE_USERSOURCE))
if(ii[0]<(p->n[0]) && ii[1]<(p->n[1]))
#endif
#ifdef USE_USERSOURCE
addsourceterms1_cd1(dwn1,wd,wmod+ordero*NVAR*dimp,p,s,ii,f,dir);
}
__syncthreads();
#endif
// }
}
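// source update: subtract dt*dwn1 from the ordero time level of wmod over the full grid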
__global__ void centdiff1bf_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if( ii[1] <(nj) && ii[0]<(ni) )
#endif
#ifdef USE_SAC_3D
if(ii[1] <(nj) && ii[0]<(ni) && ii[2] <(nk) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_cd1(char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
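/////////////////////////////////////
// host wrapper: copy the run parameters to the device and launch the
// init, flux, divergence and update kernels for one field and direction
/////////////////////////////////////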
int cucentdiff1(struct params **p, struct params **d_p,struct state **d_s, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order, int ordero, real dt, int field, int dir)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( centdiff1init_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff1_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff1a_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff1af_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
hipDeviceSynchronize();
return 0;
}
|
540a478433831eb8e03b50a7bba667fee1dae30d.cu
|
#include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
#include "../include/iobparams.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_cd1.cuh"
#include "../include/dervfields_cd1.cuh"
#include "../include/usersource_cd1.cuh"
__device__ __host__
int divflux1(real *dw, real *wd, real *w, struct params *p,int *ii,int field,int dir) {
int direction;
int status=0;
real divflux=0;
dw[fencode3_cd1(p,ii,field)]+= grad3dn_cd1(wd,wd,p,ii,flux,dir);
return ( status);
}
__device__ __host__
real transportflux (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(w[fencode3_cd1(p,ii,mom1+direction)]*w[fencode3_cd1(p,ii,field)]/(w[fencode3_cd1(p,ii,rho)]+w[fencode3_cd1(p,ii,rhob)]));
#else
return(w[fencode3_cd1(p,ii,mom1+direction)]*w[fencode3_cd1(p,ii,field)]/w[fencode3_cd1(p,ii,rho)]);
#endif
}
__device__ __host__
real fluxmom1 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return( -(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom10 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(direction==0?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom11 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(direction==1?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
real fluxmom12 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
#if defined USE_SAC || defined USE_SAC_3D
return(direction==2?wd[fencode3_cd1(p,ii,pressuret)]-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]:-(w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1b+direction)]+w[fencode3_cd1(p,ii,field+(2*NDIM+3))]*w[fencode3_cd1(p,ii,b1+direction)])-w[fencode3_cd1(p,ii,field+(NDIM+1))]*w[fencode3_cd1(p,ii,b1+direction)]);
#endif
}
__device__ __host__
int computefluxrho (real *dw, real *wd, real *w, struct params *p,int *ii,int direction) {
int field;
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#if defined USE_SAC || defined USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,rho,direction)+(w[fencode3_cd1(p,ii,rhob)]*w[fencode3_cd1(p,ii,mom1+direction)])/(w[fencode3_cd1(p,ii,rhob)]+w[fencode3_cd1(p,ii,rho)]);
#else
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,rho,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom3 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]=0.0;
wd[fencode3_cd1(p,ii,flux)]+=transportflux(dw,wd,w,p,ii,field,direction)+fluxmom12(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom2 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#ifdef USE_SAC
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom11(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom11(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxmom1 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
#ifdef ADIABHYDRO
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC
wd[fencode3_cd1(p,ii,flux)]+= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom10(dw,wd,w,p,ii,field,direction);
#endif
#ifdef USE_SAC_3D
wd[fencode3_cd1(p,ii,flux)]= transportflux(dw,wd,w,p,ii,field,direction)+fluxmom10(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
//rho, mom1, mom2, mom3, energy, b1, b2, b3
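// select the flux routine for the requested conserved variable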
__device__ __host__
void computeflux (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int dir) {
switch(field)
{
case rho:
computefluxrho(dw,wd,w,p,ii,dir);
break;
case mom1:
computefluxmom1(dw,wd,w,p,ii,field,dir);
break;
case mom2:
computefluxmom2(dw,wd,w,p,ii,field,dir);
break;
#ifdef USE_SAC_3D
case mom3:
computefluxmom3(dw,wd,w,p,ii,field,dir);
break;
#endif
}
}
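// zero dwn1 and the flux work array wd[flux] over the whole grid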
__global__ void centdiff1init_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd1(p,ii,f)]=0.0;
wd[fencode3_cd1(p,ii,flux)]=0.0;
}
__syncthreads();
}
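// compute the flux of field f along direction dir into wd[flux],
// at points with a two-cell margin in the transverse directions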
__global__ void centdiff1_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2))
#endif
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,0);
break;
case 1:
#ifdef USE_SAC_3D
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2))
#endif
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,1);
break;
#ifdef USE_SAC_3D
case 2:
if(ii[2]<p->n[2] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[1]>1 && ii[1]<(p->n[1]-2))
computeflux(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,2);
break;
#endif
}
__syncthreads();
}
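// accumulate the derivative of wd[flux] along dir into dwn1 on the two-cell interior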
__global__ void centdiff1a_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if(ii[0]>1 && ii[1] >1 && ii[0]<(ni-2) && ii[1]<(nj-2))
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1] >1 && ii[2] >1 && ii[0]<(ni-2) && ii[1]<(nj-2) && ii[2]<(nk-2))
#endif
divflux1(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,dir);
__syncthreads();
}
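// flux update: subtract dt*dwn1 from the ordero time level of wmod on the direction-dependent interior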
__global__ void centdiff1af_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
switch(dir)
{
case 0:
#ifdef USE_SAC
if(ii[1]>1 && ii[1] <(nj-2) && ii[0]<(ni) )
#endif
#ifdef USE_SAC_3D
if(ii[1]>1 && ii[1] <(nj-2) && ii[0]<(ni) && ii[2]>1 && ii[2] <(nk-2) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
break;
case 1:
#ifdef USE_SAC
if(ii[0]>1 && ii[1] <(nj) && ii[0]<(ni-2) )
#endif
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[1] <(nj) && ii[0]<(ni-2) && ii[2]>1 && ii[2] <(nk-2) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
break;
case 2:
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[0]<(ni-2) && ii[1]>1 && ii[1]<(nj-2) && ii[2] <(nk) )
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
#endif
break;
}
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==0 && ii[0]==124 && (p->it)==2)
{
wmod[fencode3_cd1(p,ii,rho)]=0.225;
w[fencode3_cd1(p,ii,rho)]=0.225;
}*/
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==3 && ii[1]==3 && (p->it)==2)
{
wmod[fencode3_cd1(p,ii,rho)]=0.22114;
w[fencode3_cd1(p,ii,rho)]=0.22114;
}*/
__syncthreads();
}
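// zero dwn1 before the source-term stage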
__global__ void centdiff1binit_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd1(p,ii,f)]=0.0;
}
__syncthreads();
}
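// evaluate the user-defined source terms (addsourceterms1_cd1) when USE_USERSOURCE is defined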
__global__ void centdiff1b_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#if(defined(USE_USERSOURCE))
{
ii[0]=ip;
ii[1]=jp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
ii[2]=kp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
if(ii[0]<((p->n[0])) && ii[1]<((p->n[1])) && ii[2]<((p->n[2])) )
#endif
#if(defined(USE_SAC) && defined(USE_USERSOURCE))
if(ii[0]<(p->n[0]) && ii[1]<(p->n[1]))
#endif
#ifdef USE_USERSOURCE
addsourceterms1_cd1(dwn1,wd,wmod+ordero*NVAR*dimp,p,s,ii,f,dir);
}
__syncthreads();
#endif
// }
}
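// source update: subtract dt*dwn1 from the ordero time level of wmod over the full grid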
__global__ void centdiff1bf_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt, int f, int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int fid;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dy=p->dx[1];
real dx=p->dx[0];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
real dz=p->dx[2];
#endif
#ifdef USE_SAC_3D
int kp;
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if( ii[1] <(nj) && ii[0]<(ni) )
#endif
#ifdef USE_SAC_3D
if(ii[1] <(nj) && ii[0]<(ni) && ii[2] <(nk) )
#endif
wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd1(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd1(p,ii,f)];
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_cd1(char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
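/////////////////////////////////////
// host wrapper: copy the run parameters to the device and launch the
// init, flux, divergence and update kernels for one field and direction
/////////////////////////////////////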
int cucentdiff1(struct params **p, struct params **d_p,struct state **d_s, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order, int ordero, real dt, int field, int dir)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
centdiff1init_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff1_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff1a_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff1af_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w,*d_wmod, *d_dwn1, *d_wd, order, ordero,dt,field,dir);
cudaThreadSynchronize();
return 0;
}
|
d725af464bd674c40b9433ecbe8f63eee7a3b1a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
|
d725af464bd674c40b9433ecbe8f63eee7a3b1a7.cu
|
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
22d54f92e80e6ca92353db58a3e3cc7fa159b336.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP Scan
// Given a list (lst) of length n
// Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ... + lst[n-1]}
#include <wb.h>
#define BLOCK_SIZE 512 //@@ You can change this
#define wbCheck(stmt) do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while(0)
__global__ void scan(float * input, float * output, float* blocksum, int len)
{
//@@ Functionality of the scan on the device
//@@ You may need multiple kernel calls; write your kernels before this
//@@ function and call them from here
__shared__ float XY[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = blockIdx.x*blockDim.x;
XY[t] = start + t >= len ? 0.0 : input[start + t];
XY[blockDim.x+t] = start + blockDim.x+t >= len ? 0.0 : input[start + blockDim.x+t];
// make sure the whole tile is loaded before the up-sweep reads neighbouring entries
__syncthreads();
for (int stride = 1;stride <= BLOCK_SIZE; stride *= 2)
{
int index = (threadIdx.x+1)*stride*2 - 1;
if(index < 2*BLOCK_SIZE)
XY[index] += XY[index-stride];
__syncthreads();
}
for (int stride = BLOCK_SIZE/2; stride > 0; stride /= 2)
{
__syncthreads();
int index = (threadIdx.x+1)*stride*2 - 1;
if(index+stride < 2*BLOCK_SIZE)
{
XY[index + stride] += XY[index];
}
}
__syncthreads();
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < len)
{
output[i] = XY[threadIdx.x];
if ((i+1)%blockDim.x == 0) blocksum[i/blockDim.x]=output[i];
}
}
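// add the partial sums of all preceding blocks to each element,
// turning the per-block scans into a global prefix sum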
__global__ void helper(float * output, float * blocksum, int len)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < len)
{
for (int j=0; j < i/blockDim.x; j++)
output[i] += blocksum[j];
}
}
int main(int argc, char ** argv) {
wbArg_t args;
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float * deviceOutput;
float * deviceTemp;
int numElements; // number of elements in the list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numElements);
hostOutput = (float*) malloc(numElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", numElements);
wbTime_start(GPU, "Allocating GPU memory.");
wbCheck(hipMalloc((void**)&deviceInput, numElements*sizeof(float)));
wbCheck(hipMalloc((void**)&deviceOutput, numElements*sizeof(float)));
//@@ deviceTemp
wbCheck(hipMalloc((void**)&deviceTemp, numElements*sizeof(float)));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Clearing output memory.");
wbCheck(hipMemset(deviceOutput, 0, numElements*sizeof(float)));
//@@ deviceTemp
wbCheck(hipMemset(deviceTemp, 0, numElements/BLOCK_SIZE * sizeof(float)));
wbTime_stop(GPU, "Clearing output memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
wbCheck(hipMemcpy(deviceInput, hostInput, numElements*sizeof(float), hipMemcpyHostToDevice));
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimGrid((numElements + BLOCK_SIZE - 1)/BLOCK_SIZE, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
wbLog(TRACE, "DimGrid ", (numElements + BLOCK_SIZE - 1)/BLOCK_SIZE);
wbLog(TRACE, "DimBlock ", BLOCK_SIZE);
//@@ Initialized grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Modify this to complete the functionality of the scan
//@@ on the device
hipLaunchKernelGGL(( scan), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceInput, deviceOutput, deviceTemp, numElements);
hipLaunchKernelGGL(( helper), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceOutput, deviceTemp, numElements);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(hipMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), hipMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceInput);
hipFree(deviceOutput);
hipFree(deviceTemp);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, numElements);
free(hostInput);
free(hostOutput);
return 0;
}
|
22d54f92e80e6ca92353db58a3e3cc7fa159b336.cu
|
// MP Scan
// Given a list (lst) of length n
// Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ... + lst[n-1]}
#include <wb.h>
#define BLOCK_SIZE 512 //@@ You can change this
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while(0)
__global__ void scan(float * input, float * output, float* blocksum, int len)
{
//@@ Functionality of the scan on the device
//@@ You may need multiple kernel calls; write your kernels before this
//@@ function and call them from here
__shared__ float XY[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = blockIdx.x*blockDim.x;
XY[t] = start + t >= len ? 0.0 : input[start + t];
XY[blockDim.x+t] = start + blockDim.x+t >= len ? 0.0 : input[start + blockDim.x+t];
// make sure the whole tile is loaded before the up-sweep reads neighbouring entries
__syncthreads();
for (int stride = 1;stride <= BLOCK_SIZE; stride *= 2)
{
int index = (threadIdx.x+1)*stride*2 - 1;
if(index < 2*BLOCK_SIZE)
XY[index] += XY[index-stride];
__syncthreads();
}
for (int stride = BLOCK_SIZE/2; stride > 0; stride /= 2)
{
__syncthreads();
int index = (threadIdx.x+1)*stride*2 - 1;
if(index+stride < 2*BLOCK_SIZE)
{
XY[index + stride] += XY[index];
}
}
__syncthreads();
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < len)
{
output[i] = XY[threadIdx.x];
if ((i+1)%blockDim.x == 0) blocksum[i/blockDim.x]=output[i];
}
}
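// add the partial sums of all preceding blocks to each element,
// turning the per-block scans into a global prefix sum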
__global__ void helper(float * output, float * blocksum, int len)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < len)
{
for (int j=0; j < i/blockDim.x; j++)
output[i] += blocksum[j];
}
}
int main(int argc, char ** argv) {
wbArg_t args;
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float * deviceOutput;
float * deviceTemp;
int numElements; // number of elements in the list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numElements);
hostOutput = (float*) malloc(numElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", numElements);
wbTime_start(GPU, "Allocating GPU memory.");
wbCheck(cudaMalloc((void**)&deviceInput, numElements*sizeof(float)));
wbCheck(cudaMalloc((void**)&deviceOutput, numElements*sizeof(float)));
//@@ deviceTemp
wbCheck(cudaMalloc((void**)&deviceTemp, numElements*sizeof(float)));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Clearing output memory.");
wbCheck(cudaMemset(deviceOutput, 0, numElements*sizeof(float)));
//@@ deviceTemp
wbCheck(cudaMemset(deviceTemp, 0, numElements/BLOCK_SIZE * sizeof(float)));
wbTime_stop(GPU, "Clearing output memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
wbCheck(cudaMemcpy(deviceInput, hostInput, numElements*sizeof(float), cudaMemcpyHostToDevice));
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimGrid((numElements + BLOCK_SIZE - 1)/BLOCK_SIZE, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
wbLog(TRACE, "DimGrid ", (numElements + BLOCK_SIZE - 1)/BLOCK_SIZE);
wbLog(TRACE, "DimBlock ", BLOCK_SIZE);
//@@ Initialized grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Modify this to complete the functionality of the scan
//@@ on the device
scan<<<DimGrid,DimBlock>>>(deviceInput, deviceOutput, deviceTemp, numElements);
helper<<<DimGrid,DimBlock>>>(deviceOutput, deviceTemp, numElements);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(cudaMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), cudaMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceInput);
cudaFree(deviceOutput);
cudaFree(deviceTemp);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, numElements);
free(hostInput);
free(hostOutput);
return 0;
}
|
1ccd5e84b3581d50e1efa1d9561638b451865acc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void swan_fast_fill( uint4 *ptr, int len ) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if( idx<len) {
ptr[idx] = make_uint4( 0,0,0,0 );
}
}
|
1ccd5e84b3581d50e1efa1d9561638b451865acc.cu
|
#include "includes.h"
__global__ void swan_fast_fill( uint4 *ptr, int len ) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if( idx<len) {
ptr[idx] = make_uint4( 0,0,0,0 );
}
}
|
f9713333cca89b8baffe4a1fa005d967af94aeab.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/sparse_apply_adagrad_d_a_impl.cuh"
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <hipcub/hipcub.hpp>
#include <iostream>
#include <algorithm>
#include <vector>
#include <typeinfo>
#include "include/hip/hip_fp16.h"
template <typename T>
__device__ __forceinline__ T AbsFunc(T x) {
return abs(x);
}
template <>
__device__ __forceinline__ half AbsFunc(half x) {
return abs(__half2float(x));
}
template <typename T>
__device__ __forceinline__ T MaxFunc(T x, T y) {
return max(x, y);
}
template <>
__device__ __forceinline__ half MaxFunc(half x, half y) {
return max(__half2float(x), __half2float(y));
}
template <typename T>
__device__ __forceinline__ T Sign(T num) {
if (num > static_cast<T>(0.0)) {
return static_cast<T>(1.0);
} else if (num == static_cast<T>(0.0)) {
return static_cast<T>(0.0);
} else {
return static_cast<T>(-1.0);
}
}
template <typename T, typename S, typename S2>
__global__ void SparseApplyAdagradDAKernel(const size_t inner_size, T *var, T *accum, T *squared_accum,
const T *grad, const T *lr, const T *l1, const T *l2, const S2 *global_step,
int32_t *rows_index, S *indices_sort, int32_t *thready_pos_shrink,
int32_t shrink_num) {
T zero = static_cast<T>(0.0);
T minus_one = static_cast<T>(-1);
T global_step_scalar = static_cast<T>(static_cast<double>(global_step[0]));
T gs_lr = global_step_scalar * lr[0];
T l1_scalar = l1[0];
T l2_scalar = l2[0];
for (size_t pos_x = blockIdx.x * blockDim.x + threadIdx.x; pos_x < inner_size; pos_x += gridDim.x * blockDim.x) {
for (size_t pos_y = blockIdx.y * blockDim.y + threadIdx.y; pos_y < shrink_num - 1;
pos_y += gridDim.y * blockDim.y) {
int32_t start_row = thready_pos_shrink[pos_y];
int32_t end_row = thready_pos_shrink[pos_y + 1];
S update_pos = indices_sort[start_row] * inner_size + pos_x;
for (int idx = start_row; idx < end_row; ++idx) {
int grad_pos = rows_index[idx] * inner_size + pos_x;
accum[update_pos] += grad[grad_pos];
squared_accum[update_pos] += grad[grad_pos] * grad[grad_pos];
if (squared_accum[update_pos] <= zero) {
var[update_pos] = NAN;
continue;
}
if (gs_lr == zero) {
var[update_pos] = 0;
continue;
}
if (l1_scalar > zero) {
var[update_pos] =
minus_one * static_cast<T>(Sign(static_cast<double>(accum[update_pos]))) *
static_cast<T>(MaxFunc(
static_cast<double>((static_cast<T>(AbsFunc(static_cast<double>(accum[update_pos]))) /
global_step_scalar) - l1_scalar), static_cast<double>(0.0))) /
(l2_scalar + static_cast<T>(sqrt(static_cast<double>(squared_accum[update_pos]))) / gs_lr);
} else {
var[update_pos] = minus_one * (accum[update_pos] / global_step_scalar) /
(l2_scalar + static_cast<T>(sqrt(static_cast<double>(squared_accum[update_pos]))) / gs_lr);
}
}
}
}
}
template <>
__global__ void SparseApplyAdagradDAKernel(const size_t inner_size, half *var, half *accum, half *squared_accum,
const half *grad, const half *lr, const half *l1, const half *l2,
const int32_t *global_step, int32_t *rows_index, int32_t *indices_sort,
int32_t *thready_pos_shrink, int32_t shrink_num) {
float zero = static_cast<float>(0.0);
float minus_one = static_cast<float>(-1);
float global_step_scalar = static_cast<float>(global_step[0]);
float gs_lr = global_step_scalar * __half2float(lr[0]);
float l1_scalar = __half2float(l1[0]);
float l2_scalar = __half2float(l2[0]);
for (size_t pos_x = blockIdx.x * blockDim.x + threadIdx.x; pos_x < inner_size; pos_x += gridDim.x * blockDim.x) {
for (size_t pos_y = blockIdx.y * blockDim.y + threadIdx.y; pos_y < shrink_num - 1;
pos_y += gridDim.y * blockDim.y) {
int32_t start_row = thready_pos_shrink[pos_y];
int32_t end_row = thready_pos_shrink[pos_y + 1];
int32_t update_pos = indices_sort[start_row] * inner_size + pos_x;
for (int idx = start_row; idx < end_row; ++idx) {
int grad_pos = rows_index[idx] * inner_size + pos_x;
accum[update_pos] += grad[grad_pos];
squared_accum[update_pos] += grad[grad_pos] * grad[grad_pos];
if (squared_accum[update_pos] <= __float2half(zero)) {
var[update_pos] = NAN;
continue;
}
if (gs_lr == zero) {
var[update_pos] = zero;
continue;
}
if (l1_scalar > zero) {
var[update_pos] = __float2half(
minus_one * (Sign(__half2float(accum[update_pos]))) *
(MaxFunc((((AbsFunc(__half2float(accum[update_pos]))) /
global_step_scalar) - l1_scalar), static_cast<float>(0.0))) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
} else {
var[update_pos] = __float2half(minus_one * (__half2float(accum[update_pos]) / global_step_scalar) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
}
}
}
}
}
template <>
__global__ void SparseApplyAdagradDAKernel(const size_t inner_size, half *var, half *accum, half *squared_accum,
const half *grad, const half *lr, const half *l1, const half *l2,
const int64_t *global_step, int32_t *rows_index, int32_t *indices_sort,
int32_t *thready_pos_shrink, int32_t shrink_num) {
float zero = static_cast<float>(0.0);
float minus_one = static_cast<float>(-1);
float global_step_scalar = static_cast<float>(global_step[0]);
float gs_lr = global_step_scalar * __half2float(lr[0]);
float l1_scalar = __half2float(l1[0]);
float l2_scalar = __half2float(l2[0]);
for (size_t pos_x = blockIdx.x * blockDim.x + threadIdx.x; pos_x < inner_size; pos_x += gridDim.x * blockDim.x) {
for (size_t pos_y = blockIdx.y * blockDim.y + threadIdx.y; pos_y < shrink_num - 1;
pos_y += gridDim.y * blockDim.y) {
int32_t start_row = thready_pos_shrink[pos_y];
int32_t end_row = thready_pos_shrink[pos_y + 1];
int32_t update_pos = indices_sort[start_row] * inner_size + pos_x;
for (int idx = start_row; idx < end_row; ++idx) {
int grad_pos = rows_index[idx] * inner_size + pos_x;
accum[update_pos] += grad[grad_pos];
squared_accum[update_pos] += grad[grad_pos] * grad[grad_pos];
if (squared_accum[update_pos] <= __float2half(zero)) {
var[update_pos] = NAN;
continue;
}
if (gs_lr == zero) {
var[update_pos] = 0;
continue;
}
if (l1_scalar > zero) {
var[update_pos] = __float2half(
minus_one * (Sign(__half2float(accum[update_pos]))) *
(MaxFunc((((AbsFunc(__half2float(accum[update_pos]))) /
global_step_scalar) - l1_scalar), static_cast<float>(0.0))) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
} else {
var[update_pos] = __float2half(minus_one * (__half2float(accum[update_pos]) / global_step_scalar) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
}
}
}
}
}
template <>
__global__ void SparseApplyAdagradDAKernel(const size_t inner_size, half *var, half *accum, half *squared_accum,
const half *grad, const half *lr, const half *l1, const half *l2,
const int64_t *global_step, int32_t *rows_index, int64_t *indices_sort,
int32_t *thready_pos_shrink, int32_t shrink_num) {
float zero = static_cast<float>(0.0);
float minus_one = static_cast<float>(-1);
float global_step_scalar = static_cast<float>(global_step[0]);
float gs_lr = global_step_scalar * __half2float(lr[0]);
float l1_scalar = __half2float(l1[0]);
float l2_scalar = __half2float(l2[0]);
for (size_t pos_x = blockIdx.x * blockDim.x + threadIdx.x; pos_x < inner_size; pos_x += gridDim.x * blockDim.x) {
for (size_t pos_y = blockIdx.y * blockDim.y + threadIdx.y; pos_y < shrink_num - 1;
pos_y += gridDim.y * blockDim.y) {
int32_t start_row = thready_pos_shrink[pos_y];
int32_t end_row = thready_pos_shrink[pos_y + 1];
int64_t update_pos = indices_sort[start_row] * inner_size + pos_x;
for (int idx = start_row; idx < end_row; ++idx) {
int grad_pos = rows_index[idx] * inner_size + pos_x;
accum[update_pos] += grad[grad_pos];
squared_accum[update_pos] += grad[grad_pos] * grad[grad_pos];
if (squared_accum[update_pos] <= __float2half(zero)) {
var[update_pos] = NAN;
continue;
}
if (gs_lr == zero) {
var[update_pos] = 0;
continue;
}
if (l1_scalar > zero) {
var[update_pos] = __float2half(
minus_one * (Sign(__half2float(accum[update_pos]))) *
(MaxFunc((((AbsFunc(__half2float(accum[update_pos]))) /
global_step_scalar) - l1_scalar), static_cast<float>(0.0))) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
} else {
var[update_pos] = __float2half(minus_one * (__half2float(accum[update_pos]) / global_step_scalar) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
}
}
}
}
}
template <>
__global__ void SparseApplyAdagradDAKernel(const size_t inner_size, half *var, half *accum, half *squared_accum,
const half *grad, const half *lr, const half *l1, const half *l2,
const int32_t *global_step, int32_t *rows_index, int64_t *indices_sort,
int32_t *thready_pos_shrink, int32_t shrink_num) {
float zero = static_cast<float>(0.0);
float minus_one = static_cast<float>(-1);
float global_step_scalar = static_cast<float>(global_step[0]);
float gs_lr = global_step_scalar * __half2float(lr[0]);
float l1_scalar = __half2float(l1[0]);
float l2_scalar = __half2float(l2[0]);
for (size_t pos_x = blockIdx.x * blockDim.x + threadIdx.x; pos_x < inner_size; pos_x += gridDim.x * blockDim.x) {
for (size_t pos_y = blockIdx.y * blockDim.y + threadIdx.y; pos_y < shrink_num - 1;
pos_y += gridDim.y * blockDim.y) {
int32_t start_row = thready_pos_shrink[pos_y];
int32_t end_row = thready_pos_shrink[pos_y + 1];
int64_t update_pos = indices_sort[start_row] * inner_size + pos_x;
for (int idx = start_row; idx < end_row; ++idx) {
int grad_pos = rows_index[idx] * inner_size + pos_x;
accum[update_pos] += grad[grad_pos];
squared_accum[update_pos] += grad[grad_pos] * grad[grad_pos];
if (squared_accum[update_pos] <= __float2half(zero)) {
var[update_pos] = NAN;
continue;
}
if (gs_lr == zero) {
var[update_pos] = 0;
continue;
}
if (l1_scalar > zero) {
var[update_pos] = __float2half(
minus_one * (Sign(__half2float(accum[update_pos]))) *
(MaxFunc((((AbsFunc(__half2float(accum[update_pos]))) /
global_step_scalar) - l1_scalar), static_cast<float>(0.0))) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
} else {
var[update_pos] = __float2half(minus_one * (__half2float(accum[update_pos]) / global_step_scalar) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
}
}
}
}
}
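// mark the first slot of each run of equal sorted indices (and the trailing sentinel slot)
// with its position; all other slots get -1 and are compacted away later with DeviceSelect::If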
template <typename S>
__global__ void SumOfRows(S *indices_sort, size_t indices_num, int32_t *thready_pos) {
for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < indices_num; idx += blockDim.x * gridDim.x) {
if (idx == 0 || idx == indices_num - 1 || indices_sort[idx] != indices_sort[idx - 1]) {
thready_pos[idx] = static_cast<S>(idx);
} else {
thready_pos[idx] = static_cast<int32_t>(-1);
}
}
}
struct GreaterThan {
__host__ __device__ __forceinline__ bool operator()(const int32_t &val) const { return (val > -1); }
};
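// host wrapper: sort the indices (keeping the original row order in rows_index), find the
// boundaries between runs of duplicate indices, then launch SparseApplyAdagradDAKernel on a
// 2D grid over (inner element, index run)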
template <typename T, typename S, typename S2>
void CalSparseApplyAdagradDA(const size_t batch_size, size_t indices_size, const size_t size,
T *var, T *accum, T *squared_accum, const T *grad, const S *indices,
const T *lr, const T *l1, const T *l2, const S2 *global_step, T *output_var,
S *indices_sort, int32_t *rows_index, int32_t *thready_pos,
int32_t *thready_pos_shrink, int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream) {
auto policy = thrust::hip::par.on(cuda_stream);
thrust::sequence(policy,
thrust::device_pointer_cast(rows_index),
thrust::device_pointer_cast(rows_index) + indices_size);
thrust::copy(thrust::device_pointer_cast(indices),
thrust::device_pointer_cast(indices) + indices_size,
thrust::device_pointer_cast(indices_sort));
thrust::stable_sort_by_key(policy,
thrust::device_pointer_cast(indices_sort),
thrust::device_pointer_cast(indices_sort) + indices_size,
thrust::device_pointer_cast(rows_index));
const int inner_size = static_cast<int>(size / indices_size);
hipLaunchKernelGGL(( SumOfRows), dim3(CUDA_BLOCKS(device_id, indices_size + 1)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream,
indices_sort, indices_size + 1, thready_pos);
GreaterThan greater;
void *s_temp_storage = nullptr;
size_t s_temp_storage_bytes = 0;
(void)hipcub::DeviceSelect::If(nullptr, s_temp_storage_bytes, static_cast<float *>(nullptr),
static_cast<float *>(nullptr), static_cast<int *>(nullptr), indices_size + 1, greater,
cuda_stream);
(void)hipMalloc(&s_temp_storage, s_temp_storage_bytes);
(void)hipcub::DeviceSelect::If(s_temp_storage, s_temp_storage_bytes, thready_pos, thready_pos_shrink, shrink_num,
indices_size + 1, greater, cuda_stream);
hipFree(s_temp_storage);
hipStreamSynchronize(reinterpret_cast<hipStream_t>(cuda_stream));
int32_t h_shrink_num = 0;
hipMemcpy(&h_shrink_num, shrink_num, sizeof(int32_t), hipMemcpyDeviceToHost);
std::vector<int> thready_pos_shrink_h(indices_size + 1);
hipMemcpy(thready_pos_shrink_h.data(), thready_pos_shrink, h_shrink_num * sizeof(int32_t), hipMemcpyDeviceToHost);
int32_t thread_y = h_shrink_num - 1 > 128 ? 128 : (h_shrink_num - 1);
int pow_num = static_cast<int>(log(thread_y * 1.0) / log(2.0)) + 1;
thread_y = static_cast<int>(pow(2.0, pow_num));
int32_t thread_x = 512 / thread_y > inner_size ? inner_size : (512 / thread_y);
hipDeviceProp_t prop;
(void)hipGetDeviceProperties(&prop, device_id);
int max_blocks = prop.multiProcessorCount;
int block_y = (h_shrink_num - 1) / thread_y > 8 ? 8 : ((h_shrink_num - 1) / thread_y);
block_y = block_y == 0 ? 1 : block_y;
int32_t need_block_x = (inner_size - 1) / thread_x + 1;
int block_x = need_block_x > (max_blocks / block_y) ? (max_blocks / block_y) : need_block_x;
dim3 block_dim(thread_x, thread_y);
dim3 grid_dim(block_x, block_y);
hipLaunchKernelGGL(( SparseApplyAdagradDAKernel), dim3(grid_dim), dim3(block_dim), 0, cuda_stream,
inner_size, var, accum, squared_accum, grad, lr, l1, l2,
global_step, rows_index, indices_sort, thready_pos_shrink, h_shrink_num);
hipStreamSynchronize(reinterpret_cast<hipStream_t>(cuda_stream));
hipMemcpy(output_var, var, size * sizeof(T), hipMemcpyDeviceToDevice);
}
template <typename T, typename S, typename S1>
CUDA_LIB_EXPORT void CalSparseApplyAdagradDA(const size_t batch_size,
size_t indices_size, const size_t size,
T *var, T *accum, T *squared_accum,
const T *grad, const S *indices, const T *lr,
const T *l1, const T *l2, const S1 *global_step,
T *output_var, S *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int8_t, int32_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int8_t *var,
int8_t *accum, int8_t *squared_accum, const int8_t *grad,
const int32_t *indices, const int8_t *lr,
const int8_t *l1, const int8_t *l2,
const int64_t *global_step, int8_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int8_t, int64_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int8_t *var,
int8_t *accum, int8_t *squared_accum, const int8_t *grad,
const int64_t *indices, const int8_t *lr,
const int8_t *l1, const int8_t *l2,
const int64_t *global_step, int8_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int8_t, int32_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int8_t *var,
int8_t *accum, int8_t *squared_accum, const int8_t *grad,
const int32_t *indices, const int8_t *lr,
const int8_t *l1, const int8_t *l2,
const int32_t *global_step, int8_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int8_t, int64_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int8_t *var,
int8_t *accum, int8_t *squared_accum, const int8_t *grad,
const int64_t *indices, const int8_t *lr,
const int8_t *l1, const int8_t *l2,
const int32_t *global_step, int8_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int16_t, int32_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int16_t *var,
int16_t *accum, int16_t *squared_accum,
const int16_t *grad,
const int32_t *indices, const int16_t *lr,
const int16_t *l1, const int16_t *l2,
const int64_t *global_step, int16_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int16_t, int64_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int16_t *var,
int16_t *accum, int16_t *squared_accum,
const int16_t *grad,
const int64_t *indices, const int16_t *lr,
const int16_t *l1, const int16_t *l2,
const int64_t *global_step, int16_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int16_t, int32_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int16_t *var,
int16_t *accum, int16_t *squared_accum,
const int16_t *grad,
const int32_t *indices, const int16_t *lr,
const int16_t *l1, const int16_t *l2,
const int32_t *global_step, int16_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int16_t, int64_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int16_t *var,
int16_t *accum, int16_t *squared_accum,
const int16_t *grad,
const int64_t *indices, const int16_t *lr,
const int16_t *l1, const int16_t *l2,
const int32_t *global_step, int16_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int32_t, int32_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int32_t *var,
int32_t *accum, int32_t *squared_accum,
const int32_t *grad,
const int32_t *indices, const int32_t *lr,
const int32_t *l1, const int32_t *l2,
const int64_t *global_step, int32_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int32_t, int64_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int32_t *var,
int32_t *accum, int32_t *squared_accum,
const int32_t *grad,
const int64_t *indices, const int32_t *lr,
const int32_t *l1, const int32_t *l2,
const int64_t *global_step, int32_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int32_t, int32_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int32_t *var,
int32_t *accum, int32_t *squared_accum,
const int32_t *grad,
const int32_t *indices, const int32_t *lr,
const int32_t *l1, const int32_t *l2,
const int32_t *global_step, int32_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int32_t, int64_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int32_t *var,
int32_t *accum, int32_t *squared_accum,
const int32_t *grad,
const int64_t *indices, const int32_t *lr,
const int32_t *l1, const int32_t *l2,
const int32_t *global_step, int32_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int64_t, int32_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int64_t *var,
int64_t *accum, int64_t *squared_accum,
const int64_t *grad,
const int32_t *indices, const int64_t *lr,
const int64_t *l1, const int64_t *l2,
const int64_t *global_step, int64_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int64_t, int64_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int64_t *var,
int64_t *accum, int64_t *squared_accum,
const int64_t *grad,
const int64_t *indices, const int64_t *lr,
const int64_t *l1, const int64_t *l2,
const int64_t *global_step, int64_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int64_t, int32_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int64_t *var,
int64_t *accum, int64_t *squared_accum,
const int64_t *grad,
const int32_t *indices, const int64_t *lr,
const int64_t *l1, const int64_t *l2,
const int32_t *global_step, int64_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int64_t, int64_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int64_t *var,
int64_t *accum, int64_t *squared_accum,
const int64_t *grad,
const int64_t *indices, const int64_t *lr,
const int64_t *l1, const int64_t *l2,
const int32_t *global_step, int64_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<double, int32_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, double *var,
double *accum, double *squared_accum, const double *grad,
const int32_t *indices, const double *lr,
const double *l1, const double *l2,
const int64_t *global_step, double *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<double, int64_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, double *var,
double *accum, double *squared_accum, const double *grad,
const int64_t *indices, const double *lr,
const double *l1, const double *l2,
const int64_t *global_step, double *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<double, int32_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, double *var,
double *accum, double *squared_accum, const double *grad,
const int32_t *indices, const double *lr,
const double *l1, const double *l2,
const int32_t *global_step, double *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<double, int64_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, double *var,
double *accum, double *squared_accum, const double *grad,
const int64_t *indices, const double *lr,
const double *l1, const double *l2,
const int32_t *global_step, double *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<float, int32_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, float *var,
float *accum, float *squared_accum, const float *grad,
const int32_t *indices, const float *lr,
const float *l1, const float *l2,
const int64_t *global_step, float *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<float, int64_t, int64_t>(const size_t batch_size,
size_t indices_size,
const size_t size, float *var,
float *accum, float *squared_accum, const float *grad,
const int64_t *indices, const float *lr,
const float *l1, const float *l2,
const int64_t *global_step, float *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<float, int32_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, float *var,
float *accum, float *squared_accum, const float *grad,
const int32_t *indices, const float *lr,
const float *l1, const float *l2,
const int32_t *global_step, float *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<float, int64_t, int32_t>(const size_t batch_size,
size_t indices_size,
const size_t size, float *var,
float *accum, float *squared_accum, const float *grad,
const int64_t *indices, const float *lr,
const float *l1, const float *l2,
const int32_t *global_step, float *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<half, int32_t, int64_t>(const size_t batch_size,
size_t indices_size,
const size_t size, half *var,
half *accum, half *squared_accum, const half *grad,
const int32_t *indices, const half *lr,
const half *l1, const half *l2,
const int64_t *global_step, half *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<half, int64_t, int64_t>(const size_t batch_size,
size_t indices_size,
const size_t size, half *var,
half *accum, half *squared_accum, const half *grad,
const int64_t *indices, const half *lr,
const half *l1, const half *l2,
const int64_t *global_step, half *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<half, int32_t, int32_t>(const size_t batch_size,
size_t indices_size,
const size_t size, half *var,
half *accum, half *squared_accum, const half *grad,
const int32_t *indices, const half *lr,
const half *l1, const half *l2,
const int32_t *global_step, half *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<half, int64_t, int32_t>(const size_t batch_size,
size_t indices_size,
const size_t size, half *var,
half *accum, half *squared_accum, const half *grad,
const int64_t *indices, const half *lr,
const half *l1, const half *l2,
const int32_t *global_step, half *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
hipStream_t cuda_stream);
|
f9713333cca89b8baffe4a1fa005d967af94aeab.cu
|
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/sparse_apply_adagrad_d_a_impl.cuh"
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <cub/cub.cuh>
#include <iostream>
#include <algorithm>
#include <vector>
#include <typeinfo>
#include "include/cuda_fp16.h"
template <typename T>
__device__ __forceinline__ T AbsFunc(T x) {
return abs(x);
}
template <>
__device__ __forceinline__ half AbsFunc(half x) {
return abs(__half2float(x));
}
template <typename T>
__device__ __forceinline__ T MaxFunc(T x, T y) {
return max(x, y);
}
template <>
__device__ __forceinline__ half MaxFunc(half x, half y) {
return max(__half2float(x), __half2float(y));
}
template <typename T>
__device__ __forceinline__ T Sign(T num) {
if (num > static_cast<T>(0.0)) {
return static_cast<T>(1.0);
} else if (num == static_cast<T>(0.0)) {
return static_cast<T>(0.0);
} else {
return static_cast<T>(-1.0);
}
}
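// Applies the AdagradDA update for the rows selected by the (sorted) indices. For every touched element the
// kernel accumulates grad and grad * grad, then writes
//   var = -sign(accum) * max(|accum| / global_step - l1, 0) / (l2 + sqrt(squared_accum) / (global_step * lr))
// when l1 > 0, and the same expression without the l1 shrinkage otherwise. thready_pos_shrink holds the start
// offsets of equal-index runs, so each (pos_x, pos_y) thread processes one column slot of one unique row.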
template <typename T, typename S, typename S2>
__global__ void SparseApplyAdagradDAKernel(const size_t inner_size, T *var, T *accum, T *squared_accum,
const T *grad, const T *lr, const T *l1, const T *l2, const S2 *global_step,
int32_t *rows_index, S *indices_sort, int32_t *thready_pos_shrink,
int32_t shrink_num) {
T zero = static_cast<T>(0.0);
T minus_one = static_cast<T>(-1);
T global_step_scalar = static_cast<T>(static_cast<double>(global_step[0]));
T gs_lr = global_step_scalar * lr[0];
T l1_scalar = l1[0];
T l2_scalar = l2[0];
for (size_t pos_x = blockIdx.x * blockDim.x + threadIdx.x; pos_x < inner_size; pos_x += gridDim.x * blockDim.x) {
for (size_t pos_y = blockIdx.y * blockDim.y + threadIdx.y; pos_y < shrink_num - 1;
pos_y += gridDim.y * blockDim.y) {
int32_t start_row = thready_pos_shrink[pos_y];
int32_t end_row = thready_pos_shrink[pos_y + 1];
S update_pos = indices_sort[start_row] * inner_size + pos_x;
for (int idx = start_row; idx < end_row; ++idx) {
int grad_pos = rows_index[idx] * inner_size + pos_x;
accum[update_pos] += grad[grad_pos];
squared_accum[update_pos] += grad[grad_pos] * grad[grad_pos];
if (squared_accum[update_pos] <= zero) {
var[update_pos] = NAN;
continue;
}
if (gs_lr == zero) {
var[update_pos] = 0;
continue;
}
if (l1_scalar > zero) {
var[update_pos] =
minus_one * static_cast<T>(Sign(static_cast<double>(accum[update_pos]))) *
static_cast<T>(MaxFunc(
static_cast<double>((static_cast<T>(AbsFunc(static_cast<double>(accum[update_pos]))) /
global_step_scalar) - l1_scalar), static_cast<double>(0.0))) /
(l2_scalar + static_cast<T>(sqrt(static_cast<double>(squared_accum[update_pos]))) / gs_lr);
} else {
var[update_pos] = minus_one * (accum[update_pos] / global_step_scalar) /
(l2_scalar + static_cast<T>(sqrt(static_cast<double>(squared_accum[update_pos]))) / gs_lr);
}
}
}
}
}
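// The specializations below handle half tensors: the gradient accumulation stays in half, while the var update
// itself is evaluated in float via __half2float / __float2half before being written back.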
template <>
__global__ void SparseApplyAdagradDAKernel(const size_t inner_size, half *var, half *accum, half *squared_accum,
const half *grad, const half *lr, const half *l1, const half *l2,
const int32_t *global_step, int32_t *rows_index, int32_t *indices_sort,
int32_t *thready_pos_shrink, int32_t shrink_num) {
float zero = static_cast<float>(0.0);
float minus_one = static_cast<float>(-1);
float global_step_scalar = static_cast<float>(global_step[0]);
float gs_lr = global_step_scalar * __half2float(lr[0]);
float l1_scalar = __half2float(l1[0]);
float l2_scalar = __half2float(l2[0]);
for (size_t pos_x = blockIdx.x * blockDim.x + threadIdx.x; pos_x < inner_size; pos_x += gridDim.x * blockDim.x) {
for (size_t pos_y = blockIdx.y * blockDim.y + threadIdx.y; pos_y < shrink_num - 1;
pos_y += gridDim.y * blockDim.y) {
int32_t start_row = thready_pos_shrink[pos_y];
int32_t end_row = thready_pos_shrink[pos_y + 1];
int32_t update_pos = indices_sort[start_row] * inner_size + pos_x;
for (int idx = start_row; idx < end_row; ++idx) {
int grad_pos = rows_index[idx] * inner_size + pos_x;
accum[update_pos] += grad[grad_pos];
squared_accum[update_pos] += grad[grad_pos] * grad[grad_pos];
if (squared_accum[update_pos] <= __float2half(zero)) {
var[update_pos] = NAN;
continue;
}
if (gs_lr == zero) {
var[update_pos] = zero;
continue;
}
if (l1_scalar > zero) {
var[update_pos] = __float2half(
minus_one * (Sign(__half2float(accum[update_pos]))) *
(MaxFunc((((AbsFunc(__half2float(accum[update_pos]))) /
global_step_scalar) - l1_scalar), static_cast<float>(0.0))) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
} else {
var[update_pos] = __float2half(minus_one * (__half2float(accum[update_pos]) / global_step_scalar) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
}
}
}
}
}
template <>
__global__ void SparseApplyAdagradDAKernel(const size_t inner_size, half *var, half *accum, half *squared_accum,
const half *grad, const half *lr, const half *l1, const half *l2,
const int64_t *global_step, int32_t *rows_index, int32_t *indices_sort,
int32_t *thready_pos_shrink, int32_t shrink_num) {
float zero = static_cast<float>(0.0);
float minus_one = static_cast<float>(-1);
float global_step_scalar = static_cast<float>(global_step[0]);
float gs_lr = global_step_scalar * __half2float(lr[0]);
float l1_scalar = __half2float(l1[0]);
float l2_scalar = __half2float(l2[0]);
for (size_t pos_x = blockIdx.x * blockDim.x + threadIdx.x; pos_x < inner_size; pos_x += gridDim.x * blockDim.x) {
for (size_t pos_y = blockIdx.y * blockDim.y + threadIdx.y; pos_y < shrink_num - 1;
pos_y += gridDim.y * blockDim.y) {
int32_t start_row = thready_pos_shrink[pos_y];
int32_t end_row = thready_pos_shrink[pos_y + 1];
int32_t update_pos = indices_sort[start_row] * inner_size + pos_x;
for (int idx = start_row; idx < end_row; ++idx) {
int grad_pos = rows_index[idx] * inner_size + pos_x;
accum[update_pos] += grad[grad_pos];
squared_accum[update_pos] += grad[grad_pos] * grad[grad_pos];
if (squared_accum[update_pos] <= __float2half(zero)) {
var[update_pos] = NAN;
continue;
}
if (gs_lr == zero) {
var[update_pos] = 0;
continue;
}
if (l1_scalar > zero) {
var[update_pos] = __float2half(
minus_one * (Sign(__half2float(accum[update_pos]))) *
(MaxFunc((((AbsFunc(__half2float(accum[update_pos]))) /
global_step_scalar) - l1_scalar), static_cast<float>(0.0))) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
} else {
var[update_pos] = __float2half(minus_one * (__half2float(accum[update_pos]) / global_step_scalar) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
}
}
}
}
}
template <>
__global__ void SparseApplyAdagradDAKernel(const size_t inner_size, half *var, half *accum, half *squared_accum,
const half *grad, const half *lr, const half *l1, const half *l2,
const int64_t *global_step, int32_t *rows_index, int64_t *indices_sort,
int32_t *thready_pos_shrink, int32_t shrink_num) {
float zero = static_cast<float>(0.0);
float minus_one = static_cast<float>(-1);
float global_step_scalar = static_cast<float>(global_step[0]);
float gs_lr = global_step_scalar * __half2float(lr[0]);
float l1_scalar = __half2float(l1[0]);
float l2_scalar = __half2float(l2[0]);
for (size_t pos_x = blockIdx.x * blockDim.x + threadIdx.x; pos_x < inner_size; pos_x += gridDim.x * blockDim.x) {
for (size_t pos_y = blockIdx.y * blockDim.y + threadIdx.y; pos_y < shrink_num - 1;
pos_y += gridDim.y * blockDim.y) {
int32_t start_row = thready_pos_shrink[pos_y];
int32_t end_row = thready_pos_shrink[pos_y + 1];
int64_t update_pos = indices_sort[start_row] * inner_size + pos_x;
for (int idx = start_row; idx < end_row; ++idx) {
int grad_pos = rows_index[idx] * inner_size + pos_x;
accum[update_pos] += grad[grad_pos];
squared_accum[update_pos] += grad[grad_pos] * grad[grad_pos];
if (squared_accum[update_pos] <= __float2half(zero)) {
var[update_pos] = NAN;
continue;
}
if (gs_lr == zero) {
var[update_pos] = 0;
continue;
}
if (l1_scalar > zero) {
var[update_pos] = __float2half(
minus_one * (Sign(__half2float(accum[update_pos]))) *
(MaxFunc((((AbsFunc(__half2float(accum[update_pos]))) /
global_step_scalar) - l1_scalar), static_cast<float>(0.0))) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
} else {
var[update_pos] = __float2half(minus_one * (__half2float(accum[update_pos]) / global_step_scalar) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
}
}
}
}
}
template <>
__global__ void SparseApplyAdagradDAKernel(const size_t inner_size, half *var, half *accum, half *squared_accum,
const half *grad, const half *lr, const half *l1, const half *l2,
const int32_t *global_step, int32_t *rows_index, int64_t *indices_sort,
int32_t *thready_pos_shrink, int32_t shrink_num) {
float zero = static_cast<float>(0.0);
float minus_one = static_cast<float>(-1);
float global_step_scalar = static_cast<float>(global_step[0]);
float gs_lr = global_step_scalar * __half2float(lr[0]);
float l1_scalar = __half2float(l1[0]);
float l2_scalar = __half2float(l2[0]);
for (size_t pos_x = blockIdx.x * blockDim.x + threadIdx.x; pos_x < inner_size; pos_x += gridDim.x * blockDim.x) {
for (size_t pos_y = blockIdx.y * blockDim.y + threadIdx.y; pos_y < shrink_num - 1;
pos_y += gridDim.y * blockDim.y) {
int32_t start_row = thready_pos_shrink[pos_y];
int32_t end_row = thready_pos_shrink[pos_y + 1];
int64_t update_pos = indices_sort[start_row] * inner_size + pos_x;
for (int idx = start_row; idx < end_row; ++idx) {
int grad_pos = rows_index[idx] * inner_size + pos_x;
accum[update_pos] += grad[grad_pos];
squared_accum[update_pos] += grad[grad_pos] * grad[grad_pos];
if (squared_accum[update_pos] <= __float2half(zero)) {
var[update_pos] = NAN;
continue;
}
if (gs_lr == zero) {
var[update_pos] = 0;
continue;
}
if (l1_scalar > zero) {
var[update_pos] = __float2half(
minus_one * (Sign(__half2float(accum[update_pos]))) *
(MaxFunc((((AbsFunc(__half2float(accum[update_pos]))) /
global_step_scalar) - l1_scalar), static_cast<float>(0.0))) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
} else {
var[update_pos] = __float2half(minus_one * (__half2float(accum[update_pos]) / global_step_scalar) /
(l2_scalar + (sqrt(__half2float(squared_accum[update_pos]))) / gs_lr));
}
}
}
}
}
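// Marks the first slot of every run of equal values in the sorted index array (plus the sentinel slot at
// indices_num - 1) with its own offset and every other slot with -1, so a later stream compaction with
// cub::DeviceSelect::If can extract the run boundaries.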
template <typename S>
__global__ void SumOfRows(S *indices_sort, size_t indices_num, int32_t *thready_pos) {
for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < indices_num; idx += blockDim.x * gridDim.x) {
if (idx == 0 || idx == indices_num - 1 || indices_sort[idx] != indices_sort[idx - 1]) {
      thready_pos[idx] = static_cast<int32_t>(idx);
} else {
thready_pos[idx] = static_cast<int32_t>(-1);
}
}
}
struct GreaterThan {
__host__ __device__ __forceinline__ bool operator()(const int32_t &val) const { return (val > -1); }
};
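// Host-side driver: sorts the indices while keeping the original row order in rows_index, extracts the start of
// each unique-index run with SumOfRows + cub::DeviceSelect::If, and launches a 2-D grid in which x walks the
// columns of a row slice and y walks the unique rows.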
template <typename T, typename S, typename S2>
void CalSparseApplyAdagradDA(const size_t batch_size, size_t indices_size, const size_t size,
T *var, T *accum, T *squared_accum, const T *grad, const S *indices,
const T *lr, const T *l1, const T *l2, const S2 *global_step, T *output_var,
S *indices_sort, int32_t *rows_index, int32_t *thready_pos,
int32_t *thready_pos_shrink, int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream) {
auto policy = thrust::cuda::par.on(cuda_stream);
thrust::sequence(policy,
thrust::device_pointer_cast(rows_index),
thrust::device_pointer_cast(rows_index) + indices_size);
thrust::copy(thrust::device_pointer_cast(indices),
thrust::device_pointer_cast(indices) + indices_size,
thrust::device_pointer_cast(indices_sort));
thrust::stable_sort_by_key(policy,
thrust::device_pointer_cast(indices_sort),
thrust::device_pointer_cast(indices_sort) + indices_size,
thrust::device_pointer_cast(rows_index));
const int inner_size = static_cast<int>(size / indices_size);
SumOfRows<<<CUDA_BLOCKS(device_id, indices_size + 1), CUDA_THREADS(device_id), 0, cuda_stream>>>(
indices_sort, indices_size + 1, thready_pos);
GreaterThan greater;
void *s_temp_storage = nullptr;
size_t s_temp_storage_bytes = 0;
(void)cub::DeviceSelect::If(nullptr, s_temp_storage_bytes, static_cast<float *>(nullptr),
static_cast<float *>(nullptr), static_cast<int *>(nullptr), indices_size + 1, greater,
cuda_stream);
(void)cudaMalloc(&s_temp_storage, s_temp_storage_bytes);
(void)cub::DeviceSelect::If(s_temp_storage, s_temp_storage_bytes, thready_pos, thready_pos_shrink, shrink_num,
indices_size + 1, greater, cuda_stream);
cudaFree(s_temp_storage);
cudaStreamSynchronize(reinterpret_cast<cudaStream_t>(cuda_stream));
int32_t h_shrink_num = 0;
cudaMemcpy(&h_shrink_num, shrink_num, sizeof(int32_t), cudaMemcpyDeviceToHost);
std::vector<int> thready_pos_shrink_h(indices_size + 1);
cudaMemcpy(thready_pos_shrink_h.data(), thready_pos_shrink, h_shrink_num * sizeof(int32_t), cudaMemcpyDeviceToHost);
int32_t thread_y = h_shrink_num - 1 > 128 ? 128 : (h_shrink_num - 1);
int pow_num = static_cast<int>(log(thread_y * 1.0) / log(2.0)) + 1;
thread_y = static_cast<int>(pow(2.0, pow_num));
int32_t thread_x = 512 / thread_y > inner_size ? inner_size : (512 / thread_y);
cudaDeviceProp prop;
(void)cudaGetDeviceProperties(&prop, device_id);
int max_blocks = prop.multiProcessorCount;
int block_y = (h_shrink_num - 1) / thread_y > 8 ? 8 : ((h_shrink_num - 1) / thread_y);
block_y = block_y == 0 ? 1 : block_y;
int32_t need_block_x = (inner_size - 1) / thread_x + 1;
int block_x = need_block_x > (max_blocks / block_y) ? (max_blocks / block_y) : need_block_x;
dim3 block_dim(thread_x, thread_y);
dim3 grid_dim(block_x, block_y);
SparseApplyAdagradDAKernel<<<grid_dim, block_dim, 0, cuda_stream>>>(
inner_size, var, accum, squared_accum, grad, lr, l1, l2,
global_step, rows_index, indices_sort, thready_pos_shrink, h_shrink_num);
cudaStreamSynchronize(reinterpret_cast<cudaStream_t>(cuda_stream));
cudaMemcpy(output_var, var, size * sizeof(T), cudaMemcpyDeviceToDevice);
}
template <typename T, typename S, typename S1>
CUDA_LIB_EXPORT void CalSparseApplyAdagradDA(const size_t batch_size,
size_t indices_size, const size_t size,
T *var, T *accum, T *squared_accum,
const T *grad, const S *indices, const T *lr,
const T *l1, const T *l2, const S1 *global_step,
T *output_var, S *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int8_t, int32_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int8_t *var,
int8_t *accum, int8_t *squared_accum, const int8_t *grad,
const int32_t *indices, const int8_t *lr,
const int8_t *l1, const int8_t *l2,
const int64_t *global_step, int8_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int8_t, int64_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int8_t *var,
int8_t *accum, int8_t *squared_accum, const int8_t *grad,
const int64_t *indices, const int8_t *lr,
const int8_t *l1, const int8_t *l2,
const int64_t *global_step, int8_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int8_t, int32_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int8_t *var,
int8_t *accum, int8_t *squared_accum, const int8_t *grad,
const int32_t *indices, const int8_t *lr,
const int8_t *l1, const int8_t *l2,
const int32_t *global_step, int8_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int8_t, int64_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int8_t *var,
int8_t *accum, int8_t *squared_accum, const int8_t *grad,
const int64_t *indices, const int8_t *lr,
const int8_t *l1, const int8_t *l2,
const int32_t *global_step, int8_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int16_t, int32_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int16_t *var,
int16_t *accum, int16_t *squared_accum,
const int16_t *grad,
const int32_t *indices, const int16_t *lr,
const int16_t *l1, const int16_t *l2,
const int64_t *global_step, int16_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int16_t, int64_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int16_t *var,
int16_t *accum, int16_t *squared_accum,
const int16_t *grad,
const int64_t *indices, const int16_t *lr,
const int16_t *l1, const int16_t *l2,
const int64_t *global_step, int16_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int16_t, int32_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int16_t *var,
int16_t *accum, int16_t *squared_accum,
const int16_t *grad,
const int32_t *indices, const int16_t *lr,
const int16_t *l1, const int16_t *l2,
const int32_t *global_step, int16_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int16_t, int64_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int16_t *var,
int16_t *accum, int16_t *squared_accum,
const int16_t *grad,
const int64_t *indices, const int16_t *lr,
const int16_t *l1, const int16_t *l2,
const int32_t *global_step, int16_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int32_t, int32_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int32_t *var,
int32_t *accum, int32_t *squared_accum,
const int32_t *grad,
const int32_t *indices, const int32_t *lr,
const int32_t *l1, const int32_t *l2,
const int64_t *global_step, int32_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int32_t, int64_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int32_t *var,
int32_t *accum, int32_t *squared_accum,
const int32_t *grad,
const int64_t *indices, const int32_t *lr,
const int32_t *l1, const int32_t *l2,
const int64_t *global_step, int32_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int32_t, int32_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int32_t *var,
int32_t *accum, int32_t *squared_accum,
const int32_t *grad,
const int32_t *indices, const int32_t *lr,
const int32_t *l1, const int32_t *l2,
const int32_t *global_step, int32_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int32_t, int64_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int32_t *var,
int32_t *accum, int32_t *squared_accum,
const int32_t *grad,
const int64_t *indices, const int32_t *lr,
const int32_t *l1, const int32_t *l2,
const int32_t *global_step, int32_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int64_t, int32_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int64_t *var,
int64_t *accum, int64_t *squared_accum,
const int64_t *grad,
const int32_t *indices, const int64_t *lr,
const int64_t *l1, const int64_t *l2,
const int64_t *global_step, int64_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int64_t, int64_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, int64_t *var,
int64_t *accum, int64_t *squared_accum,
const int64_t *grad,
const int64_t *indices, const int64_t *lr,
const int64_t *l1, const int64_t *l2,
const int64_t *global_step, int64_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int64_t, int32_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int64_t *var,
int64_t *accum, int64_t *squared_accum,
const int64_t *grad,
const int32_t *indices, const int64_t *lr,
const int64_t *l1, const int64_t *l2,
const int32_t *global_step, int64_t *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<int64_t, int64_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, int64_t *var,
int64_t *accum, int64_t *squared_accum,
const int64_t *grad,
const int64_t *indices, const int64_t *lr,
const int64_t *l1, const int64_t *l2,
const int32_t *global_step, int64_t *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<double, int32_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, double *var,
double *accum, double *squared_accum, const double *grad,
const int32_t *indices, const double *lr,
const double *l1, const double *l2,
const int64_t *global_step, double *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<double, int64_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, double *var,
double *accum, double *squared_accum, const double *grad,
const int64_t *indices, const double *lr,
const double *l1, const double *l2,
const int64_t *global_step, double *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<double, int32_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, double *var,
double *accum, double *squared_accum, const double *grad,
const int32_t *indices, const double *lr,
const double *l1, const double *l2,
const int32_t *global_step, double *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<double, int64_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, double *var,
double *accum, double *squared_accum, const double *grad,
const int64_t *indices, const double *lr,
const double *l1, const double *l2,
const int32_t *global_step, double *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<float, int32_t, int64_t>(const size_t batch_size,
size_t indices_size, const size_t size, float *var,
float *accum, float *squared_accum, const float *grad,
const int32_t *indices, const float *lr,
const float *l1, const float *l2,
const int64_t *global_step, float *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<float, int64_t, int64_t>(const size_t batch_size,
size_t indices_size,
const size_t size, float *var,
float *accum, float *squared_accum, const float *grad,
const int64_t *indices, const float *lr,
const float *l1, const float *l2,
const int64_t *global_step, float *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<float, int32_t, int32_t>(const size_t batch_size,
size_t indices_size, const size_t size, float *var,
float *accum, float *squared_accum, const float *grad,
const int32_t *indices, const float *lr,
const float *l1, const float *l2,
const int32_t *global_step, float *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<float, int64_t, int32_t>(const size_t batch_size,
size_t indices_size,
const size_t size, float *var,
float *accum, float *squared_accum, const float *grad,
const int64_t *indices, const float *lr,
const float *l1, const float *l2,
const int32_t *global_step, float *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<half, int32_t, int64_t>(const size_t batch_size,
size_t indices_size,
const size_t size, half *var,
half *accum, half *squared_accum, const half *grad,
const int32_t *indices, const half *lr,
const half *l1, const half *l2,
const int64_t *global_step, half *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<half, int64_t, int64_t>(const size_t batch_size,
size_t indices_size,
const size_t size, half *var,
half *accum, half *squared_accum, const half *grad,
const int64_t *indices, const half *lr,
const half *l1, const half *l2,
const int64_t *global_step, half *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<half, int32_t, int32_t>(const size_t batch_size,
size_t indices_size,
const size_t size, half *var,
half *accum, half *squared_accum, const half *grad,
const int32_t *indices, const half *lr,
const half *l1, const half *l2,
const int32_t *global_step, half *output_var,
int32_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalSparseApplyAdagradDA<half, int64_t, int32_t>(const size_t batch_size,
size_t indices_size,
const size_t size, half *var,
half *accum, half *squared_accum, const half *grad,
const int64_t *indices, const half *lr,
const half *l1, const half *l2,
const int32_t *global_step, half *output_var,
int64_t *indices_sort, int32_t *rows_index,
int32_t *thready_pos, int32_t *thready_pos_shrink,
int32_t *shrink_num, const uint32_t &device_id,
cudaStream_t cuda_stream);
|
9787dce0f7a3fc0925d8c7ea8588bad7cfb840a4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "linalg/matrix.h"
#include "linalg/cuda_util.h"
#include <cassert> // TODO: release-mode assert
#include <iostream>
#include <iomanip>
#include <math.h>
#include <hip/hip_runtime.h> // strangely, not needed by nvcc
#include <hiprand/hiprand.h>
#include <cereal/archives/portable_binary.hpp>
#include <cereal/types/memory.hpp>
#include <cereal/types/vector.hpp>
namespace matrix_cuda_util {
void SynchronizeForPerfLogging() {
hipDeviceSynchronize();
}
}
int Matrix::Index(int i, int j, int k) const {
return k * rows_ * cols_ + i * cols_ + j;
}
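// Lightweight by-value view of a Matrix (raw device pointer plus dimensions)
// that can be passed straight to kernels; get/set address element (i, j, k)
// in row-major order within each depth layer.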
struct MatrixPack {
float* items;
int rows;
int cols;
int depth;
int layer_size;
explicit MatrixPack(const Matrix& m) :
items(m.data_.get()),
rows(m.rows()),
cols(m.cols()),
depth(m.depth()),
layer_size(m.rows() * m.cols()) {}
__forceinline__ __device__ float get(int i, int j, int k) {
return items[k * layer_size + i * cols + j];
}
__forceinline__ __device__ float get(int i, int j) {
return items[i * cols + j];
}
__forceinline__ __device__ void set(int i, int j, int k, float f) {
items[k * layer_size + i * cols + j] = f;
}
__forceinline__ __device__ void set(int i, int j, float f) {
items[i * cols + j] = f;
}
__forceinline__ __device__ void div(int i, int j, float f) {
items[i * cols + j] /= f;
}
__forceinline__ __device__ void add(int i, int j, float a) {
items[i * cols + j] += a;
}
__forceinline__ __device__ bool inside(int i, int j, int k) {
return i < rows && j < cols && k < depth;
}
__forceinline__ __device__ bool inside(int i, int j) {
return i < rows && j < cols;
}
};
dim3 CalculateBlocks(
const Matrix& result,
dim3 threads_per_block) {
return dim3(
(result.rows() + threads_per_block.x - 1) / threads_per_block.x,
(result.cols() + threads_per_block.y - 1) / threads_per_block.y,
(result.depth() + threads_per_block.z - 1) / threads_per_block.z);
}
Matrix::Matrix() :
rows_(0),
cols_(0),
depth_(0),
size_(0),
data_(NULL) {}
std::shared_ptr<float> AllocateData(int size) {
float* data;
CUDA_CALL(hipMalloc(&data, size * sizeof(float)));
return std::shared_ptr<float>(data, hipFree);
}
std::shared_ptr<float> ImportData(int size, const float* host_data) {
std::shared_ptr<float> device_data(AllocateData(size));
CUDA_CALL(hipMemcpy(
device_data.get(),
host_data,
size * sizeof(float),
hipMemcpyHostToDevice));
CUDA_ASYNC_CHECK();
return device_data;
}
Matrix::Matrix(int rows, int cols, int depth, const std::vector<float>& data) :
rows_(rows),
cols_(cols),
depth_(depth),
size_(rows * cols * depth) {
SetVector(data);
}
Matrix::Matrix(int rows, int cols, int depth) :
rows_(rows),
cols_(cols),
depth_(depth),
size_(rows * cols * depth) {
data_ = AllocateData(size_);
}
std::shared_ptr<float> Matrix::get_host_data() const {
std::shared_ptr<float> host_data;
host_data.reset(new float[size_], std::default_delete<float[]>() );
CUDA_CALL(hipMemcpy(
host_data.get(),
data_.get(),
size_ * sizeof(float),
hipMemcpyDeviceToHost));
return host_data;
}
void Matrix::SetVector(const std::vector<float>& data) {
assert(data.size() == size_);
data_ = ImportData(size_, &data[0]);
}
std::vector<float> Matrix::GetVector() const {
std::shared_ptr<float> host_data(get_host_data());
std::vector<float> v;
for (int k = 0; k < depth_; ++k) {
for (int i = 0; i < rows_; ++i) {
for (int j = 0; j < cols_; ++j) {
v.push_back(host_data.get()[Index(i, j, k)]);
}
}
}
return v;
}
void Matrix::Print() const {
std::cout << std::fixed << std::setw(4) << std::setprecision(2);
std::shared_ptr<float> host_data(get_host_data());
std::cout << "Matrix "
<< rows_ << "x"
<< cols_ << "x"
<< depth_
<< " (" << size_ << ")" << std::endl;
for (int k = 0; k < depth_; ++k) {
for (int i = 0; i < rows_; ++i) {
for (int j = 0; j < cols_; ++j) {
std::cout << host_data.get()[Index(i, j, k)] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
}
}
void Matrix::AssertEquals(const Matrix& other) const {
AssertSameDimensions(other);
assert(GetVector() == other.GetVector());
}
void Matrix::AssertDimensions(int rows, int cols, int depth) const {
assert(rows_ == rows && cols_ == cols && depth_ == depth);
}
void Matrix::AssertSameDimensions(const Matrix& other) const {
assert(rows_ == other.rows_ && cols_ == other.cols_ && depth_ == other.depth_);
}
void Matrix::AssertRows(int rows) const {
assert(rows_ == rows);
}
void Matrix::AssertDepth(int depth) const {
assert(depth_ == depth);
}
__global__ void MatrixTranspose(MatrixPack a, MatrixPack t) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if (a.inside(i, j)) {
t.set(j, i, a.get(i, j));
}
}
Matrix Matrix::T() const {
assert(depth_ == 1);
Matrix result(cols_, rows_, depth_);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(*this, threads_per_block);
hipLaunchKernelGGL(( MatrixTranspose), dim3(blocks), dim3(threads_per_block), 0, 0,
MatrixPack(*this), MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
__global__ void MatrixRot180(
MatrixPack a, MatrixPack r) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (a.inside(i, j, k)) {
r.set(r.rows - i - 1, r.cols - j - 1, k, a.get(i, j, k));
}
}
Matrix Matrix::Rot180() const {
Matrix result(rows_, cols_, depth_);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(result, threads_per_block);
hipLaunchKernelGGL(( MatrixRot180), dim3(blocks), dim3(threads_per_block), 0, 0,
MatrixPack(*this),
MatrixPack(result));
return result;
}
__global__ void MatrixDotProd(
MatrixPack a,
MatrixPack b,
MatrixPack c) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if (c.inside(i, j)) {
float sum = 0.0;
for (int k = 0; k < a.cols; ++k) {
sum += a.get(i, k) * b.get(k, j);
}
c.set(i, j, sum);
}
}
Matrix Matrix::Dot(const Matrix& other) const {
assert(cols_ == other.rows_);
assert(depth_ == 1);
int c_rows = rows_;
int c_cols = other.cols_;
Matrix result(c_rows, c_cols, 1);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(result, threads_per_block);
hipLaunchKernelGGL(( MatrixDotProd), dim3(blocks), dim3(threads_per_block), 0, 0,
MatrixPack(*this),
MatrixPack(other),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
__global__ void VecSigmoid(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = 1.0 / (1.0 + exp(-a[i]));
}
}
__global__ void VecSigmoidGradient(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
float sigma = 1.0 / (1.0 + exp(-a[i]));
b[i] = sigma * (1.0 - sigma);
}
}
__global__ void VecReLU(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = max(0.0f, a[i]);
}
}
__global__ void VecReLUGradient(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
if (a[i] < 0.0f) {
b[i] = 0.0f;
} else {
b[i] = 1.0f;
}
}
}
__global__ void VecLReLU(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = max(0.01f * a[i], a[i]);
}
}
__global__ void VecLReLUGradient(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
if (a[i] < 0.0f) {
b[i] = 0.01f;
} else {
b[i] = 1.0f;
}
}
}
__global__ void VecSquare(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = a[i] * a[i];
}
}
__global__ void VecSqrt(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = sqrt(a[i]);
}
}
namespace matrix_mappers {
// We provide factory methods instead of direct implementations
// so that users of device_matrix.h won't need to depend on
// CUDA stuff.
Map1Func Sigmoid() {
return &VecSigmoid;
}
Map1Func SigmoidGradient() {
return &VecSigmoidGradient;
}
Map1Func ReLU() {
return &VecReLU;
}
Map1Func ReLUGradient() {
return &VecReLUGradient;
}
Map1Func LReLU() {
return &VecLReLU;
}
Map1Func LReLUGradient() {
return &VecLReLUGradient;
}
Map1Func Square() {
return &VecSquare;
}
Map1Func Sqrt() {
return &VecSqrt;
}
} // namespace matrix_mappers
Matrix Matrix::Map1(::matrix_mappers::Map1Func map) const {
Matrix result(rows_, cols_, depth_);
hipLaunchKernelGGL(( map), dim3((size_ + 255) / 256), dim3(256), 0, 0,
data_.get(),
result.data_.get(),
size_);
CUDA_ASYNC_CHECK();
return result;
}
__global__ void ElementwiseAddKernel(float* a, float* b, float* c, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
c[i] = a[i] + b[i];
}
}
__global__ void ElementwiseMultiplyKernel(float* a, float* b, float* c, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
c[i] = a[i] * b[i];
}
}
__global__ void ElementwiseDivideKernel(float* a, float* b, float* c, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
c[i] = a[i] / b[i];
}
}
Matrix Matrix::Map2(const Matrix& other, ::matrix_mappers::Map2Func map) const {
AssertSameDimensions(other);
Matrix result(rows_, cols_, depth_);
hipLaunchKernelGGL(( map), dim3((size_ + 255) / 256), dim3(256), 0, 0,
data_.get(), other.data_.get(), result.data_.get(), size_);
CUDA_ASYNC_CHECK();
return result;
}
Matrix Matrix::Add(const Matrix& other) const {
return Map2(other, ElementwiseAddKernel);
}
Matrix Matrix::ElementwiseMultiply(const Matrix& other) const {
return Map2(other, ElementwiseMultiplyKernel);
}
Matrix Matrix::ElementwiseDivide(const Matrix& other) const {
return Map2(other, ElementwiseDivideKernel);
}
__global__ void AddConstKernel(float* a, float* b, int size, float c) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = a[i] + c;
}
}
__global__ void PowConstKernel(float* a, float* b, int size, float exp) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = pow(a[i], exp);
}
}
__global__ void MultiplyConstKernel(float* a, float* b, int size, float m) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = a[i] * m;
}
}
__global__ void DivideConstKernel(float* a, float* b, int size, float d) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = a[i] / d;
}
}
Matrix Matrix::Map1P(float param, ::matrix_mappers::Map1PFunc map) const {
Matrix result(rows_, cols_, depth_);
hipLaunchKernelGGL(( map), dim3((size_ + 255) / 256), dim3(256), 0, 0,
data_.get(), result.data_.get(), size_, param);
CUDA_ASYNC_CHECK();
return result;
}
Matrix Matrix::AddConst(float c) const {
return Map1P(c, AddConstKernel);
}
Matrix Matrix::Pow(float exp) const {
return Map1P(exp, PowConstKernel);
}
Matrix Matrix::Multiply(float m) const {
return Map1P(m, MultiplyConstKernel);
}
Matrix Matrix::Divide(float m) const {
return Map1P(m, DivideConstKernel);
}
__global__ void MatrixSumLayers(
MatrixPack a, MatrixPack b) {
int b_index = threadIdx.x + blockDim.x * blockIdx.x;
if (b_index < b.depth) {
float result = 0.0;
for (int k = b_index; k < a.depth; k += b.depth) {
for (int i = 0; i < a.rows; ++i) {
for (int j = 0; j < a.cols; ++j) {
result += a.get(i, j, k);
}
}
}
b.items[b_index] = result;
}
}
float Matrix::Sum() const {
Matrix result(1, 1, 1);
hipLaunchKernelGGL(( MatrixSumLayers), dim3(1), dim3(1), 0, 0, MatrixPack(*this), MatrixPack(result));
CUDA_ASYNC_CHECK();
return result.GetValue(0, 0, 0);
}
__global__ void MatrixSumColumns(
MatrixPack a, MatrixPack b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < a.rows) {
float result = 0.0f;
for (int j = 0; j < a.cols; ++j) {
result += a.get(i, j, 0);
}
b.set(i, 0, result);
}
}
Matrix Matrix::Sum(bool layered, int layers) const {
if (!layered) {
assert(rows_ == layers);
// sum columns
assert(depth_ == 1);
Matrix result(rows_, 1, 1);
hipLaunchKernelGGL(( MatrixSumColumns), dim3((rows_ + 255) / 256), dim3(256), 0, 0,
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
} else {
// sum layers
assert(layers > 0);
assert(depth_ % layers == 0);
Matrix result(1, 1, layers);
hipLaunchKernelGGL(( MatrixSumLayers), dim3((layers + 7) / 8), dim3(8), 0, 0,
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
}
__global__ void MatrixRepeatLayers(
MatrixPack a,
MatrixPack b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (b.inside(i, j, k)) {
b.set(i, j, k, a.get(0, 0, k % a.depth));
}
}
__global__ void MatrixRepeatColumns(
MatrixPack a,
MatrixPack b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if (b.inside(i, j)) {
b.set(i, j, a.get(i, 0));
}
}
Matrix Matrix::Repeat(
bool layered, int rows, int cols, int depth) const {
if (layered) {
assert(depth > 0);
assert(depth % depth_ == 0);
assert(rows_ == 1);
assert(cols_ == 1);
Matrix result(rows, cols, depth);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(result, threads_per_block);
hipLaunchKernelGGL(( MatrixRepeatLayers), dim3(blocks), dim3(threads_per_block), 0, 0,
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
} else {
assert(rows % rows_ == 0);
assert(depth == 1);
assert(depth_ == 1);
assert(cols_ == 1);
Matrix result(rows, cols, depth);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(result, threads_per_block);
hipLaunchKernelGGL(( MatrixRepeatColumns), dim3(blocks), dim3(threads_per_block), 0, 0,
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
}
Matrix Matrix::Repeat(bool layered, const Matrix& size_template) const {
return Repeat(
layered,
size_template.rows(),
size_template.cols(),
size_template.depth());
}
__global__ void MatrixPerLayerSum(
MatrixPack a,
MatrixPack b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (b.inside(i, j, k)) {
float sum = 0.0f;
for (int k1 = k; k1 < a.depth; k1 += b.depth) {
sum += a.get(i, j, k1);
}
b.set(i, j, k, sum);
}
}
Matrix Matrix::PerLayerSum(int layers) const {
assert(depth_ % layers == 0);
Matrix result(rows_, cols_, layers);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(result, threads_per_block);
hipLaunchKernelGGL(( MatrixPerLayerSum), dim3(blocks), dim3(threads_per_block), 0, 0,
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
__global__ void MatrixPerLayerRepeat(
MatrixPack a,
MatrixPack b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (b.inside(i, j, k)) {
b.set(i, j, k, a.get(i, j, k % a.depth));
}
}
Matrix Matrix::PerLayerRepeat(int times) const {
Matrix result(rows_, cols_, depth_ * times);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(result, threads_per_block);
hipLaunchKernelGGL(( MatrixPerLayerRepeat), dim3(blocks), dim3(threads_per_block), 0, 0,
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
__global__ void VecL2(float* A, int len, float* B) {
float result = 0.0;
for (int i = 0; i < len; ++i) {
result += A[i] * A[i];
}
B[0] = result;
}
float Matrix::L2() const {
Matrix result(1, 1, 1);
hipLaunchKernelGGL(( VecL2), dim3(1), dim3(1), 0, 0, data_.get(), size_, result.data_.get());
CUDA_ASYNC_CHECK();
return result.GetValue(0, 0, 0);
}
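// Computes the softmax cross-entropy loss per column (one sample per column):
// the column maximum is subtracted for numerical stability and
// -score[expected_class] + log(sum(exp(scores))) is written into c.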
__global__ void VecSoftmax(MatrixPack a, MatrixPack b, MatrixPack c) {
int col = threadIdx.x + blockDim.x * blockIdx.x;
if (col < a.cols) {
// Get max value from column. Needed for numerical stability, see
// http://cs231n.github.io/linear-classify/#softmax
float max_val = a.get(0, col);
for (int i = 1; i < a.rows; i++) {
float val = a.get(i, col);
if (val > max_val) {
max_val = val;
}
}
int expected_class = static_cast<int>(b.get(0, col));
float expected_class_score = -1.0;
float sum = 0.0f;
for (int i = 0; i < a.rows; ++i) {
float val = a.get(i, col) - max_val;
if (i == expected_class) {
expected_class_score = val;
}
sum += exp(val);
}
c.set(0, col, -expected_class_score + log(sum));
}
}
float Matrix::Softmax(const Matrix& expected_class) const {
assert(depth_ == 1);
// rows_ = number of classes
// cols_ = number of samples (we run the same algorithm for each sample)
assert(expected_class.rows_ == 1);
assert(expected_class.cols_ == cols_);
assert(expected_class.depth_ == 1);
Matrix result(1, cols_, 1);
hipLaunchKernelGGL(( VecSoftmax), dim3((cols_ + 255) / 256), dim3(256), 0, 0,
MatrixPack(*this),
MatrixPack(expected_class),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result.Sum();
}
__global__ void VecSoftmaxGradient(
MatrixPack a,
MatrixPack b,
MatrixPack c) {
// TODO: clean up code duplication with VecSoftmax
int col = threadIdx.x + blockDim.x * blockIdx.x;
if (col < a.cols) {
float max_val = a.get(0, col);
for (int i = 1; i < a.rows; i++) {
float val = a.get(i, col);
if (val > max_val) {
max_val = val;
}
}
float sum = 0.0f;
for (int i = 0; i < a.rows; ++i) {
float val = exp(a.get(i, col) - max_val);
c.set(i, col, val);
sum += val;
}
int expected_class = static_cast<int>(b.get(0, col));
for (int i = 0; i < a.rows; ++i) {
c.div(i, col, sum);
if (i == expected_class) {
c.add(i, col, -1.0f);
}
}
}
}
Matrix Matrix::SoftmaxGradient(const Matrix& expected_class) const {
// Covered in cnn/error_layer_test.cc.
assert(depth_ == 1);
// rows_ = number of classes
// cols_ = number of samples (we run the same algorithm for each sample)
assert(expected_class.rows_ == 1);
assert(expected_class.cols_ == cols_);
assert(expected_class.depth_ == 1);
Matrix result(rows_, cols_, 1);
hipLaunchKernelGGL(( VecSoftmaxGradient), dim3((cols_ + 255) / 256), dim3(256), 0, 0,
MatrixPack(*this),
MatrixPack(expected_class),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
__global__ void VecNumMatches(MatrixPack a, MatrixPack b, MatrixPack c) {
int col = threadIdx.x + blockDim.x * blockIdx.x;
if (col < a.cols) {
// Get max value from column.
bool unique = true;
float max_val = a.get(0, col);
for (int i = 1; i < a.rows; i++) {
float val = a.get(i, col);
if (val > max_val) {
max_val = val;
unique = true;
} else if (val == max_val) {
unique = false;
}
}
if (unique) {
int expected_class = static_cast<int>(b.get(0, col));
float expected_class_score = a.get(expected_class, col);
if (expected_class_score == max_val) {
c.set(0, col, 1.0f);
} else {
c.set(0, col, 0.0f);
}
} else {
c.set(0, col, 0.0f);
}
}
}
float Matrix::NumMatches(const Matrix& expected_class) const {
assert(depth_ == 1);
// rows_ = number of classes
// cols_ = number of samples (we run the same algorithm for each sample)
assert(expected_class.rows_ == 1);
assert(expected_class.cols_ == cols_);
assert(expected_class.depth_ == 1);
Matrix result(1, cols_, 1);
hipLaunchKernelGGL(( VecNumMatches), dim3((cols_ + 255) / 256), dim3(256), 0, 0,
MatrixPack(*this),
MatrixPack(expected_class),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result.Sum();
}
__global__ void VecFill(float value, float* A, int a_size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < a_size) {
A[i] = value;
}
}
void Matrix::Fill(float value) {
hipLaunchKernelGGL(( VecFill), dim3((size_ + 255) / 256), dim3(256), 0, 0, value, data_.get(), size_);
CUDA_ASYNC_CHECK();
}
__global__ void MatrixConvolution(
int layers_per_image,
MatrixPack a, bool a_major, int num_a_images,
int a_row_shift, int a_col_shift,
MatrixPack filters, bool filters_major, int num_filters_images,
MatrixPack b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (b.inside(i, j, k)) {
// k: destination depth-level = id of filter to apply
// layout of resulting matrix (list of layers):
//
// 1st image with 1st filter
// 1st image with 2nd filter
// ...
// 2nd image with 1st filter
// 2nd image with 2nd filter
// ...
int a_i = i + a_row_shift;
int a_j = j + a_col_shift;
int filter_id = k % num_filters_images;
int image_id = k / num_filters_images;
float sum = 0.0;
for (int fk = 0; fk < layers_per_image; ++fk) {
int filters_k = 0; // Layer id in |filters| to use below.
int a_k = 0; // Layer id in |a| to use now below.
if (a_major) {
a_k = fk + image_id * layers_per_image;
} else {
a_k = fk * num_a_images + image_id;
}
if (filters_major) {
filters_k = fk + filter_id * layers_per_image;
} else {
filters_k = fk * num_filters_images + filter_id;
}
for (int fi = 0; fi < filters.rows; ++fi) {
for (int fj = 0; fj < filters.cols; ++fj) {
if (fi >= -a_i && fi < a.rows - a_i && fj >= -a_j && fj < a.cols - a_j) {
sum += filters.get(fi, fj, filters_k) * a.get(a_i + fi, a_j + fj, a_k);
}
}
}
}
b.set(i, j, k, sum);
}
}
Matrix Matrix::Convolution(
int layers_per_image,
const Matrix& a, bool a_major,
const Matrix& b, bool b_major) {
return Convolution(
layers_per_image,
a, a_major, 0, 0,
b, b_major, 0, 0,
0, 0);
}
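// Full convolution entry point with explicit zero-padding on both operands
// and on the result. The output height is
//   a.rows + 2*a_row_padding - b.rows - 2*b_row_padding + 1 - 2*c_row_padding
// (and analogously for the width); a_row_shift/a_col_shift translate result
// coordinates back into unpadded |a| coordinates, and the kernel treats
// out-of-range taps as zero.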
Matrix Matrix::Convolution(
int layers_per_image,
const Matrix& a, bool a_major, int a_row_padding, int a_col_padding,
const Matrix& b, bool b_major, int b_row_padding, int b_col_padding,
int c_row_padding, int c_col_padding) {
int row_slots = a.rows() + 2 * a_row_padding - b.rows() - 2 * b_row_padding + 1;
int col_slots = a.cols() + 2 * a_col_padding - b.cols() - 2 * b_col_padding + 1;
assert(a.depth() % layers_per_image == 0);
assert(b.depth() % layers_per_image == 0);
int num_a_images = a.depth() / layers_per_image;
int num_b_images = b.depth() / layers_per_image;
Matrix c(
row_slots - 2 * c_row_padding,
col_slots - 2 * c_col_padding,
num_a_images * num_b_images);
int a_row_shift = c_row_padding - a_row_padding + b_row_padding;
int a_col_shift = c_col_padding - a_col_padding + b_col_padding;
dim3 threads_per_block(1, min(c.cols(), 32), 4);
dim3 blocks = CalculateBlocks(c, threads_per_block);
hipLaunchKernelGGL(( MatrixConvolution), dim3(blocks), dim3(threads_per_block), 0, 0,
layers_per_image,
MatrixPack(a), a_major, num_a_images,
a_row_shift, a_col_shift,
MatrixPack(b), b_major, num_b_images,
MatrixPack(c));
CUDA_ASYNC_CHECK();
return c;
}
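// Max-pooling over non-overlapping pool_rows x pool_cols windows: |pooled|
// receives the maximum of each window and |switches| stores the flattened
// in-window index of that maximum, so PoolingSwitch can route gradients back
// in the backward pass.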
__global__ void MatrixPooling(
int pool_rows, int pool_cols,
MatrixPack a,
MatrixPack pooled,
MatrixPack switches) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (i < pooled.rows && j < pooled.cols && k < pooled.depth) {
int best_sub_index = -1;
float best_value = 0;
for (int a_sub_index = 0; a_sub_index < pool_rows * pool_cols; a_sub_index++) {
float value = a.get(
i * pool_rows + a_sub_index / pool_cols,
j * pool_cols + a_sub_index % pool_cols,
k);
if (best_sub_index < 0 || value > best_value) {
best_sub_index = a_sub_index;
best_value = value;
}
}
pooled.set(i, j, k, best_value);
switches.set(i, j, k, best_sub_index);
}
}
std::pair<Matrix, Matrix> Matrix::Pooling(
int pool_rows, int pool_cols) const {
assert(rows_ % pool_rows == 0);
assert(cols_ % pool_cols == 0);
Matrix pooled(rows_ / pool_rows, cols_ / pool_cols, depth_);
Matrix switches(rows_ / pool_rows, cols_ / pool_cols, depth_);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(pooled, threads_per_block);
hipLaunchKernelGGL(( MatrixPooling), dim3(blocks), dim3(threads_per_block), 0, 0,
pool_rows, pool_cols,
MatrixPack(*this),
MatrixPack(pooled),
MatrixPack(switches));
CUDA_ASYNC_CHECK();
return std::make_pair(pooled, switches);
}
__global__ void MatrixPoolingSwitch(
int pool_rows, int pool_cols,
MatrixPack switches,
MatrixPack input,
MatrixPack result) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (input.inside(i, j, k)) {
int sub_index = switches.get(i, j, k);
result.set(
i * pool_rows + sub_index / pool_cols,
j * pool_cols + sub_index % pool_cols,
k,
input.get(i, j, k));
}
}
Matrix Matrix::PoolingSwitch(
const Matrix& switches,
int pool_rows, int pool_cols) const {
AssertSameDimensions(switches);
Matrix result(rows_ * pool_rows, cols_ * pool_cols, depth_);
result.Fill(0);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(switches, threads_per_block);
hipLaunchKernelGGL(( MatrixPoolingSwitch), dim3(blocks), dim3(threads_per_block), 0, 0,
pool_rows, pool_cols,
MatrixPack(switches),
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
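// ReshapeToColumns views the matrix as depth_ / unit_depth units and returns a 2-D
// matrix with one unit per column, i.e. (rows_ * cols_ * unit_depth) rows by
// (depth_ / unit_depth) columns; ReshapeFromColumns below is its inverse.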
Matrix Matrix::ReshapeToColumns(int unit_depth) const {
assert(depth_ % unit_depth == 0);
Matrix rows(*this);
rows.cols_ = rows_ * cols_ * unit_depth;
rows.rows_ = depth_ / unit_depth;
rows.depth_ = 1;
return rows.T();
}
Matrix Matrix::ReshapeFromColumns(int unit_rows, int unit_cols, int unit_depth) const {
assert(unit_rows * unit_cols * unit_depth == rows_);
Matrix rows(this->T());
rows.depth_ = rows.rows_ * rows.cols_ / (unit_rows * unit_cols);
rows.rows_ = unit_rows;
rows.cols_ = unit_cols;
return rows;
}
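// VecInvertedDropoutFill expects A to be pre-filled with uniform(0, 1) samples (see
// MakeInvertedDropoutMask below): entries below the keep-probability p become 1/p and
// the rest 0, so multiplying activations by the mask leaves their expectation unchanged.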
__global__ void VecInvertedDropoutFill(
float* A,
int size,
float p) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
A[i] = A[i] < p ? (1.0 / p) : 0.0;
}
}
// static
Matrix Matrix::MakeInvertedDropoutMask(
bool layered, int num_neurons, float p, Random* random) {
unsigned long seed = random->RandLongUnsigned();
hiprandGenerator_t gen;
CURAND_CALL(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen, seed));
Matrix result(
layered ? 1 : num_neurons,
1,
layered ? num_neurons : 1);
CURAND_CALL(hiprandGenerateUniform(gen, result.data_.get(), result.size()));
hipLaunchKernelGGL(( VecInvertedDropoutFill), dim3((255 + result.size()) / 256), dim3(256), 0, 0,
result.data_.get(), result.size(), p);
CUDA_ASYNC_CHECK();
CURAND_CALL(hiprandDestroyGenerator(gen));
return result;
}
Matrix Matrix::DeepCopy() const {
Matrix result(rows_, cols_, depth_);
CUDA_CALL(hipMemcpy(
result.data_.get(),
data_.get(),
size_ * sizeof(float),
hipMemcpyDeviceToDevice));
return result;
}
float Matrix::GetValue(int row, int col, int depth) const {
float result;
CUDA_CALL(hipMemcpy(
&result,
data_.get() + Index(row, col, depth),
sizeof(float),
hipMemcpyDeviceToHost));
return result;
}
void Matrix::SetValue(int row, int col, int depth, float value) {
CUDA_CALL(hipMemcpy(
data_.get() + Index(row, col, depth),
&value,
sizeof(float),
hipMemcpyHostToDevice));
}
void Matrix::save(cereal::PortableBinaryOutputArchive& ar) const {
std::vector<float> values = GetVector();
ar(rows_, cols_, depth_, values);
}
void Matrix::load(cereal::PortableBinaryInputArchive& ar) {
std::vector<float> values;
ar(rows_, cols_, depth_, values);
size_ = rows_ * cols_ * depth_;
SetVector(values);
}
|
9787dce0f7a3fc0925d8c7ea8588bad7cfb840a4.cu
|
#include "linalg/matrix.h"
#include "linalg/cuda_util.h"
#include <cassert> // TODO: release-mode assert
#include <iostream>
#include <iomanip>
#include <math.h>
#include <cuda.h> // strangely, not needed by nvcc
#include <curand.h>
#include <cereal/archives/portable_binary.hpp>
#include <cereal/types/memory.hpp>
#include <cereal/types/vector.hpp>
namespace matrix_cuda_util {
void SynchronizeForPerfLogging() {
cudaDeviceSynchronize();
}
}
int Matrix::Index(int i, int j, int k) const {
return k * rows_ * cols_ + i * cols_ + j;
}
struct MatrixPack {
float* items;
int rows;
int cols;
int depth;
int layer_size;
explicit MatrixPack(const Matrix& m) :
items(m.data_.get()),
rows(m.rows()),
cols(m.cols()),
depth(m.depth()),
layer_size(m.rows() * m.cols()) {}
__forceinline__ __device__ float get(int i, int j, int k) {
return items[k * layer_size + i * cols + j];
}
__forceinline__ __device__ float get(int i, int j) {
return items[i * cols + j];
}
__forceinline__ __device__ void set(int i, int j, int k, float f) {
items[k * layer_size + i * cols + j] = f;
}
__forceinline__ __device__ void set(int i, int j, float f) {
items[i * cols + j] = f;
}
__forceinline__ __device__ void div(int i, int j, float f) {
items[i * cols + j] /= f;
}
__forceinline__ __device__ void add(int i, int j, float a) {
items[i * cols + j] += a;
}
__forceinline__ __device__ bool inside(int i, int j, int k) {
return i < rows && j < cols && k < depth;
}
__forceinline__ __device__ bool inside(int i, int j) {
return i < rows && j < cols;
}
};
dim3 CalculateBlocks(
const Matrix& result,
dim3 threads_per_block) {
return dim3(
(result.rows() + threads_per_block.x - 1) / threads_per_block.x,
(result.cols() + threads_per_block.y - 1) / threads_per_block.y,
(result.depth() + threads_per_block.z - 1) / threads_per_block.z);
}
Matrix::Matrix() :
rows_(0),
cols_(0),
depth_(0),
size_(0),
data_(NULL) {}
std::shared_ptr<float> AllocateData(int size) {
float* data;
CUDA_CALL(cudaMalloc(&data, size * sizeof(float)));
return std::shared_ptr<float>(data, cudaFree);
}
std::shared_ptr<float> ImportData(int size, const float* host_data) {
std::shared_ptr<float> device_data(AllocateData(size));
CUDA_CALL(cudaMemcpy(
device_data.get(),
host_data,
size * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_ASYNC_CHECK();
return device_data;
}
Matrix::Matrix(int rows, int cols, int depth, const std::vector<float>& data) :
rows_(rows),
cols_(cols),
depth_(depth),
size_(rows * cols * depth) {
SetVector(data);
}
Matrix::Matrix(int rows, int cols, int depth) :
rows_(rows),
cols_(cols),
depth_(depth),
size_(rows * cols * depth) {
data_ = AllocateData(size_);
}
std::shared_ptr<float> Matrix::get_host_data() const {
std::shared_ptr<float> host_data;
host_data.reset(new float[size_], std::default_delete<float[]>() );
CUDA_CALL(cudaMemcpy(
host_data.get(),
data_.get(),
size_ * sizeof(float),
cudaMemcpyDeviceToHost));
return host_data;
}
void Matrix::SetVector(const std::vector<float>& data) {
assert(data.size() == size_);
data_ = ImportData(size_, &data[0]);
}
std::vector<float> Matrix::GetVector() const {
std::shared_ptr<float> host_data(get_host_data());
std::vector<float> v;
for (int k = 0; k < depth_; ++k) {
for (int i = 0; i < rows_; ++i) {
for (int j = 0; j < cols_; ++j) {
v.push_back(host_data.get()[Index(i, j, k)]);
}
}
}
return v;
}
void Matrix::Print() const {
std::cout << std::fixed << std::setw(4) << std::setprecision(2);
std::shared_ptr<float> host_data(get_host_data());
std::cout << "Matrix "
<< rows_ << "x"
<< cols_ << "x"
<< depth_
<< " (" << size_ << ")" << std::endl;
for (int k = 0; k < depth_; ++k) {
for (int i = 0; i < rows_; ++i) {
for (int j = 0; j < cols_; ++j) {
std::cout << host_data.get()[Index(i, j, k)] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
}
}
void Matrix::AssertEquals(const Matrix& other) const {
AssertSameDimensions(other);
assert(GetVector() == other.GetVector());
}
void Matrix::AssertDimensions(int rows, int cols, int depth) const {
assert(rows_ == rows && cols_ == cols && depth_ == depth);
}
void Matrix::AssertSameDimensions(const Matrix& other) const {
assert(rows_ == other.rows_ && cols_ == other.cols_ && depth_ == other.depth_);
}
void Matrix::AssertRows(int rows) const {
assert(rows_ == rows);
}
void Matrix::AssertDepth(int depth) const {
assert(depth_ == depth);
}
__global__ void MatrixTranspose(MatrixPack a, MatrixPack t) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if (a.inside(i, j)) {
t.set(j, i, a.get(i, j));
}
}
Matrix Matrix::T() const {
assert(depth_ == 1);
Matrix result(cols_, rows_, depth_);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(*this, threads_per_block);
MatrixTranspose<<<blocks, threads_per_block>>>(
MatrixPack(*this), MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
__global__ void MatrixRot180(
MatrixPack a, MatrixPack r) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (a.inside(i, j, k)) {
r.set(r.rows - i - 1, r.cols - j - 1, k, a.get(i, j, k));
}
}
Matrix Matrix::Rot180() const {
Matrix result(rows_, cols_, depth_);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(result, threads_per_block);
MatrixRot180<<<blocks, threads_per_block>>>(
MatrixPack(*this),
MatrixPack(result));
return result;
}
__global__ void MatrixDotProd(
MatrixPack a,
MatrixPack b,
MatrixPack c) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if (c.inside(i, j)) {
float sum = 0.0;
for (int k = 0; k < a.cols; ++k) {
sum += a.get(i, k) * b.get(k, j);
}
c.set(i, j, sum);
}
}
Matrix Matrix::Dot(const Matrix& other) const {
assert(cols_ == other.rows_);
assert(depth_ == 1);
int c_rows = rows_;
int c_cols = other.cols_;
Matrix result(c_rows, c_cols, 1);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(result, threads_per_block);
MatrixDotProd<<<blocks, threads_per_block>>>(
MatrixPack(*this),
MatrixPack(other),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
__global__ void VecSigmoid(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = 1.0 / (1.0 + exp(-a[i]));
}
}
__global__ void VecSigmoidGradient(float* a, float*b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
float sigma = 1.0 / (1.0 + exp(-a[i]));
b[i] = sigma * (1.0 - sigma);
}
}
__global__ void VecReLU(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = max(0.0f, a[i]);
}
}
__global__ void VecReLUGradient(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
if (a[i] < 0.0f) {
b[i] = 0.0f;
} else {
b[i] = 1.0f;
}
}
}
__global__ void VecLReLU(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = max(0.01f * a[i], a[i]);
}
}
__global__ void VecLReLUGradient(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
if (a[i] < 0.0f) {
b[i] = 0.01f;
} else {
b[i] = 1.0f;
}
}
}
__global__ void VecSquare(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = a[i] * a[i];
}
}
__global__ void VecSqrt(float* a, float* b, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = sqrt(a[i]);
}
}
namespace matrix_mappers {
// We provide factory methods instead of direct implementations
// so that users of device_matrix.h won't need to depend on
// CUDA stuff.
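// Each factory just returns a pointer to the matching __global__ kernel above;
// Matrix::Map1 (further below) launches it over the flattened element buffer.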
Map1Func Sigmoid() {
return &VecSigmoid;
}
Map1Func SigmoidGradient() {
return &VecSigmoidGradient;
}
Map1Func ReLU() {
return &VecReLU;
}
Map1Func ReLUGradient() {
return &VecReLUGradient;
}
Map1Func LReLU() {
return &VecLReLU;
}
Map1Func LReLUGradient() {
return &VecLReLUGradient;
}
Map1Func Square() {
return &VecSquare;
}
Map1Func Sqrt() {
return &VecSqrt;
}
} // namespace matrix_mappers
Matrix Matrix::Map1(::matrix_mappers::Map1Func map) const {
Matrix result(rows_, cols_, depth_);
map<<<(size_ + 255) / 256, 256>>>(
data_.get(),
result.data_.get(),
size_);
CUDA_ASYNC_CHECK();
return result;
}
__global__ void ElementwiseAddKernel(float* a, float* b, float* c, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
c[i] = a[i] + b[i];
}
}
__global__ void ElementwiseMultiplyKernel(float* a, float* b, float* c, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
c[i] = a[i] * b[i];
}
}
__global__ void ElementwiseDivideKernel(float* a, float* b, float* c, int size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
c[i] = a[i] / b[i];
}
}
Matrix Matrix::Map2(const Matrix& other, ::matrix_mappers::Map2Func map) const {
AssertSameDimensions(other);
Matrix result(rows_, cols_, depth_);
map<<<(size_ + 255) / 256, 256>>>(
data_.get(), other.data_.get(), result.data_.get(), size_);
CUDA_ASYNC_CHECK();
return result;
}
Matrix Matrix::Add(const Matrix& other) const {
return Map2(other, ElementwiseAddKernel);
}
Matrix Matrix::ElementwiseMultiply(const Matrix& other) const {
return Map2(other, ElementwiseMultiplyKernel);
}
Matrix Matrix::ElementwiseDivide(const Matrix& other) const {
return Map2(other, ElementwiseDivideKernel);
}
__global__ void AddConstKernel(float* a, float* b, int size, float c) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = a[i] + c;
}
}
__global__ void PowConstKernel(float* a, float* b, int size, float exp) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = pow(a[i], exp);
}
}
__global__ void MultiplyConstKernel(float* a, float* b, int size, float m) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = a[i] * m;
}
}
__global__ void DivideConstKernel(float* a, float* b, int size, float d) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
b[i] = a[i] / d;
}
}
Matrix Matrix::Map1P(float param, ::matrix_mappers::Map1PFunc map) const {
Matrix result(rows_, cols_, depth_);
map<<<(size_ + 255) / 256, 256>>>(
data_.get(), result.data_.get(), size_, param);
CUDA_ASYNC_CHECK();
return result;
}
Matrix Matrix::AddConst(float c) const {
return Map1P(c, AddConstKernel);
}
Matrix Matrix::Pow(float exp) const {
return Map1P(exp, PowConstKernel);
}
Matrix Matrix::Multiply(float m) const {
return Map1P(m, MultiplyConstKernel);
}
Matrix Matrix::Divide(float m) const {
return Map1P(m, DivideConstKernel);
}
__global__ void MatrixSumLayers(
MatrixPack a, MatrixPack b) {
int b_index = threadIdx.x + blockDim.x * blockIdx.x;
if (b_index < b.depth) {
float result = 0.0;
for (int k = b_index; k < a.depth; k += b.depth) {
for (int i = 0; i < a.rows; ++i) {
for (int j = 0; j < a.cols; ++j) {
result += a.get(i, j, k);
}
}
}
b.items[b_index] = result;
}
}
float Matrix::Sum() const {
Matrix result(1, 1, 1);
MatrixSumLayers<<<1, 1>>>(MatrixPack(*this), MatrixPack(result));
CUDA_ASYNC_CHECK();
return result.GetValue(0, 0, 0);
}
__global__ void MatrixSumColumns(
MatrixPack a, MatrixPack b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < a.rows) {
float result = 0.0f;
for (int j = 0; j < a.cols; ++j) {
result += a.get(i, j, 0);
}
b.set(i, 0, result);
}
}
Matrix Matrix::Sum(bool layered, int layers) const {
if (!layered) {
assert(rows_ == layers);
// sum columns
assert(depth_ == 1);
Matrix result(rows_, 1, 1);
MatrixSumColumns<<<(rows_ + 255) / 256, 256>>>(
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
} else {
// sum layers
assert(layers > 0);
assert(depth_ % layers == 0);
Matrix result(1, 1, layers);
MatrixSumLayers<<<(layers + 7) / 8, 8>>>(
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
}
__global__ void MatrixRepeatLayers(
MatrixPack a,
MatrixPack b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (b.inside(i, j, k)) {
b.set(i, j, k, a.get(0, 0, k % a.depth));
}
}
__global__ void MatrixRepeatColumns(
MatrixPack a,
MatrixPack b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if (b.inside(i, j)) {
b.set(i, j, a.get(i, 0));
}
}
Matrix Matrix::Repeat(
bool layered, int rows, int cols, int depth) const {
if (layered) {
assert(depth > 0);
assert(depth % depth_ == 0);
assert(rows_ == 1);
assert(cols_ == 1);
Matrix result(rows, cols, depth);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(result, threads_per_block);
MatrixRepeatLayers<<<blocks, threads_per_block>>>(
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
} else {
assert(rows % rows_ == 0);
assert(depth == 1);
assert(depth_ == 1);
assert(cols_ == 1);
Matrix result(rows, cols, depth);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(result, threads_per_block);
MatrixRepeatColumns<<<blocks, threads_per_block>>>(
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
}
Matrix Matrix::Repeat(bool layered, const Matrix& size_template) const {
return Repeat(
layered,
size_template.rows(),
size_template.cols(),
size_template.depth());
}
__global__ void MatrixPerLayerSum(
MatrixPack a,
MatrixPack b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (b.inside(i, j, k)) {
float sum = 0.0f;
for (int k1 = k; k1 < a.depth; k1 += b.depth) {
sum += a.get(i, j, k1);
}
b.set(i, j, k, sum);
}
}
Matrix Matrix::PerLayerSum(int layers) const {
assert(depth_ % layers == 0);
Matrix result(rows_, cols_, layers);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(result, threads_per_block);
MatrixPerLayerSum<<<blocks, threads_per_block>>>(
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
__global__ void MatrixPerLayerRepeat(
MatrixPack a,
MatrixPack b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (b.inside(i, j, k)) {
b.set(i, j, k, a.get(i, j, k % a.depth));
}
}
Matrix Matrix::PerLayerRepeat(int times) const {
Matrix result(rows_, cols_, depth_ * times);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(result, threads_per_block);
MatrixPerLayerRepeat<<<blocks, threads_per_block>>>(
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
__global__ void VecL2(float* A, int len, float* B) {
float result = 0.0;
for (int i = 0; i < len; ++i) {
result += A[i] * A[i];
}
B[0] = result;
}
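// Note: L2() below returns the sum of squares (the squared L2 norm); no sqrt is taken.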
float Matrix::L2() const {
Matrix result(1, 1, 1);
VecL2<<<1, 1>>>(data_.get(), size_, result.data_.get());
CUDA_ASYNC_CHECK();
return result.GetValue(0, 0, 0);
}
__global__ void VecSoftmax(MatrixPack a, MatrixPack b, MatrixPack c) {
int col = threadIdx.x + blockDim.x * blockIdx.x;
if (col < a.cols) {
// Get max value from column. Needed for numerical stability, see
// http://cs231n.github.io/linear-classify/#softmax
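    // Subtracting this maximum is safe because softmax(x) == softmax(x - c) for any
    // constant c, and it keeps exp() below from overflowing for large scores.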
float max_val = a.get(0, col);
for (int i = 1; i < a.rows; i++) {
float val = a.get(i, col);
if (val > max_val) {
max_val = val;
}
}
int expected_class = static_cast<int>(b.get(0, col));
float expected_class_score = -1.0;
float sum = 0.0f;
for (int i = 0; i < a.rows; ++i) {
float val = a.get(i, col) - max_val;
if (i == expected_class) {
expected_class_score = val;
}
sum += exp(val);
}
c.set(0, col, -expected_class_score + log(sum));
}
}
float Matrix::Softmax(const Matrix& expected_class) const {
assert(depth_ == 1);
// rows_ = number of classes
// cols_ = number of samples (we run the same algorithm for each sample)
assert(expected_class.rows_ == 1);
assert(expected_class.cols_ == cols_);
assert(expected_class.depth_ == 1);
Matrix result(1, cols_, 1);
VecSoftmax<<<(cols_ + 255) / 256, 256>>>(
MatrixPack(*this),
MatrixPack(expected_class),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result.Sum();
}
__global__ void VecSoftmaxGradient(
MatrixPack a,
MatrixPack b,
MatrixPack c) {
// TODO: clean up code duplication with VecSoftmax
int col = threadIdx.x + blockDim.x * blockIdx.x;
if (col < a.cols) {
float max_val = a.get(0, col);
for (int i = 1; i < a.rows; i++) {
float val = a.get(i, col);
if (val > max_val) {
max_val = val;
}
}
float sum = 0.0f;
for (int i = 0; i < a.rows; ++i) {
float val = exp(a.get(i, col) - max_val);
c.set(i, col, val);
sum += val;
}
int expected_class = static_cast<int>(b.get(0, col));
for (int i = 0; i < a.rows; ++i) {
c.div(i, col, sum);
if (i == expected_class) {
c.add(i, col, -1.0f);
}
}
}
}
Matrix Matrix::SoftmaxGradient(const Matrix& expected_class) const {
// Covered in cnn/error_layer_test.cc.
assert(depth_ == 1);
// rows_ = number of classes
// cols_ = number of samples (we run the same algorithm for each sample)
assert(expected_class.rows_ == 1);
assert(expected_class.cols_ == cols_);
assert(expected_class.depth_ == 1);
Matrix result(rows_, cols_, 1);
VecSoftmaxGradient<<<(cols_ + 255) / 256, 256>>>(
MatrixPack(*this),
MatrixPack(expected_class),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
__global__ void VecNumMatches(MatrixPack a, MatrixPack b, MatrixPack c) {
int col = threadIdx.x + blockDim.x * blockIdx.x;
if (col < a.cols) {
// Get max value from column.
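    // |unique| tracks whether the maximum is attained by exactly one row; a tied
    // maximum is counted as a miss below.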
bool unique = true;
float max_val = a.get(0, col);
for (int i = 1; i < a.rows; i++) {
float val = a.get(i, col);
if (val > max_val) {
max_val = val;
unique = true;
} else if (val == max_val) {
unique = false;
}
}
if (unique) {
int expected_class = static_cast<int>(b.get(0, col));
float expected_class_score = a.get(expected_class, col);
if (expected_class_score == max_val) {
c.set(0, col, 1.0f);
} else {
c.set(0, col, 0.0f);
}
} else {
c.set(0, col, 0.0f);
}
}
}
float Matrix::NumMatches(const Matrix& expected_class) const {
assert(depth_ == 1);
// rows_ = number of classes
// cols_ = number of samples (we run the same algorithm for each sample)
assert(expected_class.rows_ == 1);
assert(expected_class.cols_ == cols_);
assert(expected_class.depth_ == 1);
Matrix result(1, cols_, 1);
VecNumMatches<<<(cols_ + 255) / 256, 256>>>(
MatrixPack(*this),
MatrixPack(expected_class),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result.Sum();
}
__global__ void VecFill(float value, float* A, int a_size) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < a_size) {
A[i] = value;
}
}
void Matrix::Fill(float value) {
VecFill<<<(size_ + 255) / 256, 256>>>(value, data_.get(), size_);
CUDA_ASYNC_CHECK();
}
__global__ void MatrixConvolution(
int layers_per_image,
MatrixPack a, bool a_major, int num_a_images,
int a_row_shift, int a_col_shift,
MatrixPack filters, bool filters_major, int num_filters_images,
MatrixPack b) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (b.inside(i, j, k)) {
// k: destination depth-level = id of filter to apply
// layout of resulting matrix (list of layers):
//
// 1st image with 1st filter
// 1st image with 2nd filter
// ...
// 2nd image with 1st filter
// 2nd image with 2nd filter
// ...
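    // Example (illustrative values): with 2 images and 3 filters
    // (num_filters_images == 3), layer k == 4 gives image_id == 4 / 3 == 1 and
    // filter_id == 4 % 3 == 1, i.e. the 2nd image convolved with the 2nd filter.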
int a_i = i + a_row_shift;
int a_j = j + a_col_shift;
int filter_id = k % num_filters_images;
int image_id = k / num_filters_images;
float sum = 0.0;
for (int fk = 0; fk < layers_per_image; ++fk) {
int filters_k = 0; // Layer id in |filters| to use below.
      int a_k = 0; // Layer id in |a| to use below.
if (a_major) {
a_k = fk + image_id * layers_per_image;
} else {
a_k = fk * num_a_images + image_id;
}
if (filters_major) {
filters_k = fk + filter_id * layers_per_image;
} else {
filters_k = fk * num_filters_images + filter_id;
}
for (int fi = 0; fi < filters.rows; ++fi) {
for (int fj = 0; fj < filters.cols; ++fj) {
if (fi >= -a_i && fi < a.rows - a_i && fj >= -a_j && fj < a.cols - a_j) {
sum += filters.get(fi, fj, filters_k) * a.get(a_i + fi, a_j + fj, a_k);
}
}
}
}
b.set(i, j, k, sum);
}
}
Matrix Matrix::Convolution(
int layers_per_image,
const Matrix& a, bool a_major,
const Matrix& b, bool b_major) {
return Convolution(
layers_per_image,
a, a_major, 0, 0,
b, b_major, 0, 0,
0, 0);
}
Matrix Matrix::Convolution(
int layers_per_image,
const Matrix& a, bool a_major, int a_row_padding, int a_col_padding,
const Matrix& b, bool b_major, int b_row_padding, int b_col_padding,
int c_row_padding, int c_col_padding) {
int row_slots = a.rows() + 2 * a_row_padding - b.rows() - 2 * b_row_padding + 1;
int col_slots = a.cols() + 2 * a_col_padding - b.cols() - 2 * b_col_padding + 1;
assert(a.depth() % layers_per_image == 0);
assert(b.depth() % layers_per_image == 0);
int num_a_images = a.depth() / layers_per_image;
int num_b_images = b.depth() / layers_per_image;
Matrix c(
row_slots - 2 * c_row_padding,
col_slots - 2 * c_col_padding,
num_a_images * num_b_images);
int a_row_shift = c_row_padding - a_row_padding + b_row_padding;
int a_col_shift = c_col_padding - a_col_padding + b_col_padding;
dim3 threads_per_block(1, min(c.cols(), 32), 4);
dim3 blocks = CalculateBlocks(c, threads_per_block);
MatrixConvolution<<<blocks, threads_per_block>>>(
layers_per_image,
MatrixPack(a), a_major, num_a_images,
a_row_shift, a_col_shift,
MatrixPack(b), b_major, num_b_images,
MatrixPack(c));
CUDA_ASYNC_CHECK();
return c;
}
__global__ void MatrixPooling(
int pool_rows, int pool_cols,
MatrixPack a,
MatrixPack pooled,
MatrixPack switches) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (i < pooled.rows && j < pooled.cols && k < pooled.depth) {
int best_sub_index = -1;
float best_value = 0;
for (int a_sub_index = 0; a_sub_index < pool_rows * pool_cols; a_sub_index++) {
float value = a.get(
i * pool_rows + a_sub_index / pool_cols,
j * pool_cols + a_sub_index % pool_cols,
k);
if (best_sub_index < 0 || value > best_value) {
best_sub_index = a_sub_index;
best_value = value;
}
}
pooled.set(i, j, k, best_value);
switches.set(i, j, k, best_sub_index);
}
}
std::pair<Matrix, Matrix> Matrix::Pooling(
int pool_rows, int pool_cols) const {
assert(rows_ % pool_rows == 0);
assert(cols_ % pool_cols == 0);
Matrix pooled(rows_ / pool_rows, cols_ / pool_cols, depth_);
Matrix switches(rows_ / pool_rows, cols_ / pool_cols, depth_);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(pooled, threads_per_block);
MatrixPooling<<<blocks, threads_per_block>>>(
pool_rows, pool_cols,
MatrixPack(*this),
MatrixPack(pooled),
MatrixPack(switches));
CUDA_ASYNC_CHECK();
return std::make_pair(pooled, switches);
}
__global__ void MatrixPoolingSwitch(
int pool_rows, int pool_cols,
MatrixPack switches,
MatrixPack input,
MatrixPack result) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if (input.inside(i, j, k)) {
int sub_index = switches.get(i, j, k);
result.set(
i * pool_rows + sub_index / pool_cols,
j * pool_cols + sub_index % pool_cols,
k,
input.get(i, j, k));
}
}
Matrix Matrix::PoolingSwitch(
const Matrix& switches,
int pool_rows, int pool_cols) const {
AssertSameDimensions(switches);
Matrix result(rows_ * pool_rows, cols_ * pool_cols, depth_);
result.Fill(0);
dim3 threads_per_block(16, 16, 1);
dim3 blocks = CalculateBlocks(switches, threads_per_block);
MatrixPoolingSwitch<<<blocks, threads_per_block>>>(
pool_rows, pool_cols,
MatrixPack(switches),
MatrixPack(*this),
MatrixPack(result));
CUDA_ASYNC_CHECK();
return result;
}
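// ReshapeToColumns views the matrix as depth_ / unit_depth units and returns a 2-D
// matrix with one unit per column, i.e. (rows_ * cols_ * unit_depth) rows by
// (depth_ / unit_depth) columns; ReshapeFromColumns below is its inverse.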
Matrix Matrix::ReshapeToColumns(int unit_depth) const {
assert(depth_ % unit_depth == 0);
Matrix rows(*this);
rows.cols_ = rows_ * cols_ * unit_depth;
rows.rows_ = depth_ / unit_depth;
rows.depth_ = 1;
return rows.T();
}
Matrix Matrix::ReshapeFromColumns(int unit_rows, int unit_cols, int unit_depth) const {
assert(unit_rows * unit_cols * unit_depth == rows_);
Matrix rows(this->T());
rows.depth_ = rows.rows_ * rows.cols_ / (unit_rows * unit_cols);
rows.rows_ = unit_rows;
rows.cols_ = unit_cols;
return rows;
}
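// VecInvertedDropoutFill expects A to be pre-filled with uniform(0, 1) samples (see
// MakeInvertedDropoutMask below): entries below the keep-probability p become 1/p and
// the rest 0, so multiplying activations by the mask leaves their expectation unchanged.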
__global__ void VecInvertedDropoutFill(
float* A,
int size,
float p) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size) {
A[i] = A[i] < p ? (1.0 / p) : 0.0;
}
}
// static
Matrix Matrix::MakeInvertedDropoutMask(
bool layered, int num_neurons, float p, Random* random) {
unsigned long seed = random->RandLongUnsigned();
curandGenerator_t gen;
CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, seed));
Matrix result(
layered ? 1 : num_neurons,
1,
layered ? num_neurons : 1);
CURAND_CALL(curandGenerateUniform(gen, result.data_.get(), result.size()));
VecInvertedDropoutFill<<<(255 + result.size()) / 256, 256>>>(
result.data_.get(), result.size(), p);
CUDA_ASYNC_CHECK();
CURAND_CALL(curandDestroyGenerator(gen));
return result;
}
Matrix Matrix::DeepCopy() const {
Matrix result(rows_, cols_, depth_);
CUDA_CALL(cudaMemcpy(
result.data_.get(),
data_.get(),
size_ * sizeof(float),
cudaMemcpyDeviceToDevice));
return result;
}
float Matrix::GetValue(int row, int col, int depth) const {
float result;
CUDA_CALL(cudaMemcpy(
&result,
data_.get() + Index(row, col, depth),
sizeof(float),
cudaMemcpyDeviceToHost));
return result;
}
void Matrix::SetValue(int row, int col, int depth, float value) {
CUDA_CALL(cudaMemcpy(
data_.get() + Index(row, col, depth),
&value,
sizeof(float),
cudaMemcpyHostToDevice));
}
void Matrix::save(cereal::PortableBinaryOutputArchive& ar) const {
std::vector<float> values = GetVector();
ar(rows_, cols_, depth_, values);
}
void Matrix::load(cereal::PortableBinaryInputArchive& ar) {
std::vector<float> values;
ar(rows_, cols_, depth_, values);
size_ = rows_ * cols_ * depth_;
SetVector(values);
}
|
d4bb8dc2dc96b0adb7a414192506d1f323fca4a5.hip
|
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/ReduceOps.h>
#include <ATen/native/Resize.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Resize.h>
#include <ATen/native/hip/Normalization.cuh>
#include <c10/hip/HIPMathCompat.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/batch_norm_backward_elemt_native.h>
#include <ATen/ops/batch_norm_backward_reduce_native.h>
#include <ATen/ops/batch_norm_elemt_native.h>
#include <ATen/ops/batch_norm_gather_stats_native.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts_native.h>
#include <ATen/ops/batch_norm_stats_native.h>
#include <ATen/ops/batch_norm_update_stats_native.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/from_blob.h>
#include <ATen/ops/native_batch_norm_backward_native.h>
#include <ATen/ops/native_batch_norm_native.h>
#include <ATen/ops/scalar_tensor.h>
#endif
namespace at { namespace native {
namespace {
ScalarType first_type() {
return ScalarType::Undefined;
}
template <typename... Args>
ScalarType first_type(const Tensor& arg, const Args&... parameters) {
return arg.defined() ? arg.scalar_type() : first_type(parameters...);
}
// A transform is mixed type if the parameters are higher precision than the input
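// (e.g. a Half or BFloat16 input whose weight, bias and running stats are float).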
template <typename... Args>
bool is_mixed_type(const Tensor& input, const Args&... parameters) {
const auto parameter_type = first_type(parameters...);
return ((parameter_type != ScalarType::Undefined) &&
(parameter_type != input.scalar_type()));
}
inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) {
return (
self.is_contiguous(at::MemoryFormat::ChannelsLast) ||
self.is_contiguous(at::MemoryFormat::ChannelsLast3d) ||
(self.is_contiguous() && self.strides()[1] == 1)
);
}
enum class Impl {
Contiguous,
ChannelsLast,
General,
};
inline Impl batch_norm_choose_impl(const Tensor& self) {
if (!at::cuda::detail::canUse32BitIndexMath(self)) {
return Impl::General;
}
if (self.is_contiguous()) {
return self.strides()[1] == 1 ? Impl::ChannelsLast : Impl::Contiguous;
}
if (self.is_contiguous(at::MemoryFormat::ChannelsLast)) {
return Impl::ChannelsLast;
}
return Impl::General;
}
inline Impl batch_norm_choose_impl(const Tensor& in1, const Tensor& in2) {
auto imp1 = batch_norm_choose_impl(in1);
if (imp1 == Impl::General) {
return imp1;
}
auto imp2 = batch_norm_choose_impl(in2);
return imp1 == imp2 ? imp1 : Impl::General;
}
void batch_norm_elementwise(
const Tensor& out, const Tensor& self, const c10::optional<Tensor>& weight_opt,
const c10::optional<Tensor>& bias_opt, const Tensor& mean_, const Tensor& invstd_) {
switch (batch_norm_choose_impl(self)) {
case Impl::Contiguous: {
c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt);
c10::MaybeOwned<Tensor> bias = at::borrow_from_optional_tensor(bias_opt);
resize_output(out, self.sizes());
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(),
"batch_norm_elementwise_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const bool mixed_type = is_mixed_type(self, *weight, *bias);
if (mixed_type) {
batch_norm_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(
out, self, *weight, *bias, mean_, invstd_);
} else {
batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>(
out, self, *weight, *bias, mean_, invstd_);
}
});
return;
}
case Impl::ChannelsLast: {
auto weight = at::borrow_from_optional_tensor(weight_opt);
auto bias = at::borrow_from_optional_tensor(bias_opt);
if (resize_output_check(out, self.sizes())) {
resize_impl_cuda_(out.unsafeGetTensorImpl(), self.sizes(), self.strides());
}
if ((out.strides() == self.strides()) &&
(!weight->defined() || weight->is_contiguous()) &&
(!bias->defined() || bias->is_contiguous()) &&
(!mean_.defined() || mean_.is_contiguous()) &&
(!invstd_.defined() || invstd_.is_contiguous())) {
batch_norm_elemt_channels_last_cuda_template(
out, self, *weight, *bias, mean_, invstd_);
return;
}
C10_FALLTHROUGH;
}
case Impl::General: {
const int64_t ndim = self.dim();
DimVector sizes(ndim, 1), strides(ndim, 0);
// Helper to convert 1d tensors to an nd tensor that broadcasts with input
// All elements go into the channel dimension
auto as_nd = [&](const Tensor& t) {
TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1);
sizes[1] = t.sizes()[0];
strides[1] = t.strides()[0];
return t.as_strided(sizes, strides);
};
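      // For example, with an NCHW input and a length-C parameter t, as_nd(t) yields a
      // [1, C, 1, 1] view whose only non-zero stride is in the channel dimension, so it
      // broadcasts over batch and spatial positions in the TensorIterator below.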
auto weight = weight_opt.has_value() && weight_opt->defined() ?
as_nd(*weight_opt) : at::scalar_tensor(1, mean_.options());
auto bias = bias_opt.has_value() && bias_opt->defined() ?
as_nd(*bias_opt) : at::scalar_tensor(0, mean_.options());
auto mean = as_nd(mean_);
auto invstd = as_nd(invstd_);
auto iter = TensorIteratorConfig()
.add_output(out)
.add_input(self)
.add_input(weight)
.add_input(bias)
.add_input(mean)
.add_input(invstd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(),
"batch_norm_elementwise_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input, acc_t weight, acc_t bias,
acc_t mean, acc_t invstd) -> scalar_t {
return ((input - mean) * invstd) * weight + bias;
});
});
return;
}
}
}
Tensor batch_norm_elementwise_backward_train(
const Tensor& grad_out, const Tensor& input, const Tensor& mean, const Tensor& invstd,
const Tensor& weight, const Tensor& sum_dy, const Tensor& sum_dy_xmu) {
switch (batch_norm_choose_impl(input, grad_out)) {
case Impl::Contiguous: {
return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"batch_norm_backward_elemt", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const bool mixed_type = is_mixed_type(input, weight);
if (mixed_type) {
return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(
grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(
grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu);
}
});
}
case Impl::ChannelsLast: {
if ((!weight.defined() || weight.is_contiguous()) &&
mean.is_contiguous() && invstd.is_contiguous()) {
return batch_norm_backward_elemt_channels_last_cuda_template(
grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu);
}
C10_FALLTHROUGH;
}
case Impl::General: {
const auto ndim = input.dim();
DimVector sizes(ndim, 1), strides(ndim, 0);
auto as_nd = [&](const Tensor& t) {
TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1);
sizes[1] = t.sizes()[0];
strides[1] = t.strides()[0];
return t.as_strided(sizes, strides);
};
auto invstd_nd = as_nd(invstd);
auto mean_nd = as_nd(mean);
auto sum_dy_nd = as_nd(sum_dy);
auto sum_dy_xmu_nd = as_nd(sum_dy_xmu);
auto weight_nd = weight.defined() ? as_nd(weight) :
at::scalar_tensor(1.0, input.options().dtype(mean.scalar_type()));
Tensor grad_input = at::empty(input.sizes(), grad_out.options());
auto iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad_out)
.add_input(input)
.add_input(weight_nd)
.add_input(mean_nd)
.add_input(invstd_nd)
.add_input(sum_dy_xmu_nd)
.add_input(sum_dy_nd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(),
"batch_norm_eval_backward", [&]{
using accscalar_t = at::acc_type<scalar_t, true>;
      auto norm_fct = static_cast<accscalar_t>(1.0 / (input.numel() / input.size(1)));
gpu_kernel(iter, [norm_fct] GPU_LAMBDA (scalar_t gO, scalar_t input, accscalar_t weight,
accscalar_t mean, accscalar_t invstd,
accscalar_t xmu, accscalar_t dy) -> scalar_t {
auto factor_1_c = invstd * invstd * xmu * norm_fct;
auto factor_2_c = weight * invstd;
auto m_dy_c = dy * norm_fct;
return (gO - m_dy_c - (input - mean) * factor_1_c) * factor_2_c;
});
});
return grad_input;
}
}
TORCH_INTERNAL_ASSERT(false);
}
Tensor batch_norm_elementwise_backward_eval(
const Tensor& grad_out, const Tensor& input,
const Tensor& invstd, const Tensor& weight) {
const auto ndim = input.dim();
DimVector shape(ndim, 1), strides(ndim, 0);
shape[1] = invstd.sizes()[0];
strides[1] = invstd.strides()[0];
auto invstd_nd = invstd.as_strided(shape, strides);
Tensor grad_input = at::empty(input.sizes(), grad_out.options());
if (weight.defined()) {
strides[1] = weight.strides()[0];
auto weight_nd = weight.as_strided(shape, strides);
auto iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad_out)
.add_input(invstd_nd)
.add_input(weight_nd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(),
"batch_norm_eval_backward", [&]{
using accscalar_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd, accscalar_t weight)
-> scalar_t {
return gO * weight * invstd;
});
});
} else {
auto iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad_out)
.add_input(invstd_nd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(),
"batch_norm_eval_backward", [&]{
using accscalar_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd) -> scalar_t {
return gO * invstd;
});
});
}
return grad_input;
}
void batch_norm_mean_var(const Tensor& self, Tensor& save_mean, Tensor& save_var) {
// NOTE: Epsilon is only used for InvStd, not Var. The value here is ignored.
const double dummy_epsilon = 1e-5;
switch (batch_norm_choose_impl(self)) {
case Impl::Contiguous: {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] {
batch_norm_stats_cuda_template<scalar_t, int32_t, Var>(
save_mean, save_var, self, dummy_epsilon);
});
return;
}
case Impl::ChannelsLast: {
if ((!save_mean.defined() || save_mean.is_contiguous()) &&
(!save_var.defined() || save_var.is_contiguous())) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] {
batch_norm_stats_channels_last_cuda_template<scalar_t, Var>(
save_mean, save_var, self, dummy_epsilon);
});
return;
}
C10_FALLTHROUGH;
}
case Impl::General: {
const int64_t ndim = self.dim();
DimVector reduce_dims(ndim - 1);
reduce_dims[0] = 0;
for (int64_t i = 2; i < ndim; ++i) {
reduce_dims[i - 1] = i;
}
// For some reason this isn't an actual operator but it exists anyway...
at::native::var_mean_out(save_var, save_mean, self, /*dims=*/reduce_dims,
/*unbiased=*/false, /*keepdim=*/false);
return;
}
}
}
void batch_norm_update_stats(
const Tensor& save_mean, const Tensor& save_var,
const Tensor& running_mean, const Tensor& running_var,
double momentum_, int64_t N) {
auto iter = TensorIteratorConfig()
.add_output(running_mean)
.add_output(running_var)
.add_input(save_mean)
.add_input(save_var)
.add_input(running_mean)
.add_input(running_var)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(),
"batch_norm_update_stats_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
const auto bessel_correction_factor = static_cast<acc_t>(
static_cast<double>(N) / static_cast<double>(N - 1));
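      // The batch variance computed by batch_norm_mean_var is biased (1/N); running_var
      // stores the unbiased estimate, hence the N / (N - 1) Bessel correction below.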
const auto momentum = static_cast<acc_t>(momentum_);
gpu_kernel_multiple_outputs(
iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var)
-> thrust::tuple<scalar_t, scalar_t> {
const auto unbiased_var = var * bessel_correction_factor;
return thrust::tuple<scalar_t, scalar_t>{
mean * momentum + (1 - momentum) * running_mean,
unbiased_var * momentum + (1 - momentum) * running_var,
};
});
});
}
void batch_norm_update_stats_and_invert(
const Tensor& save_mean, const Tensor& save_var,
const Tensor& running_mean, const Tensor& running_var,
double momentum_, double epsilon, int64_t N) {
auto iter = TensorIteratorConfig()
.add_output(running_mean)
.add_output(running_var)
.add_output(save_var)
.add_input(save_mean)
.add_input(save_var)
.add_input(running_mean)
.add_input(running_var)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(),
"batch_norm_update_stats_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
const auto bessel_correction_factor = static_cast<acc_t>(
static_cast<double>(N) / static_cast<double>(N - 1));
const auto eps = static_cast<acc_t>(epsilon);
const auto momentum = static_cast<acc_t>(momentum_);
gpu_kernel_multiple_outputs(
iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var)
-> thrust::tuple<scalar_t, scalar_t, acc_t> {
const auto unbiased_var = var * bessel_correction_factor;
return thrust::tuple<scalar_t, scalar_t, acc_t>{
mean * momentum + (1 - momentum) * running_mean,
unbiased_var * momentum + (1 - momentum) * running_var,
c10::hip::compat::rsqrt(var + eps)
};
});
});
}
void batch_norm_calc_invstd(const Tensor& out_invstd, const Tensor& running_var, double epsilon) {
auto iter = TensorIteratorConfig()
.add_output(out_invstd)
.add_input(running_var)
.check_all_same_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_var.scalar_type(),
"batch_norm_invert_std_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
auto eps = static_cast<acc_t>(epsilon);
gpu_kernel(iter, [eps] GPU_LAMBDA (scalar_t var) -> acc_t {
return c10::hip::compat::rsqrt(var + eps);
});
});
}
}
std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
const bool has_running_mean = (running_mean_opt.has_value() && running_mean_opt->defined());
const bool has_running_var = (running_var_opt.has_value() && running_var_opt->defined());
TORCH_CHECK(has_running_mean == has_running_var);
if (train) {
batch_norm_mean_var(self, save_mean, save_invstd);
if (has_running_mean) {
const int64_t N = self.numel() / save_mean.numel();
batch_norm_update_stats_and_invert(
save_mean, save_invstd, *running_mean_opt, *running_var_opt,
momentum, epsilon, N);
} else {
batch_norm_calc_invstd(save_invstd, save_invstd, epsilon);
}
} else {
TORCH_CHECK(has_running_mean);
at::native::resize_output(save_mean, running_mean_opt->sizes());
save_mean.copy_(*running_mean_opt, /*non_blocking=*/true);
batch_norm_calc_invstd(save_invstd, running_var_opt.value(), epsilon);
}
batch_norm_elementwise(output, self, weight_opt, bias_opt, save_mean, save_invstd);
return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) {
auto output = at::empty_like(self);
int64_t n_input = self.size(1);
auto options = self.options().dtype(
at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true));
auto save_mean = at::empty({n_input}, options);
auto save_invstd = at::empty({n_input}, options);
at::native::batch_norm_cuda_out(
self,
weight_opt,
bias_opt,
running_mean_opt,
running_var_opt,
train,
momentum,
epsilon,
output,
save_mean,
save_invstd);
return std::make_tuple(output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& input, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt);
c10::MaybeOwned<Tensor> save_mean = at::borrow_from_optional_tensor(save_mean_opt);
c10::MaybeOwned<Tensor> save_invstd = at::borrow_from_optional_tensor(save_invstd_opt);
c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt);
c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt);
const bool needs_reduction = train || grad_input_mask[1] || grad_input_mask[2];
  // Fused reduction & elementwise kernel
if (needs_reduction && grad_input_mask[0] &&
!batch_norm_use_channels_last_kernels(input) &&
cuda::detail::canUse32BitIndexMath(input) &&
cuda::detail::canUse32BitIndexMath(grad_out)) {
return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"batch_norm_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const bool mixed_type = is_mixed_type(input, *weight, *running_mean, *running_var);
if (mixed_type) {
return batch_norm_backward_cuda_template<scalar_t, accscalar_t, int32_t>(
grad_out, input, *weight, *running_mean, *running_var,
*save_mean, *save_invstd, train, epsilon, grad_input_mask);
} else {
return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>(
grad_out, input, *weight, *running_mean, *running_var,
*save_mean, *save_invstd, train, epsilon, grad_input_mask);
}
});
}
// NOTE: native_batch_norm always returns save_mean and save_invstd to be reused in backward.
// However, this is also called from cudnn_batch_norm in eval mode which doesn't give
  // save_mean and save_invstd, so they need to be recalculated.
const auto acc_type = at::toAccumulateType(input.scalar_type(), /*is_cuda=*/true);
Tensor mean;
TORCH_INTERNAL_ASSERT(save_mean->defined(), "save_mean should always be defined\n");
if (save_mean->numel() != 0) {
mean = *save_mean;
} else if (needs_reduction) {
TORCH_CHECK(!train && running_mean->defined());
mean = (running_mean->scalar_type() == acc_type) ?
*running_mean : running_mean->to(acc_type);
}
Tensor invstd;
TORCH_INTERNAL_ASSERT(save_invstd->defined(), "save_invstd should always be defined\n");
if (save_invstd->numel() != 0) {
invstd = *save_invstd;
} else {
TORCH_CHECK(!train && running_var->defined());
auto n_channels = input.sizes()[1];
invstd = at::empty({n_channels}, input.options().dtype(acc_type));
batch_norm_calc_invstd(invstd, *running_var, epsilon);
}
Tensor sum_dy, sum_dy_xmu, grad_weight, grad_bias;
if (needs_reduction) {
std::tie(sum_dy, sum_dy_xmu, grad_weight, grad_bias) =
batch_norm_backward_reduce_cuda(
grad_out, input, mean, invstd, *weight,
grad_input_mask[0], grad_input_mask[1], grad_input_mask[2]);
}
Tensor grad_input;
if (grad_input_mask[0]) {
if (train) {
      // NOTE: sum_dy and sum_dy_xmu are defined, as train implies needs_reduction
grad_input = batch_norm_elementwise_backward_train(
grad_out, input, mean, invstd, *weight, sum_dy, sum_dy_xmu);
} else {
grad_input = batch_norm_elementwise_backward_eval(
grad_out, input, invstd, *weight);
}
}
return std::make_tuple(grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) {
auto options = self.options().dtype(
at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true));
auto n_channels = self.size(1);
auto save_mean = at::empty({n_channels}, options);
auto save_invstd = at::empty({n_channels}, options);
bool use_channels_last_kernel = batch_norm_use_channels_last_kernels(self);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "batch_norm_stats_cuda", [&] {
if (cuda::detail::canUse32BitIndexMath(self)) {
if (use_channels_last_kernel) {
batch_norm_stats_channels_last_cuda_template<scalar_t, InvStd>(
save_mean, save_invstd, self, epsilon);
} else {
batch_norm_stats_cuda_template<scalar_t, int32_t, InvStd>(
save_mean, save_invstd, self, epsilon);
}
} else {
batch_norm_stats_cuda_template<scalar_t, int64_t, InvStd>(
save_mean, save_invstd, self, epsilon);
}
});
return std::tuple<Tensor, Tensor>(save_mean, save_invstd);
}
Tensor batch_norm_elemt_cuda(
const Tensor& self, const c10::optional<Tensor>& weight_opt,
const c10::optional<Tensor>& bias_opt, const Tensor& mean,
const Tensor& invstd, double epsilon) {
auto output = at::empty_like(self);
// FIXME: Epsilon parameter isn't required, we don't take the reciprocal
batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd);
return output;
}
Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) {
// FIXME: Epsilon parameter isn't required, we don't take the reciprocal
batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd);
return output;
}
// accepting input(self) here to determine template data types, since running_mean/running_var are optional
std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt);
const Tensor& running_mean = *running_mean_maybe_owned;
const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();});
std::vector<int64_t> counts(mean.size(0), count);
Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU));
counts_ = counts_.to(self.device()).to(running_mean.defined() ? running_mean.dtype() : self.dtype());
return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_);
}
std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda(
const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt);
const Tensor& running_mean = *running_mean_maybe_owned;
const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();});
auto scalar_type = running_mean.defined() ? running_mean.scalar_type() : self.scalar_type();
return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "batch_norm_update_stats_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (cuda::detail::canUse32BitIndexMath(self)) {
return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts);
} else {
return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts);
}
});
}
std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, bool input_g, bool weight_g, bool bias_g) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
if (at::cuda::detail::canUse32BitIndexMath(grad_output) &&
batch_norm_use_channels_last_kernels(grad_output) &&
batch_norm_use_channels_last_kernels(input) &&
(!weight.defined() || weight.is_contiguous()) &&
mean.is_contiguous() && invstd.is_contiguous()){
return batch_norm_backward_reduce_cuda_channels_last_template(
grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "batch_norm_backward_reduce", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
const bool mixed_type = is_mixed_type(input, weight);
using accscalar_t = at::acc_type<scalar_t, true>;
if (cuda::detail::canUse32BitIndexMath(grad_output)) {
if (mixed_type) {
return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
} else {
return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
} else {
if (mixed_type) {
return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
} else {
return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
}
});
}
Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
if (at::cuda::detail::canUse32BitIndexMath(self) &&
batch_norm_use_channels_last_kernels(self) &&
batch_norm_use_channels_last_kernels(input)) {
return batch_norm_backward_elemt_channels_last_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}
return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat;
using accscalar_t = at::acc_type<scalar_t, true>;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float || is_bfloat16_float) {
return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}
} else {
if (is_half_float || is_bfloat16_float) {
return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}
}
});
}
std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda(
const Tensor& self, const c10::optional<Tensor>& running_mean_opt,
const c10::optional<Tensor>& running_var_opt, double momentum) {
c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt);
c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt);
const int64_t n_input = self.size(1);
auto options = self.options().dtype(
at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true));
auto save_mean = at::empty({n_input}, options);
auto save_var = at::empty({n_input}, options);
batch_norm_mean_var(self, save_mean, save_var);
TORCH_CHECK(running_mean->defined() == running_var->defined());
if (running_mean->defined()) {
const int64_t N = self.numel() / save_mean.numel();
batch_norm_update_stats(save_mean, save_var, *running_mean, *running_var, momentum, N);
}
return std::tuple<Tensor, Tensor>(save_mean, save_var);
}
} } // namespace at::native
|
d4bb8dc2dc96b0adb7a414192506d1f323fca4a5.cu
|
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/ReduceOps.h>
#include <ATen/native/Resize.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Resize.h>
#include <ATen/native/cuda/Normalization.cuh>
#include <c10/cuda/CUDAMathCompat.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/batch_norm_backward_elemt_native.h>
#include <ATen/ops/batch_norm_backward_reduce_native.h>
#include <ATen/ops/batch_norm_elemt_native.h>
#include <ATen/ops/batch_norm_gather_stats_native.h>
#include <ATen/ops/batch_norm_gather_stats_with_counts_native.h>
#include <ATen/ops/batch_norm_stats_native.h>
#include <ATen/ops/batch_norm_update_stats_native.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/from_blob.h>
#include <ATen/ops/native_batch_norm_backward_native.h>
#include <ATen/ops/native_batch_norm_native.h>
#include <ATen/ops/scalar_tensor.h>
#endif
namespace at { namespace native {
namespace {
ScalarType first_type() {
return ScalarType::Undefined;
}
template <typename... Args>
ScalarType first_type(const Tensor& arg, const Args&... parameters) {
return arg.defined() ? arg.scalar_type() : first_type(parameters...);
}
// A transform is mixed type if the parameters are higher precision than the input
template <typename... Args>
bool is_mixed_type(const Tensor& input, const Args&... parameters) {
const auto parameter_type = first_type(parameters...);
return ((parameter_type != ScalarType::Undefined) &&
(parameter_type != input.scalar_type()));
}
inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) {
return (
self.is_contiguous(at::MemoryFormat::ChannelsLast) ||
self.is_contiguous(at::MemoryFormat::ChannelsLast3d) ||
(self.is_contiguous() && self.strides()[1] == 1)
);
}
enum class Impl {
Contiguous,
ChannelsLast,
General,
};
inline Impl batch_norm_choose_impl(const Tensor& self) {
if (!at::cuda::detail::canUse32BitIndexMath(self)) {
return Impl::General;
}
if (self.is_contiguous()) {
return self.strides()[1] == 1 ? Impl::ChannelsLast : Impl::Contiguous;
}
if (self.is_contiguous(at::MemoryFormat::ChannelsLast)) {
return Impl::ChannelsLast;
}
return Impl::General;
}
inline Impl batch_norm_choose_impl(const Tensor& in1, const Tensor& in2) {
auto imp1 = batch_norm_choose_impl(in1);
if (imp1 == Impl::General) {
return imp1;
}
auto imp2 = batch_norm_choose_impl(in2);
return imp1 == imp2 ? imp1 : Impl::General;
}
void batch_norm_elementwise(
const Tensor& out, const Tensor& self, const c10::optional<Tensor>& weight_opt,
const c10::optional<Tensor>& bias_opt, const Tensor& mean_, const Tensor& invstd_) {
switch (batch_norm_choose_impl(self)) {
case Impl::Contiguous: {
c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt);
c10::MaybeOwned<Tensor> bias = at::borrow_from_optional_tensor(bias_opt);
resize_output(out, self.sizes());
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(),
"batch_norm_elementwise_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const bool mixed_type = is_mixed_type(self, *weight, *bias);
if (mixed_type) {
batch_norm_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(
out, self, *weight, *bias, mean_, invstd_);
} else {
batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>(
out, self, *weight, *bias, mean_, invstd_);
}
});
return;
}
case Impl::ChannelsLast: {
auto weight = at::borrow_from_optional_tensor(weight_opt);
auto bias = at::borrow_from_optional_tensor(bias_opt);
if (resize_output_check(out, self.sizes())) {
resize_impl_cuda_(out.unsafeGetTensorImpl(), self.sizes(), self.strides());
}
if ((out.strides() == self.strides()) &&
(!weight->defined() || weight->is_contiguous()) &&
(!bias->defined() || bias->is_contiguous()) &&
(!mean_.defined() || mean_.is_contiguous()) &&
(!invstd_.defined() || invstd_.is_contiguous())) {
batch_norm_elemt_channels_last_cuda_template(
out, self, *weight, *bias, mean_, invstd_);
return;
}
C10_FALLTHROUGH;
}
case Impl::General: {
const int64_t ndim = self.dim();
DimVector sizes(ndim, 1), strides(ndim, 0);
// Helper to convert 1d tensors to an nd tensor that broadcasts with input
// All elements go into the channel dimension
auto as_nd = [&](const Tensor& t) {
TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1);
sizes[1] = t.sizes()[0];
strides[1] = t.strides()[0];
return t.as_strided(sizes, strides);
};
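      // e.g. for a 4-D NCHW input and a 1-D tensor of length C, as_nd yields a view of
      // size {1, C, 1, 1} with strides {0, t.stride(0), 0, 0}, so it broadcasts over N, H and W.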
auto weight = weight_opt.has_value() && weight_opt->defined() ?
as_nd(*weight_opt) : at::scalar_tensor(1, mean_.options());
auto bias = bias_opt.has_value() && bias_opt->defined() ?
as_nd(*bias_opt) : at::scalar_tensor(0, mean_.options());
auto mean = as_nd(mean_);
auto invstd = as_nd(invstd_);
auto iter = TensorIteratorConfig()
.add_output(out)
.add_input(self)
.add_input(weight)
.add_input(bias)
.add_input(mean)
.add_input(invstd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(),
"batch_norm_elementwise_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input, acc_t weight, acc_t bias,
acc_t mean, acc_t invstd) -> scalar_t {
return ((input - mean) * invstd) * weight + bias;
});
});
return;
}
}
}
Tensor batch_norm_elementwise_backward_train(
const Tensor& grad_out, const Tensor& input, const Tensor& mean, const Tensor& invstd,
const Tensor& weight, const Tensor& sum_dy, const Tensor& sum_dy_xmu) {
switch (batch_norm_choose_impl(input, grad_out)) {
case Impl::Contiguous: {
return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"batch_norm_backward_elemt", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const bool mixed_type = is_mixed_type(input, weight);
if (mixed_type) {
return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(
grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(
grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu);
}
});
}
case Impl::ChannelsLast: {
if ((!weight.defined() || weight.is_contiguous()) &&
mean.is_contiguous() && invstd.is_contiguous()) {
return batch_norm_backward_elemt_channels_last_cuda_template(
grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu);
}
C10_FALLTHROUGH;
}
case Impl::General: {
const auto ndim = input.dim();
DimVector sizes(ndim, 1), strides(ndim, 0);
auto as_nd = [&](const Tensor& t) {
TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1);
sizes[1] = t.sizes()[0];
strides[1] = t.strides()[0];
return t.as_strided(sizes, strides);
};
auto invstd_nd = as_nd(invstd);
auto mean_nd = as_nd(mean);
auto sum_dy_nd = as_nd(sum_dy);
auto sum_dy_xmu_nd = as_nd(sum_dy_xmu);
auto weight_nd = weight.defined() ? as_nd(weight) :
at::scalar_tensor(1.0, input.options().dtype(mean.scalar_type()));
Tensor grad_input = at::empty(input.sizes(), grad_out.options());
auto iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad_out)
.add_input(input)
.add_input(weight_nd)
.add_input(mean_nd)
.add_input(invstd_nd)
.add_input(sum_dy_xmu_nd)
.add_input(sum_dy_nd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(),
"batch_norm_eval_backward", [&]{
using accscalar_t = at::acc_type<scalar_t, true>;
auto norm_fct = static_cast<accscalar_t>(1.0 / (input.numel() /input.size(1)) );
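          // norm_fct == 1 / (number of elements per channel). The kernel below applies the
          // standard training-mode backward:
          //   grad_input = weight * invstd * (grad_out - sum_dy * norm_fct
          //                                   - (x - mean) * invstd^2 * sum_dy_xmu * norm_fct)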
gpu_kernel(iter, [norm_fct] GPU_LAMBDA (scalar_t gO, scalar_t input, accscalar_t weight,
accscalar_t mean, accscalar_t invstd,
accscalar_t xmu, accscalar_t dy) -> scalar_t {
auto factor_1_c = invstd * invstd * xmu * norm_fct;
auto factor_2_c = weight * invstd;
auto m_dy_c = dy * norm_fct;
return (gO - m_dy_c - (input - mean) * factor_1_c) * factor_2_c;
});
});
return grad_input;
}
}
TORCH_INTERNAL_ASSERT(false);
}
Tensor batch_norm_elementwise_backward_eval(
const Tensor& grad_out, const Tensor& input,
const Tensor& invstd, const Tensor& weight) {
const auto ndim = input.dim();
DimVector shape(ndim, 1), strides(ndim, 0);
shape[1] = invstd.sizes()[0];
strides[1] = invstd.strides()[0];
auto invstd_nd = invstd.as_strided(shape, strides);
Tensor grad_input = at::empty(input.sizes(), grad_out.options());
if (weight.defined()) {
strides[1] = weight.strides()[0];
auto weight_nd = weight.as_strided(shape, strides);
auto iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad_out)
.add_input(invstd_nd)
.add_input(weight_nd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(),
"batch_norm_eval_backward", [&]{
using accscalar_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd, accscalar_t weight)
-> scalar_t {
return gO * weight * invstd;
});
});
} else {
auto iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad_out)
.add_input(invstd_nd)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(),
"batch_norm_eval_backward", [&]{
using accscalar_t = at::acc_type<scalar_t, true>;
gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd) -> scalar_t {
return gO * invstd;
});
});
}
return grad_input;
}
void batch_norm_mean_var(const Tensor& self, Tensor& save_mean, Tensor& save_var) {
// NOTE: Epsilon is only used for InvStd, not Var. The value here is ignored.
const double dummy_epsilon = 1e-5;
switch (batch_norm_choose_impl(self)) {
case Impl::Contiguous: {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] {
batch_norm_stats_cuda_template<scalar_t, int32_t, Var>(
save_mean, save_var, self, dummy_epsilon);
});
return;
}
case Impl::ChannelsLast: {
if ((!save_mean.defined() || save_mean.is_contiguous()) &&
(!save_var.defined() || save_var.is_contiguous())) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] {
batch_norm_stats_channels_last_cuda_template<scalar_t, Var>(
save_mean, save_var, self, dummy_epsilon);
});
return;
}
C10_FALLTHROUGH;
}
case Impl::General: {
const int64_t ndim = self.dim();
DimVector reduce_dims(ndim - 1);
reduce_dims[0] = 0;
for (int64_t i = 2; i < ndim; ++i) {
reduce_dims[i - 1] = i;
}
// For some reason this isn't an actual operator but it exists anyway...
at::native::var_mean_out(save_var, save_mean, self, /*dims=*/reduce_dims,
/*unbiased=*/false, /*keepdim=*/false);
return;
}
}
}
void batch_norm_update_stats(
const Tensor& save_mean, const Tensor& save_var,
const Tensor& running_mean, const Tensor& running_var,
double momentum_, int64_t N) {
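  // In-place running statistics update:
  //   running_mean <- momentum * batch_mean + (1 - momentum) * running_mean
  //   running_var  <- momentum * unbiased_batch_var + (1 - momentum) * running_var
  // where unbiased_batch_var applies the Bessel correction N / (N - 1) to the biased batch variance.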
auto iter = TensorIteratorConfig()
.add_output(running_mean)
.add_output(running_var)
.add_input(save_mean)
.add_input(save_var)
.add_input(running_mean)
.add_input(running_var)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(),
"batch_norm_update_stats_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
const auto bessel_correction_factor = static_cast<acc_t>(
static_cast<double>(N) / static_cast<double>(N - 1));
const auto momentum = static_cast<acc_t>(momentum_);
gpu_kernel_multiple_outputs(
iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var)
-> thrust::tuple<scalar_t, scalar_t> {
const auto unbiased_var = var * bessel_correction_factor;
return thrust::tuple<scalar_t, scalar_t>{
mean * momentum + (1 - momentum) * running_mean,
unbiased_var * momentum + (1 - momentum) * running_var,
};
});
});
}
void batch_norm_update_stats_and_invert(
const Tensor& save_mean, const Tensor& save_var,
const Tensor& running_mean, const Tensor& running_var,
double momentum_, double epsilon, int64_t N) {
auto iter = TensorIteratorConfig()
.add_output(running_mean)
.add_output(running_var)
.add_output(save_var)
.add_input(save_mean)
.add_input(save_var)
.add_input(running_mean)
.add_input(running_var)
.check_all_same_dtype(false)
.promote_inputs_to_common_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(),
"batch_norm_update_stats_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
const auto bessel_correction_factor = static_cast<acc_t>(
static_cast<double>(N) / static_cast<double>(N - 1));
const auto eps = static_cast<acc_t>(epsilon);
const auto momentum = static_cast<acc_t>(momentum_);
gpu_kernel_multiple_outputs(
iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var)
-> thrust::tuple<scalar_t, scalar_t, acc_t> {
const auto unbiased_var = var * bessel_correction_factor;
return thrust::tuple<scalar_t, scalar_t, acc_t>{
mean * momentum + (1 - momentum) * running_mean,
unbiased_var * momentum + (1 - momentum) * running_var,
c10::cuda::compat::rsqrt(var + eps)
};
});
});
}
void batch_norm_calc_invstd(const Tensor& out_invstd, const Tensor& running_var, double epsilon) {
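  // Computes out_invstd = 1 / sqrt(running_var + epsilon) elementwise; this is the inverse
  // standard deviation consumed by the elementwise and backward kernels.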
auto iter = TensorIteratorConfig()
.add_output(out_invstd)
.add_input(running_var)
.check_all_same_dtype(false)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_var.scalar_type(),
"batch_norm_invert_std_cuda", [&] {
using acc_t = at::acc_type<scalar_t, true>;
auto eps = static_cast<acc_t>(epsilon);
gpu_kernel(iter, [eps] GPU_LAMBDA (scalar_t var) -> acc_t {
return c10::cuda::compat::rsqrt(var + eps);
});
});
}
}
std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) {
const bool has_running_mean = (running_mean_opt.has_value() && running_mean_opt->defined());
const bool has_running_var = (running_var_opt.has_value() && running_var_opt->defined());
TORCH_CHECK(has_running_mean == has_running_var);
if (train) {
batch_norm_mean_var(self, save_mean, save_invstd);
if (has_running_mean) {
const int64_t N = self.numel() / save_mean.numel();
batch_norm_update_stats_and_invert(
save_mean, save_invstd, *running_mean_opt, *running_var_opt,
momentum, epsilon, N);
} else {
batch_norm_calc_invstd(save_invstd, save_invstd, epsilon);
}
} else {
TORCH_CHECK(has_running_mean);
at::native::resize_output(save_mean, running_mean_opt->sizes());
save_mean.copy_(*running_mean_opt, /*non_blocking=*/true);
batch_norm_calc_invstd(save_invstd, running_var_opt.value(), epsilon);
}
batch_norm_elementwise(output, self, weight_opt, bias_opt, save_mean, save_invstd);
return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) {
auto output = at::empty_like(self);
int64_t n_input = self.size(1);
auto options = self.options().dtype(
at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true));
auto save_mean = at::empty({n_input}, options);
auto save_invstd = at::empty({n_input}, options);
at::native::batch_norm_cuda_out(
self,
weight_opt,
bias_opt,
running_mean_opt,
running_var_opt,
train,
momentum,
epsilon,
output,
save_mean,
save_invstd);
return std::make_tuple(output, save_mean, save_invstd);
}
std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& input, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight = at::borrow_from_optional_tensor(weight_opt);
c10::MaybeOwned<Tensor> save_mean = at::borrow_from_optional_tensor(save_mean_opt);
c10::MaybeOwned<Tensor> save_invstd = at::borrow_from_optional_tensor(save_invstd_opt);
c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt);
c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt);
const bool needs_reduction = train || grad_input_mask[1] || grad_input_mask[2];
  // Fused reduction & elementwise kernel
if (needs_reduction && grad_input_mask[0] &&
!batch_norm_use_channels_last_kernels(input) &&
cuda::detail::canUse32BitIndexMath(input) &&
cuda::detail::canUse32BitIndexMath(grad_out)) {
return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"batch_norm_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const bool mixed_type = is_mixed_type(input, *weight, *running_mean, *running_var);
if (mixed_type) {
return batch_norm_backward_cuda_template<scalar_t, accscalar_t, int32_t>(
grad_out, input, *weight, *running_mean, *running_var,
*save_mean, *save_invstd, train, epsilon, grad_input_mask);
} else {
return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>(
grad_out, input, *weight, *running_mean, *running_var,
*save_mean, *save_invstd, train, epsilon, grad_input_mask);
}
});
}
// NOTE: native_batch_norm always returns save_mean and save_invstd to be reused in backward.
// However, this is also called from cudnn_batch_norm in eval mode which doesn't give
  // save_mean and save_invstd, so they need to be recalculated here.
const auto acc_type = at::toAccumulateType(input.scalar_type(), /*is_cuda=*/true);
Tensor mean;
TORCH_INTERNAL_ASSERT(save_mean->defined(), "save_mean should always be defined\n");
if (save_mean->numel() != 0) {
mean = *save_mean;
} else if (needs_reduction) {
TORCH_CHECK(!train && running_mean->defined());
mean = (running_mean->scalar_type() == acc_type) ?
*running_mean : running_mean->to(acc_type);
}
Tensor invstd;
TORCH_INTERNAL_ASSERT(save_invstd->defined(), "save_invstd should always be defined\n");
if (save_invstd->numel() != 0) {
invstd = *save_invstd;
} else {
TORCH_CHECK(!train && running_var->defined());
auto n_channels = input.sizes()[1];
invstd = at::empty({n_channels}, input.options().dtype(acc_type));
batch_norm_calc_invstd(invstd, *running_var, epsilon);
}
Tensor sum_dy, sum_dy_xmu, grad_weight, grad_bias;
if (needs_reduction) {
std::tie(sum_dy, sum_dy_xmu, grad_weight, grad_bias) =
batch_norm_backward_reduce_cuda(
grad_out, input, mean, invstd, *weight,
grad_input_mask[0], grad_input_mask[1], grad_input_mask[2]);
}
Tensor grad_input;
if (grad_input_mask[0]) {
if (train) {
      // NOTE: sum_dy and sum_dy_xmu are defined, as train implies needs_reduction
grad_input = batch_norm_elementwise_backward_train(
grad_out, input, mean, invstd, *weight, sum_dy, sum_dy_xmu);
} else {
grad_input = batch_norm_elementwise_backward_eval(
grad_out, input, invstd, *weight);
}
}
return std::make_tuple(grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) {
auto options = self.options().dtype(
at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true));
auto n_channels = self.size(1);
auto save_mean = at::empty({n_channels}, options);
auto save_invstd = at::empty({n_channels}, options);
bool use_channels_last_kernel = batch_norm_use_channels_last_kernels(self);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
self.scalar_type(), "batch_norm_stats_cuda", [&] {
if (cuda::detail::canUse32BitIndexMath(self)) {
if (use_channels_last_kernel) {
batch_norm_stats_channels_last_cuda_template<scalar_t, InvStd>(
save_mean, save_invstd, self, epsilon);
} else {
batch_norm_stats_cuda_template<scalar_t, int32_t, InvStd>(
save_mean, save_invstd, self, epsilon);
}
} else {
batch_norm_stats_cuda_template<scalar_t, int64_t, InvStd>(
save_mean, save_invstd, self, epsilon);
}
});
return std::tuple<Tensor, Tensor>(save_mean, save_invstd);
}
Tensor batch_norm_elemt_cuda(
const Tensor& self, const c10::optional<Tensor>& weight_opt,
const c10::optional<Tensor>& bias_opt, const Tensor& mean,
const Tensor& invstd, double epsilon) {
auto output = at::empty_like(self);
// FIXME: Epsilon parameter isn't required, we don't take the reciprocal
batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd);
return output;
}
Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt,
const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) {
// FIXME: Epsilon parameter isn't required, we don't take the reciprocal
batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd);
return output;
}
// accepting input(self) here to determine template data types, since running_mean/running_var are optional
std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt);
const Tensor& running_mean = *running_mean_maybe_owned;
const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();});
std::vector<int64_t> counts(mean.size(0), count);
Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU));
counts_ = counts_.to(self.device()).to(running_mean.defined() ? running_mean.dtype() : self.dtype());
return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_);
}
std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda(
const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt);
const Tensor& running_mean = *running_mean_maybe_owned;
const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();});
auto scalar_type = running_mean.defined() ? running_mean.scalar_type() : self.scalar_type();
return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "batch_norm_update_stats_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (cuda::detail::canUse32BitIndexMath(self)) {
return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts);
} else {
return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts);
}
});
}
std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, bool input_g, bool weight_g, bool bias_g) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
if (at::cuda::detail::canUse32BitIndexMath(grad_output) &&
batch_norm_use_channels_last_kernels(grad_output) &&
batch_norm_use_channels_last_kernels(input) &&
(!weight.defined() || weight.is_contiguous()) &&
mean.is_contiguous() && invstd.is_contiguous()){
return batch_norm_backward_reduce_cuda_channels_last_template(
grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "batch_norm_backward_reduce", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
const bool mixed_type = is_mixed_type(input, weight);
using accscalar_t = at::acc_type<scalar_t, true>;
if (cuda::detail::canUse32BitIndexMath(grad_output)) {
if (mixed_type) {
return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
} else {
return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
} else {
if (mixed_type) {
return batch_norm_backward_reduce_cuda_template<scalar_t, accscalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
} else {
return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g);
}
}
});
}
Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
if (at::cuda::detail::canUse32BitIndexMath(self) &&
batch_norm_use_channels_last_kernels(self) &&
batch_norm_use_channels_last_kernels(input)) {
return batch_norm_backward_elemt_channels_last_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}
return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] {
auto mean_st = mean.dtype();
auto invstd_st = invstd.dtype();
TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types");
bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat;
bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat;
using accscalar_t = at::acc_type<scalar_t, true>;
if (cuda::detail::canUse32BitIndexMath(self)) {
if (is_half_float || is_bfloat16_float) {
return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}
} else {
if (is_half_float || is_bfloat16_float) {
return batch_norm_backward_elemt_cuda_template<scalar_t, accscalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
} else {
return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
}
}
});
}
std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda(
const Tensor& self, const c10::optional<Tensor>& running_mean_opt,
const c10::optional<Tensor>& running_var_opt, double momentum) {
c10::MaybeOwned<Tensor> running_mean = at::borrow_from_optional_tensor(running_mean_opt);
c10::MaybeOwned<Tensor> running_var = at::borrow_from_optional_tensor(running_var_opt);
const int64_t n_input = self.size(1);
auto options = self.options().dtype(
at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true));
auto save_mean = at::empty({n_input}, options);
auto save_var = at::empty({n_input}, options);
batch_norm_mean_var(self, save_mean, save_var);
TORCH_CHECK(running_mean->defined() == running_var->defined());
if (running_mean->defined()) {
const int64_t N = self.numel() / save_mean.numel();
batch_norm_update_stats(save_mean, save_var, *running_mean, *running_var, momentum, N);
}
return std::tuple<Tensor, Tensor>(save_mean, save_var);
}
} } // namespace at::native
|
8a3e08f8eaecea34b12c1e8d715d323680380ab2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sphere_raycasting.cuh"
//#include "DevUtils.cuh"
#include <cmath>
namespace rch {
#pragma region Utility functions
__device__ void setCol(float *pixel, float r, float g, float b, float a = 1) {
pixel[0] = r; // red
pixel[1] = g; // green
pixel[2] = b; // blue
pixel[3] = a; // alpha
};
__device__ void setCol(float *pixel, const CuVec3& rgb, float a = 1) {
pixel[0] = rgb.x; // red
pixel[1] = rgb.y; // green
pixel[2] = rgb.z; // blue
pixel[3] = a; // alpha
};
#pragma endregion
__global__ void kernel_sphere_raycast(unsigned char *surface, int width,
int height, size_t pitch,
const CuSphere *spheres, std::size_t spheresNum,
const CuLightSrc *lights,
std::size_t ligthsNum, float* invMtxVP) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Skip the redundant threads (quantization)
if (x >= width || y >= height) return;
// Pointer to the pixel at (x,y)
float* pixel = (float *)(surface + y * pitch) + 4 * x;
  // Clear the RenderTarget to a dark grey background colour
setCol(pixel, 0.1f, 0.1f, 0.1f, 1);
// Projection requires NDC
float x2 = (2.0f * x) / width - 1.0f;
float y2 = 1.0f - (2.0f * y) / height; /* Y is flipped */
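  // e.g. with width == 800: x == 0 -> x2 == -1, x == 400 -> x2 == 0, x == 799 -> x2 ~ +1;
  // y2 is mirrored so the top pixel row maps to +1 and the bottom row to -1.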
//// Get Ray's origin and direction in world-space
const CuVec3 rayOrigC = {x2,y2,0};
const CuVec3 rayOrigW = mtxVecRMul_4x4(rayOrigC, 1, invMtxVP);
const CuVec3 rayDestC = {x2,y2,1};
const CuVec3 rayDestW = mtxVecRMul_4x4(rayDestC, 1, invMtxVP);
const CuVec3 rayDirW = CuVec3::getNormalized(rayDestW - rayOrigW);
// The Raycasting
float tBuff = INFINITY; // z-buffer that uses tSol
for (std::size_t i = 0; i < spheresNum; i++) {
float tSol = 0;
if (raySphereIntersection(
rayOrigW, rayDirW, spheres[i].pos, spheres[i].radius, &tSol)) {
const CuVec3 spherePt = rayOrigW + (tSol * rayDirW);
// t-buff checking
if (tSol >= tBuff) {
continue;
}
tBuff = tSol;
const CuVec3 norm = CuVec3::getNormalized(spherePt - spheres[i].pos);
const CuVec3 toEye = -1.f * rayDirW; // unit-vec * -1 = unit-vec
// Since for every pixel there is a toEye vector,
// specular may occur multiple times
const auto col = calcPhongReflection(spherePt, norm, toEye, lights, ligthsNum);
//const auto col = calcBlinnPhongReflection(spherePt, norm, toEye, lights, ligthsNum);
setCol(pixel, col);
}
}
}
void sphere_raycast_to_tex2d(const CuSphere *spheresArr, std::size_t spheresNum,
const CuLightSrc *lightsArr, std::size_t lightsNum,
const cuda::InteropTexture2D &itex,
const XMMATRIX &xmInvVpMtx) {
hipArray *cuArray;
auto err = hipGraphicsSubResourceGetMappedArray(&cuArray,
itex.m_cudaResource, 0, 0);
cuda::gpuErrorCheck(err, "[hipGraphicsSubResourceGetMappedArray]");
float *invVpMtx = cuda::xmmatToMtx4x4(xmInvVpMtx);
float *devInvVpMtx = cuda::copyToDevMem(invVpMtx, 16);
/*
Run CUDA kernel
*/
dim3 Db = dim3(32, 32);
dim3 Dg = dim3(::ceil(itex.m_width / Db.x),
::ceil((itex.m_height) / Db.y) + 1);
hipLaunchKernelGGL(( kernel_sphere_raycast), dim3(Dg), dim3(Db), 0, 0, (unsigned char *)itex.m_cudaLinearMemory,
itex.m_width, itex.m_height, itex.m_pitch,
spheresArr, spheresNum, lightsArr,
lightsNum, devInvVpMtx);
/*
Cleanup
*/
delete[] invVpMtx;
err = hipFree(devInvVpMtx);
cuda::gpuErrorCheck(err, "[sphere_raycast_to_tex2d] hipFree(devInvVpMtx)");
/*
Copy the results
*/
// Copy cudaLinearMemory to the D3D texture, via its mapped form : hipArray
// "itex.m_width * 4" since we re using rgba
err = hipMemcpy2DToArray(cuArray, // dst array
0, 0, // offset
itex.m_cudaLinearMemory, itex.m_pitch, // src
(size_t)itex.m_width * 4 * sizeof(float),
itex.m_height, // extent
hipMemcpyDeviceToDevice); // kind
cuda::gpuErrorCheck(err, "[hipMemcpy2DToArray]");
}
// @See solveRaySphereIntersection() in Maths3D.cpp
__device__ bool raySphereIntersection(const CuVec3 &rayOrigin,
const CuVec3 &rayDirection,
const CuVec3 &spherePos, float radius,
float *sol1, float *sol2) {
/*
Get coefficients for the quadratic equation
*/
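  // A point on the ray is P(t) = O + t*D. Substituting into the sphere equation
  // |P(t) - C|^2 = r^2 and writing L = O - C gives the quadratic
  //   (D.D)*t^2 + 2*(D.L)*t + (L.L - r^2) = 0,
  // whose coefficients a, b and c are computed below.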
// Ray origin transformed to the frame of reference matching
  // the sphere's position.
const CuVec3 tRayOrig = {rayOrigin.x - spherePos.x, rayOrigin.y - spherePos.y,
rayOrigin.z - spherePos.z};
// t^2 * dot(dir, dir)
float a = (rayDirection.x * rayDirection.x) +
(rayDirection.y * rayDirection.y) +
(rayDirection.z * rayDirection.z);
// 2*t * dot(dir, org - C)
float b =
2.0f * ((rayDirection.x * tRayOrig.x) + (rayDirection.y * tRayOrig.y) +
(rayDirection.z * tRayOrig.z));
// dot(orig - C, orig - C) - r^2
float c = ((tRayOrig.x * tRayOrig.x) + (tRayOrig.y * tRayOrig.y) +
(tRayOrig.z * tRayOrig.z)) -
(radius * radius);
/*
Find the intersection points, i.e. solve quadratic eq.
*/
float delta = (b * b) - (4 * a * c);
if (delta < 0.0f) {
return false;
}
const double sqrtDelta = std::sqrt(delta);
if (delta == 0.0f) {
*sol1 = static_cast<float>(-(b / (2.0 * a)));
*sol2 = *sol1;
} else {
*sol1 = static_cast<float>((-b + sqrtDelta) / (2.0 * a));
*sol2 = static_cast<float>((-b - sqrtDelta) / (2.0 * a));
}
return true;
}
// Wrapper that returns the closest solution
__device__ bool raySphereIntersection(const CuVec3 &rayOrigin,
const CuVec3 &rayDirection,
const CuVec3 &spherePos, float radius,
float *sol) {
float s1 = -1;
float s2 = -1;
if (raySphereIntersection(rayOrigin, rayDirection, spherePos, radius, &s1, &s2)) {
if (s1 < s2) { *sol = s1; }
else { *sol = s2; }
return true;
}
return false;
}
// Foreach light : result += Ambient + Diffuse + Specular
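// Blinn-Phong variant: the specular term uses the half vector H = normalize(toEye + toLight)
// and adds ks * clamp(dot(N, H), 0, 1)^m per light; the diffuse term is commented out below.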
__device__ CuVec3 calcBlinnPhongReflection(CuVec3 point, CuVec3 norm,
CuVec3 toEye,
const CuLightSrc *lights,
std::size_t lightNum) {
// constexpr float ka = 0.1f; // ambient constant
constexpr float kd = 0.5f; // diffuse reflection constant
constexpr float ks = 0.5f; // specular reflection constant
constexpr float m = 100.f; // shininess of a material
CuVec3 resCol = {0, 0, 0};
for (int i = 0; i < lightNum; i++) {
const CuVec3 lightCol = lights[i].col;
const CuVec3 lightPos = lights[i].pos;
const CuVec3 toLight = CuVec3::getNormalized(lightPos - point);
const CuVec3 halfVec = CuVec3::getNormalized(toEye + toLight);
float dotLN = CuVec3::dotProduct(toLight, norm);
dotLN = dotLN > 1.f ? 1.f : dotLN;
dotLN = dotLN < 0.f ? 0.f : dotLN;
//// Diffuse
// resCol += lightCol * kd * dotLN;
// Specular
float nh = CuVec3::dotProduct(norm, halfVec);
nh = nh > 1.f ? 1.f : nh;
nh = nh < 0.f ? 0.f : nh;
nh = pow(nh, m);
nh *= ks;
resCol += lightCol * nh;
}
resCol.saturate();
return resCol;
}
// Foreach light : result += Ambient + Diffuse + Specular
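// Classic Phong model: starting from the ambient term ka, each light adds
//   kd * dot(L, N) * lightCol + ks * dot(R, V)^alf * lightCol,  with  R = 2*dot(L, N)*N - L,
// where L points to the light, N is the surface normal and V (toEye) points to the viewer.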
__device__ CuVec3 calcPhongReflection(CuVec3 point, CuVec3 norm, CuVec3 toEye,
const CuLightSrc *lights,
std::size_t lightNum) {
constexpr float ka = 0.1f; // ambient constant
constexpr float kd = 0.5f; // diffuse reflection constant
constexpr float ks = 0.5f; // specular reflection constant
constexpr float alf = 1000.f; // shininess of a material
CuVec3 resCol = {ka, ka, ka}; // ambient
// CuVec3 resCol = {0, 0, 0};
for (int i = 0; i < lightNum; i++) {
const CuVec3 lightCol = lights[i].col;
const CuVec3 lightPos = lights[i].pos;
const CuVec3 toLight = CuVec3::getNormalized(lightPos - point);
const float dotLN = CuVec3::dotProduct(toLight, norm);
const CuVec3 reflection = (2.f * dotLN * norm) - toLight;
//// Diffuse
resCol += lightCol * kd * dotLN;
// Specular
resCol +=
lightCol * ks * std::powf(CuVec3::dotProduct(reflection, toEye), alf);
}
resCol.saturate();
return resCol;
}
} // namespace rch
|
8a3e08f8eaecea34b12c1e8d715d323680380ab2.cu
|
#include "sphere_raycasting.cuh"
//#include "DevUtils.cuh"
#include <cmath>
namespace rch {
#pragma region Utility functions
__device__ void setCol(float *pixel, float r, float g, float b, float a = 1) {
pixel[0] = r; // red
pixel[1] = g; // green
pixel[2] = b; // blue
pixel[3] = a; // alpha
};
__device__ void setCol(float *pixel, const CuVec3& rgb, float a = 1) {
pixel[0] = rgb.x; // red
pixel[1] = rgb.y; // green
pixel[2] = rgb.z; // blue
pixel[3] = a; // alpha
};
#pragma endregion
__global__ void kernel_sphere_raycast(unsigned char *surface, int width,
int height, size_t pitch,
const CuSphere *spheres, std::size_t spheresNum,
const CuLightSrc *lights,
std::size_t ligthsNum, float* invMtxVP) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Skip the redundant threads (quantization)
if (x >= width || y >= height) return;
// Pointer to the pixel at (x,y)
float* pixel = (float *)(surface + y * pitch) + 4 * x;
  // Clear the RenderTarget to a dark grey background colour
setCol(pixel, 0.1f, 0.1f, 0.1f, 1);
// Projection requires NDC
float x2 = (2.0f * x) / width - 1.0f;
float y2 = 1.0f - (2.0f * y) / height; /* Y is flipped */
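  // e.g. with width == 800: x == 0 -> x2 == -1, x == 400 -> x2 == 0, x == 799 -> x2 ~ +1;
  // y2 is mirrored so the top pixel row maps to +1 and the bottom row to -1.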
//// Get Ray's origin and direction in world-space
const CuVec3 rayOrigC = {x2,y2,0};
const CuVec3 rayOrigW = mtxVecRMul_4x4(rayOrigC, 1, invMtxVP);
const CuVec3 rayDestC = {x2,y2,1};
const CuVec3 rayDestW = mtxVecRMul_4x4(rayDestC, 1, invMtxVP);
const CuVec3 rayDirW = CuVec3::getNormalized(rayDestW - rayOrigW);
// The Raycasting
float tBuff = INFINITY; // z-buffer that uses tSol
for (std::size_t i = 0; i < spheresNum; i++) {
float tSol = 0;
if (raySphereIntersection(
rayOrigW, rayDirW, spheres[i].pos, spheres[i].radius, &tSol)) {
const CuVec3 spherePt = rayOrigW + (tSol * rayDirW);
// t-buff checking
if (tSol >= tBuff) {
continue;
}
tBuff = tSol;
const CuVec3 norm = CuVec3::getNormalized(spherePt - spheres[i].pos);
const CuVec3 toEye = -1.f * rayDirW; // unit-vec * -1 = unit-vec
// Since for every pixel there is a toEye vector,
// specular may occur multiple times
const auto col = calcPhongReflection(spherePt, norm, toEye, lights, ligthsNum);
//const auto col = calcBlinnPhongReflection(spherePt, norm, toEye, lights, ligthsNum);
setCol(pixel, col);
}
}
}
void sphere_raycast_to_tex2d(const CuSphere *spheresArr, std::size_t spheresNum,
const CuLightSrc *lightsArr, std::size_t lightsNum,
const cuda::InteropTexture2D &itex,
const XMMATRIX &xmInvVpMtx) {
cudaArray *cuArray;
auto err = cudaGraphicsSubResourceGetMappedArray(&cuArray,
itex.m_cudaResource, 0, 0);
cuda::gpuErrorCheck(err, "[cudaGraphicsSubResourceGetMappedArray]");
float *invVpMtx = cuda::xmmatToMtx4x4(xmInvVpMtx);
float *devInvVpMtx = cuda::copyToDevMem(invVpMtx, 16);
/*
Run CUDA kernel
*/
dim3 Db = dim3(32, 32);
dim3 Dg = dim3(std::ceil(itex.m_width / Db.x),
std::ceil((itex.m_height) / Db.y) + 1);
kernel_sphere_raycast<<<Dg, Db>>>((unsigned char *)itex.m_cudaLinearMemory,
itex.m_width, itex.m_height, itex.m_pitch,
spheresArr, spheresNum, lightsArr,
lightsNum, devInvVpMtx);
/*
Cleanup
*/
delete[] invVpMtx;
err = cudaFree(devInvVpMtx);
cuda::gpuErrorCheck(err, "[sphere_raycast_to_tex2d] cudaFree(devInvVpMtx)");
/*
Copy the results
*/
// Copy cudaLinearMemory to the D3D texture, via its mapped form : cudaArray
// "itex.m_width * 4" since we re using rgba
err = cudaMemcpy2DToArray(cuArray, // dst array
0, 0, // offset
itex.m_cudaLinearMemory, itex.m_pitch, // src
(size_t)itex.m_width * 4 * sizeof(float),
itex.m_height, // extent
cudaMemcpyDeviceToDevice); // kind
cuda::gpuErrorCheck(err, "[cudaMemcpy2DToArray]");
}
// @See solveRaySphereIntersection() in Maths3D.cpp
__device__ bool raySphereIntersection(const CuVec3 &rayOrigin,
const CuVec3 &rayDirection,
const CuVec3 &spherePos, float radius,
float *sol1, float *sol2) {
/*
Get coefficients for the quadratic equation
*/
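  // A point on the ray is P(t) = O + t*D. Substituting into the sphere equation
  // |P(t) - C|^2 = r^2 and writing L = O - C gives the quadratic
  //   (D.D)*t^2 + 2*(D.L)*t + (L.L - r^2) = 0,
  // whose coefficients a, b and c are computed below.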
// Ray origin transformed to the frame of reference matching
  // the sphere's position.
const CuVec3 tRayOrig = {rayOrigin.x - spherePos.x, rayOrigin.y - spherePos.y,
rayOrigin.z - spherePos.z};
// t^2 * dot(dir, dir)
float a = (rayDirection.x * rayDirection.x) +
(rayDirection.y * rayDirection.y) +
(rayDirection.z * rayDirection.z);
// 2*t * dot(dir, org - C)
float b =
2.0f * ((rayDirection.x * tRayOrig.x) + (rayDirection.y * tRayOrig.y) +
(rayDirection.z * tRayOrig.z));
// dot(orig - C, orig - C) - r^2
float c = ((tRayOrig.x * tRayOrig.x) + (tRayOrig.y * tRayOrig.y) +
(tRayOrig.z * tRayOrig.z)) -
(radius * radius);
/*
Find the intersection points, i.e. solve quadratic eq.
*/
float delta = (b * b) - (4 * a * c);
if (delta < 0.0f) {
return false;
}
const double sqrtDelta = std::sqrt(delta);
if (delta == 0.0f) {
*sol1 = static_cast<float>(-(b / (2.0 * a)));
*sol2 = *sol1;
} else {
*sol1 = static_cast<float>((-b + sqrtDelta) / (2.0 * a));
*sol2 = static_cast<float>((-b - sqrtDelta) / (2.0 * a));
}
return true;
}
// Wrapper that returns the closest solution
__device__ bool raySphereIntersection(const CuVec3 &rayOrigin,
const CuVec3 &rayDirection,
const CuVec3 &spherePos, float radius,
float *sol) {
float s1 = -1;
float s2 = -1;
if (raySphereIntersection(rayOrigin, rayDirection, spherePos, radius, &s1, &s2)) {
if (s1 < s2) { *sol = s1; }
else { *sol = s2; }
return true;
}
return false;
}
// Foreach light : result += Ambient + Diffuse + Specular
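// Blinn-Phong variant: the specular term uses the half vector H = normalize(toEye + toLight)
// and adds ks * clamp(dot(N, H), 0, 1)^m per light; the diffuse term is commented out below.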
__device__ CuVec3 calcBlinnPhongReflection(CuVec3 point, CuVec3 norm,
CuVec3 toEye,
const CuLightSrc *lights,
std::size_t lightNum) {
// constexpr float ka = 0.1f; // ambient constant
constexpr float kd = 0.5f; // diffuse reflection constant
constexpr float ks = 0.5f; // specular reflection constant
constexpr float m = 100.f; // shininess of a material
CuVec3 resCol = {0, 0, 0};
for (int i = 0; i < lightNum; i++) {
const CuVec3 lightCol = lights[i].col;
const CuVec3 lightPos = lights[i].pos;
const CuVec3 toLight = CuVec3::getNormalized(lightPos - point);
const CuVec3 halfVec = CuVec3::getNormalized(toEye + toLight);
float dotLN = CuVec3::dotProduct(toLight, norm);
dotLN = dotLN > 1.f ? 1.f : dotLN;
dotLN = dotLN < 0.f ? 0.f : dotLN;
//// Diffuse
// resCol += lightCol * kd * dotLN;
// Specular
float nh = CuVec3::dotProduct(norm, halfVec);
nh = nh > 1.f ? 1.f : nh;
nh = nh < 0.f ? 0.f : nh;
nh = pow(nh, m);
nh *= ks;
resCol += lightCol * nh;
}
resCol.saturate();
return resCol;
}
// Foreach light : result += Ambient + Diffuse + Specular
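// Classic Phong model: starting from the ambient term ka, each light adds
//   kd * dot(L, N) * lightCol + ks * dot(R, V)^alf * lightCol,  with  R = 2*dot(L, N)*N - L,
// where L points to the light, N is the surface normal and V (toEye) points to the viewer.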
__device__ CuVec3 calcPhongReflection(CuVec3 point, CuVec3 norm, CuVec3 toEye,
const CuLightSrc *lights,
std::size_t lightNum) {
constexpr float ka = 0.1f; // ambient constant
constexpr float kd = 0.5f; // diffuse reflection constant
constexpr float ks = 0.5f; // specular reflection constant
constexpr float alf = 1000.f; // shininess of a material
CuVec3 resCol = {ka, ka, ka}; // ambient
// CuVec3 resCol = {0, 0, 0};
for (int i = 0; i < lightNum; i++) {
const CuVec3 lightCol = lights[i].col;
const CuVec3 lightPos = lights[i].pos;
const CuVec3 toLight = CuVec3::getNormalized(lightPos - point);
const float dotLN = CuVec3::dotProduct(toLight, norm);
const CuVec3 reflection = (2.f * dotLN * norm) - toLight;
//// Diffuse
resCol += lightCol * kd * dotLN;
// Specular
resCol +=
lightCol * ks * std::powf(CuVec3::dotProduct(reflection, toEye), alf);
}
resCol.saturate();
return resCol;
}
} // namespace rch
|
c8af2b7dd5e1e91e69bcf040bf05bba8c5b269b6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "CudaMaterials.cuh"
__device__ bool LambertianScatter(const cudaRay& aRay, const cudaHitRecord& aHitRecord, hiprandState_t* aRandState, cuda3DVector& anAttenuation, cudaRay& aScattered)
{
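    // Diffuse bounce: scatter about the surface normal by adding a random vector inside the
    // unit sphere, a common approximation of Lambertian scattering in simple path tracers.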
cuda3DVector scatterDirection = cuda3DVector{ aHitRecord.hitNormal } + CudaGetRandVectorInUnitSphere(aRandState);
if (CudaCheckNearZero(scatterDirection))
scatterDirection = aHitRecord.hitNormal; //avoid NaNs
aScattered = cudaRay{ aHitRecord.point, scatterDirection };
anAttenuation = aHitRecord.material.albedo;
return true;
}
__device__ bool MetalScatter(const cudaRay& aRay, const cudaHitRecord& aHitRecord, hiprandState_t* aRandState, cuda3DVector& anAttenuation, cudaRay& aScattered)
{
cuda3DVector reflected = CudaReflect(CudaNormalize(aRay.direction), aHitRecord.hitNormal);
aScattered = cudaRay{ aHitRecord.point, reflected + (CudaGetRandVectorInUnitSphere(aRandState) * aHitRecord.material.fuzz) };
anAttenuation = aHitRecord.material.albedo;
return (CudaDotProduct(aScattered.direction, aHitRecord.hitNormal) > 0);
}
__device__ bool DielectricScatter(const cudaRay& aRay, const cudaHitRecord& aHitRecord, hiprandState_t* aRandState, cuda3DVector& anAttenuation, cudaRay& aScattered)
{
anAttenuation = cuda3DVector{ 1.0f, 1.0f, 1.0f };
float refractionRatio = (CudaDotProduct(aRay.direction, aHitRecord.hitNormal) < 0.0f) ? 1.0f / aHitRecord.material.aRefractionIndex : aHitRecord.material.aRefractionIndex;
cuda3DVector direction = CudaNormalize(aRay.direction);
float cosTheta = fmin(CudaDotProduct(-direction, aHitRecord.hitNormal), 1.0f);
float sinTheta = sqrt(1.0f - cosTheta * cosTheta);
bool cannotRefract = refractionRatio * sinTheta > 1.0f;
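    // Snell's law: sin(theta_t) = refractionRatio * sin(theta_i). If that product exceeds 1 there
    // is no transmitted ray, so the light undergoes total internal reflection.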
cuda3DVector bounced;
if (cannotRefract) //Alternative: Use Schlick approximation || CudaGetReflectante(cosTheta, refractionRatio) > CudaGetRandInRange(aRandState, 0.0f, 1.0f))
bounced = CudaReflect(direction, aHitRecord.hitNormal);
else
bounced = CudaRefract(direction, aHitRecord.hitNormal, refractionRatio);
aScattered = cudaRay{ aHitRecord.point, bounced };
return true;
}
|
c8af2b7dd5e1e91e69bcf040bf05bba8c5b269b6.cu
|
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "CudaMaterials.cuh"
__device__ bool LambertianScatter(const cudaRay& aRay, const cudaHitRecord& aHitRecord, curandState* aRandState, cuda3DVector& anAttenuation, cudaRay& aScattered)
{
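    // Diffuse bounce: scatter about the surface normal by adding a random vector inside the
    // unit sphere, a common approximation of Lambertian scattering in simple path tracers.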
cuda3DVector scatterDirection = cuda3DVector{ aHitRecord.hitNormal } + CudaGetRandVectorInUnitSphere(aRandState);
if (CudaCheckNearZero(scatterDirection))
scatterDirection = aHitRecord.hitNormal; //avoid NaNs
aScattered = cudaRay{ aHitRecord.point, scatterDirection };
anAttenuation = aHitRecord.material.albedo;
return true;
}
__device__ bool MetalScatter(const cudaRay& aRay, const cudaHitRecord& aHitRecord, curandState* aRandState, cuda3DVector& anAttenuation, cudaRay& aScattered)
{
cuda3DVector reflected = CudaReflect(CudaNormalize(aRay.direction), aHitRecord.hitNormal);
aScattered = cudaRay{ aHitRecord.point, reflected + (CudaGetRandVectorInUnitSphere(aRandState) * aHitRecord.material.fuzz) };
anAttenuation = aHitRecord.material.albedo;
return (CudaDotProduct(aScattered.direction, aHitRecord.hitNormal) > 0);
}
__device__ bool DielectricScatter(const cudaRay& aRay, const cudaHitRecord& aHitRecord, curandState* aRandState, cuda3DVector& anAttenuation, cudaRay& aScattered)
{
anAttenuation = cuda3DVector{ 1.0f, 1.0f, 1.0f };
float refractionRatio = (CudaDotProduct(aRay.direction, aHitRecord.hitNormal) < 0.0f) ? 1.0f / aHitRecord.material.aRefractionIndex : aHitRecord.material.aRefractionIndex;
cuda3DVector direction = CudaNormalize(aRay.direction);
float cosTheta = fmin(CudaDotProduct(-direction, aHitRecord.hitNormal), 1.0f);
float sinTheta = sqrt(1.0f - cosTheta * cosTheta);
bool cannotRefract = refractionRatio * sinTheta > 1.0f;
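    // Snell's law: sin(theta_t) = refractionRatio * sin(theta_i). If that product exceeds 1 there
    // is no transmitted ray, so the light undergoes total internal reflection.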
cuda3DVector bounced;
if (cannotRefract) //Alternative: Use Schlick approximation || CudaGetReflectante(cosTheta, refractionRatio) > CudaGetRandInRange(aRandState, 0.0f, 1.0f))
bounced = CudaReflect(direction, aHitRecord.hitNormal);
else
bounced = CudaRefract(direction, aHitRecord.hitNormal, refractionRatio);
aScattered = cudaRay{ aHitRecord.point, bounced };
return true;
}
|
5694a1e49a89e368d312af9160173407cc593c2c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "pcl/gpu/utils/device/funcattrib.hpp"
#include "pcl/gpu/features/device/eigen.hpp"
using namespace pcl::gpu;
namespace pcl
{
namespace device
{
struct NormalsEstimator
{
enum
{
CTA_SIZE = 256,
WAPRS = CTA_SIZE / Warp::WARP_SIZE,
MIN_NEIGHBOORS = 1
};
struct plus
{
__forceinline__ __device__ float operator()(const float &lhs, const volatile float& rhs) const { return lhs + rhs; }
};
PtrStep<int> indices;
const int *sizes;
const PointType *points;
PtrSz<NormalType> normals;
__device__ __forceinline__ void operator()() const
{
__shared__ float cov_buffer[6][CTA_SIZE + 1];
int warp_idx = Warp::id();
int idx = blockIdx.x * WAPRS + warp_idx;
if (idx >= normals.size)
return;
int size = sizes[idx];
int lane = Warp::laneId();
if (size < MIN_NEIGHBOORS)
{
const float NaN = numeric_limits<float>::quiet_NaN();
if (lane == 0)
normals.data[idx] = make_float4(NaN, NaN, NaN, NaN);
}
const int *ibeg = indices.ptr(idx);
const int *iend = ibeg + size;
      // compute the centroid
float3 c = make_float3(0.f, 0.f, 0.f);
for(const int *t = ibeg + lane; t < iend; t += Warp::STRIDE)
c += fetch(*t);
volatile float *buffer = &cov_buffer[0][threadIdx.x - lane];
c.x = Warp::reduce(buffer, c.x, plus());
c.y = Warp::reduce(buffer, c.y, plus());
c.z = Warp::reduce(buffer, c.z, plus());
c *= 1.f/size;
      // nvcc bug workaround: without this fence, c.z reads as 0 at the line: float3 d = fetch(*t) - c;
__threadfence_block();
//compute covariance matrix
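      // The covariance sum over neighbours of (p - c)*(p - c)^T is a symmetric 3x3 matrix,
      // so only its six unique entries (0,0), (0,1), (0,2), (1,1), (1,2), (2,2) are accumulated.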
int tid = threadIdx.x;
for(int i = 0; i < 6; ++i)
cov_buffer[i][tid] = 0.f;
for(const int *t = ibeg + lane; t < iend; t += Warp::STRIDE)
{
//float3 d = fetch(*t) - c;
float3 p = fetch(*t);
float3 d = p - c;
cov_buffer[0][tid] += d.x * d.x; //cov (0, 0)
cov_buffer[1][tid] += d.x * d.y; //cov (0, 1)
cov_buffer[2][tid] += d.x * d.z; //cov (0, 2)
cov_buffer[3][tid] += d.y * d.y; //cov (1, 1)
cov_buffer[4][tid] += d.y * d.z; //cov (1, 2)
cov_buffer[5][tid] += d.z * d.z; //cov (2, 2)
}
Warp::reduce(&cov_buffer[0][tid - lane], plus());
Warp::reduce(&cov_buffer[1][tid - lane], plus());
Warp::reduce(&cov_buffer[2][tid - lane], plus());
Warp::reduce(&cov_buffer[3][tid - lane], plus());
Warp::reduce(&cov_buffer[4][tid - lane], plus());
Warp::reduce(&cov_buffer[5][tid - lane], plus());
volatile float *cov = &cov_buffer[0][tid-lane];
if (lane < 6)
cov[lane] = cov_buffer[lane][tid-lane];
//solvePlaneParameters
if (lane == 0)
{
// Extract the eigenvalues and eigenvectors
typedef Eigen33::Mat33 Mat33;
Eigen33 eigen33(&cov[lane]);
Mat33& tmp = (Mat33&)cov_buffer[1][tid - lane];
Mat33& vec_tmp = (Mat33&)cov_buffer[2][tid - lane];
Mat33& evecs = (Mat33&)cov_buffer[3][tid - lane];
float3 evals;
eigen33.compute(tmp, vec_tmp, evecs, evals);
          // evecs[0] - eigenvector with the lowest eigenvalue
          // Compute the curvature (surface change) as lambda_min / (lambda_0 + lambda_1 + lambda_2)
float eig_sum = evals.x + evals.y + evals.z;
float curvature = (eig_sum == 0) ? 0 : fabsf( evals.x / eig_sum );
NormalType output;
output.w = curvature;
// The normalization is not necessary, since the eigenvectors from Eigen33 are already normalized
output.x = evecs[0].x;
output.y = evecs[0].y;
output.z = evecs[0].z;
normals.data[idx] = output;
}
}
__device__ __forceinline__ float3 fetch(int idx) const
{
/*PointType p = points[idx];
return make_float3(p.x, p.y, p.z);*/
return *(float3*)&points[idx];
}
};
__global__ void EstimateNormaslKernel(const NormalsEstimator est) { est(); }
struct FlipNormal
{
const PointType* cloud;
float3 vp;
mutable PtrSz<NormalType> normals;
__device__ __forceinline__ void operator()(int idx, const float3& p) const
{
NormalType n = normals[idx];
float vp_x = vp.x - p.x;
float vp_y = vp.y - p.y;
float vp_z = vp.z - p.z;
// Dot product between the (viewpoint - point) and the plane normal
float cos_theta = vp_x * n.x + vp_y * n.y + vp_z * n.z;
// Flip the plane normal
if (cos_theta < 0)
{
n.x *= -1;
n.y *= -1;
n.z *= -1;
normals[idx] = n;
}
}
};
__global__ void flipNormalTowardsViewpointKernel(const FlipNormal flip)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < flip.normals.size)
{
float3 p = *(float3*)&flip.cloud[idx];
flip(idx, p);
}
}
__global__ void flipNormalTowardsViewpointKernel(const FlipNormal flip, const int* indices)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < flip.normals.size)
{
float3 p = *(float3*)&flip.cloud[indices[idx]];
flip(idx, p);
}
}
}
}
void pcl::device::computeNormals(const PointCloud& cloud, const NeighborIndices& nn_indices, Normals& normals)
{
NormalsEstimator est;
est.indices = nn_indices;
est.sizes = nn_indices.sizes;
est.points = cloud;
est.normals = normals;
//printFuncAttrib(EstimateNormaslKernel);
int block = NormalsEstimator::CTA_SIZE;
int grid = divUp((int)normals.size(), NormalsEstimator::WAPRS);
hipLaunchKernelGGL(( EstimateNormaslKernel), dim3(grid), dim3(block), 0, 0, est);
cudaSafeCall( hipGetLastError() );
cudaSafeCall(hipDeviceSynchronize());
}
void pcl::device::flipNormalTowardsViewpoint(const PointCloud& cloud, const float3& vp, Normals& normals)
{
int block = 256;
int grid = divUp((int)normals.size(), block);
FlipNormal flip;
flip.cloud = cloud;
flip.vp = vp;
flip.normals = normals;
hipLaunchKernelGGL(( flipNormalTowardsViewpointKernel), dim3(grid), dim3(block), 0, 0, flip);
cudaSafeCall( hipGetLastError() );
cudaSafeCall(hipDeviceSynchronize());
}
void pcl::device::flipNormalTowardsViewpoint(const PointCloud& cloud, const Indices& indices, const float3& vp, Normals& normals)
{
int block = 256;
int grid = divUp((int)normals.size(), block);
FlipNormal flip;
flip.cloud = cloud;
flip.vp = vp;
flip.normals = normals;
hipLaunchKernelGGL(( flipNormalTowardsViewpointKernel), dim3(grid), dim3(block), 0, 0, flip, indices.ptr());
cudaSafeCall( hipGetLastError() );
cudaSafeCall(hipDeviceSynchronize());
}
|
5694a1e49a89e368d312af9160173407cc593c2c.cu
|
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "pcl/gpu/utils/device/funcattrib.hpp"
#include "pcl/gpu/features/device/eigen.hpp"
using namespace pcl::gpu;
namespace pcl
{
namespace device
{
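// Warp-per-point normal estimation: each warp gathers the precomputed neighbor
// indices of one query point, accumulates the centroid and the upper-triangular
// 3x3 covariance with warp-wide reductions, and lane 0 takes the eigenvector of
// the smallest eigenvalue (via Eigen33) as the plane normal, storing the surface
// curvature estimate in the w component.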
struct NormalsEstimator
{
enum
{
CTA_SIZE = 256,
WAPRS = CTA_SIZE / Warp::WARP_SIZE,
MIN_NEIGHBOORS = 1
};
struct plus
{
__forceinline__ __device__ float operator()(const float &lhs, const volatile float& rhs) const { return lhs + rhs; }
};
PtrStep<int> indices;
const int *sizes;
const PointType *points;
PtrSz<NormalType> normals;
__device__ __forceinline__ void operator()() const
{
__shared__ float cov_buffer[6][CTA_SIZE + 1];
int warp_idx = Warp::id();
int idx = blockIdx.x * WAPRS + warp_idx;
if (idx >= normals.size)
return;
int size = sizes[idx];
int lane = Warp::laneId();
if (size < MIN_NEIGHBOORS)
{
const float NaN = numeric_limits<float>::quiet_NaN();
if (lane == 0)
normals.data[idx] = make_float4(NaN, NaN, NaN, NaN);
}
const int *ibeg = indices.ptr(idx);
const int *iend = ibeg + size;
//compute centroid
float3 c = make_float3(0.f, 0.f, 0.f);
for(const int *t = ibeg + lane; t < iend; t += Warp::STRIDE)
c += fetch(*t);
volatile float *buffer = &cov_buffer[0][threadIdx.x - lane];
c.x = Warp::reduce(buffer, c.x, plus());
c.y = Warp::reduce(buffer, c.y, plus());
c.z = Warp::reduce(buffer, c.z, plus());
c *= 1.f/size;
//nvcc bug workaround: if this fence is commented out, c.z == 0 at the line "float3 d = fetch(*t) - c;"
__threadfence_block();
//compute covariance matrix
int tid = threadIdx.x;
for(int i = 0; i < 6; ++i)
cov_buffer[i][tid] = 0.f;
for(const int *t = ibeg + lane; t < iend; t += Warp::STRIDE)
{
//float3 d = fetch(*t) - c;
float3 p = fetch(*t);
float3 d = p - c;
cov_buffer[0][tid] += d.x * d.x; //cov (0, 0)
cov_buffer[1][tid] += d.x * d.y; //cov (0, 1)
cov_buffer[2][tid] += d.x * d.z; //cov (0, 2)
cov_buffer[3][tid] += d.y * d.y; //cov (1, 1)
cov_buffer[4][tid] += d.y * d.z; //cov (1, 2)
cov_buffer[5][tid] += d.z * d.z; //cov (2, 2)
}
Warp::reduce(&cov_buffer[0][tid - lane], plus());
Warp::reduce(&cov_buffer[1][tid - lane], plus());
Warp::reduce(&cov_buffer[2][tid - lane], plus());
Warp::reduce(&cov_buffer[3][tid - lane], plus());
Warp::reduce(&cov_buffer[4][tid - lane], plus());
Warp::reduce(&cov_buffer[5][tid - lane], plus());
volatile float *cov = &cov_buffer[0][tid-lane];
if (lane < 6)
cov[lane] = cov_buffer[lane][tid-lane];
//solvePlaneParameters
if (lane == 0)
{
// Extract the eigenvalues and eigenvectors
typedef Eigen33::Mat33 Mat33;
Eigen33 eigen33(&cov[lane]);
Mat33& tmp = (Mat33&)cov_buffer[1][tid - lane];
Mat33& vec_tmp = (Mat33&)cov_buffer[2][tid - lane];
Mat33& evecs = (Mat33&)cov_buffer[3][tid - lane];
float3 evals;
eigen33.compute(tmp, vec_tmp, evecs, evals);
//evecs[0] - eigenvector with the lowest eigenvalue
// Compute the curvature surface change
float eig_sum = evals.x + evals.y + evals.z;
float curvature = (eig_sum == 0) ? 0 : fabsf( evals.x / eig_sum );
NormalType output;
output.w = curvature;
// The normalization is not necessary, since the eigenvectors from Eigen33 are already normalized
output.x = evecs[0].x;
output.y = evecs[0].y;
output.z = evecs[0].z;
normals.data[idx] = output;
}
}
__device__ __forceinline__ float3 fetch(int idx) const
{
/*PointType p = points[idx];
return make_float3(p.x, p.y, p.z);*/
return *(float3*)&points[idx];
}
};
__global__ void EstimateNormaslKernel(const NormalsEstimator est) { est(); }
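// Flips a stored normal so it points towards the viewpoint vp: when the dot
// product of (vp - p) with the normal is negative, the normal is negated in place.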
struct FlipNormal
{
const PointType* cloud;
float3 vp;
mutable PtrSz<NormalType> normals;
__device__ __forceinline__ void operator()(int idx, const float3& p) const
{
NormalType n = normals[idx];
float vp_x = vp.x - p.x;
float vp_y = vp.y - p.y;
float vp_z = vp.z - p.z;
// Dot product between the (viewpoint - point) and the plane normal
float cos_theta = vp_x * n.x + vp_y * n.y + vp_z * n.z;
// Flip the plane normal
if (cos_theta < 0)
{
n.x *= -1;
n.y *= -1;
n.z *= -1;
normals[idx] = n;
}
}
};
__global__ void flipNormalTowardsViewpointKernel(const FlipNormal flip)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < flip.normals.size)
{
float3 p = *(float3*)&flip.cloud[idx];
flip(idx, p);
}
}
__global__ void flipNormalTowardsViewpointKernel(const FlipNormal flip, const int* indices)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < flip.normals.size)
{
float3 p = *(float3*)&flip.cloud[indices[idx]];
flip(idx, p);
}
}
}
}
void pcl::device::computeNormals(const PointCloud& cloud, const NeighborIndices& nn_indices, Normals& normals)
{
NormalsEstimator est;
est.indices = nn_indices;
est.sizes = nn_indices.sizes;
est.points = cloud;
est.normals = normals;
//printFuncAttrib(EstimateNormaslKernel);
int block = NormalsEstimator::CTA_SIZE;
int grid = divUp((int)normals.size(), NormalsEstimator::WAPRS);
EstimateNormaslKernel<<<grid, block>>>(est);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall(cudaDeviceSynchronize());
}
void pcl::device::flipNormalTowardsViewpoint(const PointCloud& cloud, const float3& vp, Normals& normals)
{
int block = 256;
int grid = divUp((int)normals.size(), block);
FlipNormal flip;
flip.cloud = cloud;
flip.vp = vp;
flip.normals = normals;
flipNormalTowardsViewpointKernel<<<grid, block>>>(flip);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall(cudaDeviceSynchronize());
}
void pcl::device::flipNormalTowardsViewpoint(const PointCloud& cloud, const Indices& indices, const float3& vp, Normals& normals)
{
int block = 256;
int grid = divUp((int)normals.size(), block);
FlipNormal flip;
flip.cloud = cloud;
flip.vp = vp;
flip.normals = normals;
flipNormalTowardsViewpointKernel<<<grid, block>>>(flip, indices.ptr());
cudaSafeCall( cudaGetLastError() );
cudaSafeCall(cudaDeviceSynchronize());
}
|
a15a0d56861298cb5c9663477dbb05bf98057660.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "packcoo_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int num_entries = 1;
int *row_indices = NULL;
hipMalloc(&row_indices, XSIZE*YSIZE);
int *column_indices = NULL;
hipMalloc(&column_indices, XSIZE*YSIZE);
int *aggridx = NULL;
hipMalloc(&aggridx, XSIZE*YSIZE);
int *partidx = NULL;
hipMalloc(&partidx, XSIZE*YSIZE);
int *partlabel = NULL;
hipMalloc(&partlabel, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((packcoo_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, num_entries, row_indices, column_indices, aggridx, partidx, partlabel);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((packcoo_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, num_entries, row_indices, column_indices, aggridx, partidx, partlabel);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((packcoo_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, num_entries, row_indices, column_indices, aggridx, partidx, partlabel);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
a15a0d56861298cb5c9663477dbb05bf98057660.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "packcoo_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int num_entries = 1;
int *row_indices = NULL;
cudaMalloc(&row_indices, XSIZE*YSIZE);
int *column_indices = NULL;
cudaMalloc(&column_indices, XSIZE*YSIZE);
int *aggridx = NULL;
cudaMalloc(&aggridx, XSIZE*YSIZE);
int *partidx = NULL;
cudaMalloc(&partidx, XSIZE*YSIZE);
int *partlabel = NULL;
cudaMalloc(&partlabel, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
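// Round XSIZE/YSIZE up to the next multiple of the block dimensions so the
// grid computed below fully covers the problem domain.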
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
packcoo_kernel<<<gridBlock,threadBlock>>>(num_entries,row_indices,column_indices,aggridx,partidx,partlabel);
cudaDeviceSynchronize();
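// Warm-up: 10 untimed launches so the timed loop below excludes first-launch overheads.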
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
packcoo_kernel<<<gridBlock,threadBlock>>>(num_entries,row_indices,column_indices,aggridx,partidx,partlabel);
}
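// Timed region: 1000 launches measured with steady_clock. Kernel launches are
// asynchronous and there is no synchronize before 'end', so the reported time
// may largely reflect launch/enqueue overhead rather than kernel execution time.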
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
packcoo_kernel<<<gridBlock,threadBlock>>>(num_entries,row_indices,column_indices,aggridx,partidx,partlabel);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
216ba01df84b94a4881ea5e2d2325cf02379ed86.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <paddle/fluid/platform/device_context.h>
#include <algorithm>
#include <type_traits>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void transpose(T *src,
T *dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head) {
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) +
seq_id * head_num * size_per_head + head_id * size_per_head +
threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
template <typename T>
inline __device__ T add_func(T a, T b);
template <>
__device__ float add_func<float>(float a, float b) {
return a + b;
}
template <>
__device__ float2 add_func<float2>(float2 a, float2 b) {
float2 c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
template <>
__device__ float4 add_func<float4>(float4 a, float4 b) {
float4 c;
c.x = a.x + b.x;
c.y = a.y + b.y;
c.z = a.z + b.z;
c.w = a.w + b.w;
return c;
}
#if defined(PADDLE_WITH_CUDA)
template <>
__device__ half2 add_func<half2>(half2 a, half2 b) {
#if __CUDA_ARCH__ >= 530
return __hadd2(a, b);
#else
return half2(__float2half(__half2float(a.x) + __half2float(b.x)),
__float2half(__half2float(a.y) + __half2float(b.y)));
#endif
}
template <>
__device__ half add_func<half>(half a, half b) {
#if __CUDA_ARCH__ >= 530
return __hadd(a, b);
#else
return __float2half(__half2float(a) + __half2float(b));
#endif
}
#endif
template <typename T>
__global__ void TransposeQkvKernel(const int H,
const T *input,
const T *bias,
T *output) {
// Input: BxSx3xNxH
// Bias: 3xNxH
// Output: 3xBxNxSxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z;
const int N = blockDim.y;
const int S = gridDim.x;
const int B = gridDim.y;
const int NH = N * H;
const int NHS = NH * S;
const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3;
const int bias_offset = m * NH + n * H;
const int out_offset = s * H + n * S * H + b * NHS + m * NHS * B;
const int i = threadIdx.x;
output[out_offset + i] =
add_func(input[in_offset + i], bias[bias_offset + i]);
}
template <typename T>
void TransQKVWithBias(const int batch,
const int seq_len,
const int head_size,
const int head_num,
const T *input,
const T *bias,
T *output,
gpuStream_t stream);
template <>
void TransQKVWithBias(const int batch,
const int seq_len,
const int head_size,
const int head_num,
const float *input,
const float *bias,
float *output,
gpuStream_t stream) {
// BxSx3xNxH + 3xNxH -> 3xBxNxSxH
int scratch_size = batch * head_num * seq_len * seq_len;
const dim3 grid(seq_len, batch, 3);
// scratch % 4 == 0 to ensure the alignment
if (head_size % 4 == 0 && scratch_size % 4 == 0) {
const int h = head_size / 4;
const float4 *input4 = reinterpret_cast<const float4 *>(input);
const float4 *bias4 = reinterpret_cast<const float4 *>(bias);
float4 *output4 = reinterpret_cast<float4 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024 * 4));
hipLaunchKernelGGL(( TransposeQkvKernel<float4>)
, dim3(grid), dim3(block), 0, stream, h, input4, bias4, output4);
} else if (head_size % 2 == 0 && scratch_size % 2 == 0) {
const int h = head_size / 2;
const float2 *input2 = reinterpret_cast<const float2 *>(input);
const float2 *bias2 = reinterpret_cast<const float2 *>(bias);
float2 *output2 = reinterpret_cast<float2 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024 * 2));
hipLaunchKernelGGL(( TransposeQkvKernel<float2>)
, dim3(grid), dim3(block), 0, stream, h, input2, bias2, output2);
} else {
const dim3 block(head_size, head_num, 1);
// limit head_size * head_num to max block size(1024).
PADDLE_ENFORCE_LE(head_size * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024));
hipLaunchKernelGGL(( TransposeQkvKernel<float>)
, dim3(grid), dim3(block), 0, stream, head_size, input, bias, output);
}
}
#if defined(PADDLE_WITH_CUDA)
template <>
void TransQKVWithBias(const int batch,
const int seq_len,
const int head_size,
const int head_num,
const platform::float16 *input,
const platform::float16 *bias,
platform::float16 *output,
gpuStream_t stream) {
// BxSx3xNxH + 3xNxH -> 3xBxNxSxH
int scratch_size = batch * head_num * seq_len * seq_len;
const dim3 grid(seq_len, batch, 3);
if (head_size % 2 == 0 && scratch_size % 2 == 0) {
const int h = head_size / 2;
const half2 *input2 = reinterpret_cast<const half2 *>(input);
const half2 *bias2 = reinterpret_cast<const half2 *>(bias);
half2 *output2 = reinterpret_cast<half2 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024 * 2));
hipLaunchKernelGGL(( TransposeQkvKernel<half2>)
, dim3(grid), dim3(block), 0, stream, h, input2, bias2, output2);
} else {
const dim3 block(head_size, head_num, 1);
const half *input_half = reinterpret_cast<const half *>(input);
const half *bias_half = reinterpret_cast<const half *>(bias);
half *output_half = reinterpret_cast<half *>(output);
// limit head_size * head_num to max block size(1024).
PADDLE_ENFORCE_LE(head_size * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024));
hipLaunchKernelGGL(( TransposeQkvKernel<half>), dim3(grid), dim3(block), 0, stream,
head_size, input_half, bias_half, output_half);
}
}
#endif
inline int round_up(int seq_len, int multiple = 32) {
PADDLE_ENFORCE_GT(
multiple,
0,
platform::errors::InvalidArgument(
"multiple should be a positive numberbut it's (%d)", multiple));
return ((seq_len + multiple - 1) / multiple) * multiple;
}
template <typename T>
__global__ void broadcast(const T *src,
T *dst,
const int seq_len,
const int head_num) {
int batch_id = blockIdx.x / (head_num * seq_len);
int dst_offset = blockIdx.x * seq_len;
if (threadIdx.x < seq_len) {
dst[threadIdx.x + dst_offset] = src[threadIdx.x + batch_id * seq_len];
}
}
template <typename DeviceContext, typename T>
class MultiHeadMatMulV2Kernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
using Tensor = framework::Tensor;
auto *input = context.Input<framework::Tensor>("Input");
auto *w = context.Input<framework::Tensor>("W");
auto *bias = context.Input<framework::Tensor>("Bias");
auto &bias_qk = GET_DATA_SAFELY(context.Input<framework::Tensor>("BiasQK"),
"Input",
"BiasQK",
"MultiHeadMatMulV2");
auto *input_d = input->data<T>();
auto *w_d = w->data<T>();
auto *bias_d = bias->data<T>();
auto *bias_qk_d = bias_qk.template data<T>();
T scale = static_cast<T>(context.Attr<float>("alpha"));
int head_number = context.Attr<int>("head_number");
// compute q*k with eltadd
auto &device_ctx = context.template device_context<DeviceContext>();
auto stream = device_ctx.stream();
// should be (B * S * hidden)
auto input_dims = input->dims();
// should be (hidden * 3 * all_head_size)
auto w_dims = w->dims();
int batch = input_dims[0];
int seq_len = input_dims[1];
int hidden = input_dims[2];
Tensor temp_bias_tensor;
// if bias_qk is [batch, 1, 1, seq_len], bias_qk_d needs to be broadcast
if (bias_qk.numel() == (batch * seq_len)) {
temp_bias_tensor.Resize({batch * head_number * seq_len * seq_len});
auto *temp_qk_bias = temp_bias_tensor.mutable_data<T>(context.GetPlace());
int grid = batch * head_number * seq_len;
int block = round_up(seq_len);
hipLaunchKernelGGL(( broadcast), dim3(grid), dim3(block), 0, stream,
bias_qk_d, temp_qk_bias, seq_len, head_number);
bias_qk_d = static_cast<const T *>(temp_qk_bias);
}
int all_head_size = w_dims[2];
int head_size = all_head_size / head_number;
auto *out = context.Output<framework::Tensor>("Out");
out->Resize({batch, seq_len, all_head_size});
auto *output_d = out->mutable_data<T>(context.GetPlace());
// (B*S, hidden)
const Tensor input_matrix =
framework::ReshapeToMatrix(*input, 2 /*x_num_col_dims */);
// (hidden, 3 * all_head_size)
const Tensor w_matrix =
framework::ReshapeToMatrix(*w, 1 /*y_num_col_dims*/);
Tensor temp_out_tensor;
auto temp_out_dims =
phi::make_ddim({batch, seq_len, 3, head_number, head_size});
temp_out_tensor.Resize(
{batch * seq_len, phi::product(temp_out_dims) / (batch * seq_len)});
auto *temp_out_data = temp_out_tensor.mutable_data<T>(context.GetPlace());
// (B * S, hidden) * (hidden, 3 * N * H) -> (B * S * 3 * N * H)
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(device_ctx);
blas.MatMul(input_matrix, w_matrix, &temp_out_tensor);
// temp_out_tensor.Resize(temp_out_dims);
Tensor multihead_temp_tensor;
// B * head_number * S * S * 1 + B * S * 3 * N * H
int scratch_size = batch * head_number * seq_len * seq_len * 1;
multihead_temp_tensor.Resize({scratch_size + temp_out_tensor.numel()});
auto *multihead_temp_data =
multihead_temp_tensor.mutable_data<T>(context.GetPlace());
auto *qkptr = multihead_temp_data;
auto *tptr = multihead_temp_data + scratch_size;
// Do the transpose with bias.
// BxSx3xNxH => tptr: 3xBxNxSxH.
TransQKVWithBias(batch,
seq_len,
head_size,
head_number,
temp_out_data,
bias_d,
tptr,
stream);
if (std::is_same<T, platform::float16>::value) {
math::MultiHeadGPUComputeFunctor<half> multihead_compute_func;
multihead_compute_func(device_ctx,
batch,
seq_len,
head_number,
head_size,
reinterpret_cast<half *>(qkptr),
reinterpret_cast<const half *>(bias_qk_d),
reinterpret_cast<half *>(tptr),
__float2half(static_cast<float>(scale)),
__float2half(0.0));
} else {
math::MultiHeadGPUComputeFunctor<T> multihead_compute_func;
multihead_compute_func(device_ctx,
batch,
seq_len,
head_number,
head_size,
qkptr,
bias_qk_d,
tptr,
scale,
T(0.0));
}
int grid = batch * head_number * seq_len;
int block = head_size;
hipLaunchKernelGGL(( transpose<T>), dim3(grid), dim3(block), 0, stream,
tptr, output_d, batch, seq_len, head_number, head_size);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
#if defined(PADDLE_WITH_CUDA) && TORCH_HIP_VERSION >= 10000
REGISTER_OP_CUDA_KERNEL(
multihead_matmul,
ops::MultiHeadMatMulV2Kernel<phi::GPUContext, paddle::platform::float16>,
ops::MultiHeadMatMulV2Kernel<phi::GPUContext, float>);
#else
REGISTER_OP_CUDA_KERNEL(multihead_matmul,
ops::MultiHeadMatMulV2Kernel<phi::GPUContext, float>);
#endif
|
216ba01df84b94a4881ea5e2d2325cf02379ed86.cu
|
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <paddle/fluid/platform/device_context.h>
#include <algorithm>
#include <type_traits>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
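// Permutes attention output from [batch, head_num, seq_len, size_per_head]
// to [batch, seq_len, head_num, size_per_head]; one block per (batch, head, seq)
// triple, one thread per element of size_per_head.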
template <typename T>
__global__ void transpose(T *src,
T *dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head) {
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) +
seq_id * head_num * size_per_head + head_id * size_per_head +
threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
template <typename T>
inline __device__ T add_func(T a, T b);
template <>
__device__ float add_func<float>(float a, float b) {
return a + b;
}
template <>
__device__ float2 add_func<float2>(float2 a, float2 b) {
float2 c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
template <>
__device__ float4 add_func<float4>(float4 a, float4 b) {
float4 c;
c.x = a.x + b.x;
c.y = a.y + b.y;
c.z = a.z + b.z;
c.w = a.w + b.w;
return c;
}
#if defined(PADDLE_WITH_CUDA)
template <>
__device__ half2 add_func<half2>(half2 a, half2 b) {
#if __CUDA_ARCH__ >= 530
return __hadd2(a, b);
#else
return half2(__float2half(__half2float(a.x) + __half2float(b.x)),
__float2half(__half2float(a.y) + __half2float(b.y)));
#endif
}
template <>
__device__ half add_func<half>(half a, half b) {
#if __CUDA_ARCH__ >= 530
return __hadd(a, b);
#else
return __float2half(__half2float(a) + __half2float(b));
#endif
}
#endif
template <typename T>
__global__ void TransposeQkvKernel(const int H,
const T *input,
const T *bias,
T *output) {
// Input: BxSx3xNxH
// Bias: 3xNxH
// Output: 3xBxNxSxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z;
const int N = blockDim.y;
const int S = gridDim.x;
const int B = gridDim.y;
const int NH = N * H;
const int NHS = NH * S;
const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3;
const int bias_offset = m * NH + n * H;
const int out_offset = s * H + n * S * H + b * NHS + m * NHS * B;
const int i = threadIdx.x;
output[out_offset + i] =
add_func(input[in_offset + i], bias[bias_offset + i]);
}
template <typename T>
void TransQKVWithBias(const int batch,
const int seq_len,
const int head_size,
const int head_num,
const T *input,
const T *bias,
T *output,
gpuStream_t stream);
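// The specializations below pick the widest vector type (float4/float2, or half2
// for fp16) that divides head_size so each thread moves one packed element; the
// grid is (seq_len, batch, 3) and the block is (head_size / vec_width, head_num, 1),
// capped at 1024 threads by the PADDLE_ENFORCE_LE checks.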
template <>
void TransQKVWithBias(const int batch,
const int seq_len,
const int head_size,
const int head_num,
const float *input,
const float *bias,
float *output,
gpuStream_t stream) {
// BxSx3xNxH + 3xNxH -> 3xBxNxSxH
int scratch_size = batch * head_num * seq_len * seq_len;
const dim3 grid(seq_len, batch, 3);
// scratch % 4 == 0 to ensure the alignment
if (head_size % 4 == 0 && scratch_size % 4 == 0) {
const int h = head_size / 4;
const float4 *input4 = reinterpret_cast<const float4 *>(input);
const float4 *bias4 = reinterpret_cast<const float4 *>(bias);
float4 *output4 = reinterpret_cast<float4 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024 * 4));
TransposeQkvKernel<float4>
<<<grid, block, 0, stream>>>(h, input4, bias4, output4);
} else if (head_size % 2 == 0 && scratch_size % 2 == 0) {
const int h = head_size / 2;
const float2 *input2 = reinterpret_cast<const float2 *>(input);
const float2 *bias2 = reinterpret_cast<const float2 *>(bias);
float2 *output2 = reinterpret_cast<float2 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024 * 2));
TransposeQkvKernel<float2>
<<<grid, block, 0, stream>>>(h, input2, bias2, output2);
} else {
const dim3 block(head_size, head_num, 1);
// limit head_size * head_num to max block size(1024).
PADDLE_ENFORCE_LE(head_size * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024));
TransposeQkvKernel<float>
<<<grid, block, 0, stream>>>(head_size, input, bias, output);
}
}
#if defined(PADDLE_WITH_CUDA)
template <>
void TransQKVWithBias(const int batch,
const int seq_len,
const int head_size,
const int head_num,
const platform::float16 *input,
const platform::float16 *bias,
platform::float16 *output,
gpuStream_t stream) {
// BxSx3xNxH + 3xNxH -> 3xBxNxSxH
int scratch_size = batch * head_num * seq_len * seq_len;
const dim3 grid(seq_len, batch, 3);
if (head_size % 2 == 0 && scratch_size % 2 == 0) {
const int h = head_size / 2;
const half2 *input2 = reinterpret_cast<const half2 *>(input);
const half2 *bias2 = reinterpret_cast<const half2 *>(bias);
half2 *output2 = reinterpret_cast<half2 *>(output);
const dim3 block(h, head_num, 1);
// limit h * head_num to max block size(1024).
PADDLE_ENFORCE_LE(h * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024 * 2));
TransposeQkvKernel<half2>
<<<grid, block, 0, stream>>>(h, input2, bias2, output2);
} else {
const dim3 block(head_size, head_num, 1);
const half *input_half = reinterpret_cast<const half *>(input);
const half *bias_half = reinterpret_cast<const half *>(bias);
half *output_half = reinterpret_cast<half *>(output);
// limit head_size * head_num to max block size(1024).
PADDLE_ENFORCE_LE(head_size * head_num,
1024,
platform::errors::InvalidArgument(
"head_num (%d) * head_size (%d) should <= %d",
head_num,
head_size,
1024));
TransposeQkvKernel<half><<<grid, block, 0, stream>>>(
head_size, input_half, bias_half, output_half);
}
}
#endif
inline int round_up(int seq_len, int multiple = 32) {
PADDLE_ENFORCE_GT(
multiple,
0,
platform::errors::InvalidArgument(
"multiple should be a positive number,but it's (%d)", multiple));
return ((seq_len + multiple - 1) / multiple) * multiple;
}
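// Broadcasts a per-batch attention bias of length seq_len to every
// (head, query-row) pair: block b writes one seq_len-long row of the
// [batch, head_num, seq_len, seq_len] mask, reading from src[batch_id * seq_len ..].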
template <typename T>
__global__ void broadcast(const T *src,
T *dst,
const int seq_len,
const int head_num) {
int batch_id = blockIdx.x / (head_num * seq_len);
int dst_offset = blockIdx.x * seq_len;
if (threadIdx.x < seq_len) {
dst[threadIdx.x + dst_offset] = src[threadIdx.x + batch_id * seq_len];
}
}
template <typename DeviceContext, typename T>
class MultiHeadMatMulV2Kernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
using Tensor = framework::Tensor;
auto *input = context.Input<framework::Tensor>("Input");
auto *w = context.Input<framework::Tensor>("W");
auto *bias = context.Input<framework::Tensor>("Bias");
auto &bias_qk = GET_DATA_SAFELY(context.Input<framework::Tensor>("BiasQK"),
"Input",
"BiasQK",
"MultiHeadMatMulV2");
auto *input_d = input->data<T>();
auto *w_d = w->data<T>();
auto *bias_d = bias->data<T>();
auto *bias_qk_d = bias_qk.template data<T>();
T scale = static_cast<T>(context.Attr<float>("alpha"));
int head_number = context.Attr<int>("head_number");
// compute q*k with eltadd
auto &device_ctx = context.template device_context<DeviceContext>();
auto stream = device_ctx.stream();
// should be (B * S * hidden)
auto input_dims = input->dims();
// should be (hidden * 3 * all_head_size)
auto w_dims = w->dims();
int batch = input_dims[0];
int seq_len = input_dims[1];
int hidden = input_dims[2];
Tensor temp_bias_tensor;
// if bias_qk is [batch, 1, 1, seq_len], bias_qk_d needs to be broadcast
if (bias_qk.numel() == (batch * seq_len)) {
temp_bias_tensor.Resize({batch * head_number * seq_len * seq_len});
auto *temp_qk_bias = temp_bias_tensor.mutable_data<T>(context.GetPlace());
int grid = batch * head_number * seq_len;
int block = round_up(seq_len);
broadcast<<<grid, block, 0, stream>>>(
bias_qk_d, temp_qk_bias, seq_len, head_number);
bias_qk_d = static_cast<const T *>(temp_qk_bias);
}
int all_head_size = w_dims[2];
int head_size = all_head_size / head_number;
auto *out = context.Output<framework::Tensor>("Out");
out->Resize({batch, seq_len, all_head_size});
auto *output_d = out->mutable_data<T>(context.GetPlace());
// (B*S, hidden)
const Tensor input_matrix =
framework::ReshapeToMatrix(*input, 2 /*x_num_col_dims */);
// (hidden, 3 * all_head_size)
const Tensor w_matrix =
framework::ReshapeToMatrix(*w, 1 /*y_num_col_dims*/);
Tensor temp_out_tensor;
auto temp_out_dims =
phi::make_ddim({batch, seq_len, 3, head_number, head_size});
temp_out_tensor.Resize(
{batch * seq_len, phi::product(temp_out_dims) / (batch * seq_len)});
auto *temp_out_data = temp_out_tensor.mutable_data<T>(context.GetPlace());
// (B * S, hidden) * (hidden, 3 * N * H) -> (B * S * 3 * N * H)
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(device_ctx);
blas.MatMul(input_matrix, w_matrix, &temp_out_tensor);
// temp_out_tensor.Resize(temp_out_dims);
Tensor multihead_temp_tensor;
// B * head_number * S * S * 1 + B * S * 3 * N * H
int scratch_size = batch * head_number * seq_len * seq_len * 1;
multihead_temp_tensor.Resize({scratch_size + temp_out_tensor.numel()});
auto *multihead_temp_data =
multihead_temp_tensor.mutable_data<T>(context.GetPlace());
auto *qkptr = multihead_temp_data;
auto *tptr = multihead_temp_data + scratch_size;
// Do the transpose with bias.
// BxSx3xNxH => tptr: 3xBxNxSxH.
TransQKVWithBias(batch,
seq_len,
head_size,
head_number,
temp_out_data,
bias_d,
tptr,
stream);
if (std::is_same<T, platform::float16>::value) {
math::MultiHeadGPUComputeFunctor<half> multihead_compute_func;
multihead_compute_func(device_ctx,
batch,
seq_len,
head_number,
head_size,
reinterpret_cast<half *>(qkptr),
reinterpret_cast<const half *>(bias_qk_d),
reinterpret_cast<half *>(tptr),
__float2half(static_cast<float>(scale)),
__float2half(0.0));
} else {
math::MultiHeadGPUComputeFunctor<T> multihead_compute_func;
multihead_compute_func(device_ctx,
batch,
seq_len,
head_number,
head_size,
qkptr,
bias_qk_d,
tptr,
scale,
T(0.0));
}
int grid = batch * head_number * seq_len;
int block = head_size;
transpose<T><<<grid, block, 0, stream>>>(
tptr, output_d, batch, seq_len, head_number, head_size);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 10000
REGISTER_OP_CUDA_KERNEL(
multihead_matmul,
ops::MultiHeadMatMulV2Kernel<phi::GPUContext, paddle::platform::float16>,
ops::MultiHeadMatMulV2Kernel<phi::GPUContext, float>);
#else
REGISTER_OP_CUDA_KERNEL(multihead_matmul,
ops::MultiHeadMatMulV2Kernel<phi::GPUContext, float>);
#endif
|
0310fb04ac434aab954d0c3f152ca7bc27ecaeda.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 1024
#endif
__global__ void vc(float *dA, float *dB, int N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N) {
dA[id] = dB[id];
}
}
extern "C" {
void vcGPU(float* A, float *B, int start, int end, int GPUN) {
float *dA, *dB;
hipMalloc(&dA, sizeof(float) * GPUN);
hipMalloc(&dB, sizeof(float) * GPUN);
hipMemcpy(dB, B + start, sizeof(float) * GPUN, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vc), dim3(ceil(((float)GPUN)/THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, 0, dA, dB, GPUN);
hipDeviceSynchronize();
hipMemcpy(A + start, dA, sizeof(float) * GPUN, hipMemcpyDeviceToHost);
hipFree(dA);
hipFree(dB);
}
}
|
0310fb04ac434aab954d0c3f152ca7bc27ecaeda.cu
|
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 1024
#endif
__global__ void vc(float *dA, float *dB, int N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N) {
dA[id] = dB[id];
}
}
extern "C" {
void vcGPU(float* A, float *B, int start, int end, int GPUN) {
float *dA, *dB;
cudaMalloc(&dA, sizeof(float) * GPUN);
cudaMalloc(&dB, sizeof(float) * GPUN);
cudaMemcpy(dB, B + start, sizeof(float) * GPUN, cudaMemcpyHostToDevice);
vc<<<ceil(((float)GPUN)/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(dA, dB, GPUN);
cudaDeviceSynchronize();
cudaMemcpy(A + start, dA, sizeof(float) * GPUN, cudaMemcpyDeviceToHost);
cudaFree(dA);
cudaFree(dB);
}
}
|
140c27f546b55b456370caf4c6dfdaf9404d9870.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathReduce.hip"
#else
void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
// NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel.
dim3 threads(32);
hipLaunchKernelGGL(( THCTensor_kernel_renorm<scalar_t, accreal>)
, dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
void THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
WelfordData<accreal, scalar_t> init;
init.reset();
if (!THC_reduceDim<scalar_t>(state, self_, src,
ModifyWelford<WelfordData<accreal, scalar_t>>{},
ReduceWelford<accreal, scalar_t>{},
VarianceWelford<accreal, scalar_t>{biased, true},
init,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
WelfordData<accreal, scalar_t> init;
init.reset();
if (!THC_reduceDim<scalar_t>(state, self_, src,
ModifyWelford<WelfordData<accreal, scalar_t>>{},
ReduceWelford<accreal, scalar_t>{},
VarianceWelford<accreal, scalar_t>{biased, false},
init,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
accreal THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
accreal THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (biased ? 0 : 1)))
);
THCudaCheck(hipGetLastError());
return val;
}
void THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t _value, int dimension, int keepdim)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{scalar_cast<accreal>(.5)},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMin<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(INFINITY),
dimension, keepdim);
} else {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{THCNumerics<accreal>::cinv(value)},
scalar_cast<accreal>(0),
dimension, keepdim);
}
THCudaCheck(hipGetLastError());
}
accreal THCTensor_(normall)(THCState *state, THCTensor *self, scalar_t _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceAll<scalar_t>(state, self,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMin<accreal>{},
scalar_cast<accreal>(INFINITY),
&result, 0);
} else {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::pow(result,
THCNumerics<accreal>::cinv(value));
}
THCudaCheck(hipGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, scalar_t _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<scalar_t> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
ReduceMax<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1)));
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(INFINITY),
ReduceMin<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1)));
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(0)));
} else {
result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(value));
result = THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value));
}
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return result;
}
#endif
accreal THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
accreal THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
scalar_t THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::upper_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<scalar_t>(val);
}
scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::lower_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<scalar_t>(val);
}
void THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::lower_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<scalar_t, int64_t>());
}
void THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::upper_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<scalar_t, int64_t>());
}
#endif
|
140c27f546b55b456370caf4c6dfdaf9404d9870.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathReduce.cu"
#else
void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
// NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel.
dim3 threads(32);
THCTensor_kernel_renorm<scalar_t, accreal>
<<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
cudaError_t errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
void THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
WelfordData<accreal, scalar_t> init;
init.reset();
if (!THC_reduceDim<scalar_t>(state, self_, src,
ModifyWelford<WelfordData<accreal, scalar_t>>{},
ReduceWelford<accreal, scalar_t>{},
VarianceWelford<accreal, scalar_t>{biased, true},
init,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
WelfordData<accreal, scalar_t> init;
init.reset();
if (!THC_reduceDim<scalar_t>(state, self_, src,
ModifyWelford<WelfordData<accreal, scalar_t>>{},
ReduceWelford<accreal, scalar_t>{},
VarianceWelford<accreal, scalar_t>{biased, false},
init,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
accreal THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
accreal THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (biased ? 0 : 1)))
);
THCudaCheck(cudaGetLastError());
return val;
}
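// Dimension-wise p-norm: p == 0 counts non-zeros, p == 1 sums absolute values,
// p == 2 sums squares and applies a final sqrt via ReducePow(0.5), +/-inf reduce
// with max/min, and any other p computes (sum |x|^p)^(1/p).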
void THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t _value, int dimension, int keepdim)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{scalar_cast<accreal>(.5)},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMin<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(INFINITY),
dimension, keepdim);
} else {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{THCNumerics<accreal>::cinv(value)},
scalar_cast<accreal>(0),
dimension, keepdim);
}
THCudaCheck(cudaGetLastError());
}
accreal THCTensor_(normall)(THCState *state, THCTensor *self, scalar_t _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceAll<scalar_t>(state, self,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMin<accreal>{},
scalar_cast<accreal>(INFINITY),
&result, 0);
} else {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::pow(result,
THCNumerics<accreal>::cinv(value));
}
THCudaCheck(cudaGetLastError());
return result;
}
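// dist: p-norm of the element-wise difference (self - src), computed as a single
// fused thrust::inner_product; the transform op works on |self_i - src_i| and the
// reduction is max, min, or plus depending on the requested p.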
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, scalar_t _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<scalar_t> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
result = thrust::inner_product(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
ReduceMax<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1)));
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
result = thrust::inner_product(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(INFINITY),
ReduceMin<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1)));
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
result = thrust::inner_product(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(0)));
} else {
result = thrust::inner_product(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(value));
result = THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value));
}
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return result;
}
#endif
accreal THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
accreal THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
scalar_t THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::upper_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<scalar_t>(val);
}
scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::lower_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<scalar_t>(val);
}
void THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::lower_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<scalar_t, int64_t>());
}
void THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::upper_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<scalar_t, int64_t>());
}
#endif
|
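Both the norm and dist reductions above follow the same shape: accumulate |x|^p (or |x - y|^p) across the tensor, then raise the sum to the power 1/p. The snippet below is a minimal standalone sketch of that pattern using thrust::transform_reduce; the vector contents, the exponent p, and the functor name abs_pow are illustrative choices, not anything taken from the THC sources.
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <cmath>
#include <cstdio>
// |x|^p for one element; the sums of these are then taken to the 1/p power.
struct abs_pow {
  float p;
  __host__ __device__ float operator()(float x) const { return powf(fabsf(x), p); }
};
int main() {
  thrust::device_vector<float> v(4);
  v[0] = 1.f; v[1] = -2.f; v[2] = 3.f; v[3] = -4.f;
  const float p = 3.f;
  float sum = thrust::transform_reduce(v.begin(), v.end(), abs_pow{p},
                                       0.f, thrust::plus<float>());
  printf("3-norm = %f\n", powf(sum, 1.f / p)); // same pow(sum, 1/p) step as above
  return 0;
}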
3a154bdd64ec40b543e925ac425957862a724964.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c){
*c = *a + *b;
}
int main(void){
int a, b, c; //host copies of a, b, c
int *d_a, *d_b, *d_c; //device copies of a, b, c
int size = sizeof(int);
//Allocate space for device copies of a, b, c
hipMalloc((void **) &d_a, size); //take in address of pointer
hipMalloc((void **) &d_b, size);
hipMalloc((void **) &d_c, size);
//set up input values
a = 2;
b = 7;
//copy inputs to device
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
//launch add() kernel on GPU
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
//copy result back to host
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
printf("a(%d) + b(%d) = %d\n", a,b,c);
//Cleanup
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
|
3a154bdd64ec40b543e925ac425957862a724964.cu
|
#include <stdio.h>
__global__ void add(int *a, int *b, int *c){
*c = *a + *b;
}
int main(void){
int a, b, c; //host copies of a, b, c
int *d_a, *d_b, *d_c; //device copies of a, b, c
int size = sizeof(int);
//Allocate space for device copies of a, b, c
cudaMalloc((void **) &d_a, size); //take in address of pointer
cudaMalloc((void **) &d_b, size);
cudaMalloc((void **) &d_c, size);
//set up input values
a = 2;
b = 7;
//copy inputs to device
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
//launch add() kernel on GPU
add<<<1,1>>>(d_a, d_b, d_c);
//copy result back to host
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
printf("a(%d) + b(%d) = %d\n", a,b,c);
//Cleanup
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
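Neither half of the pair above checks any API return codes. The sketch below wraps the same single-element addition in a small error-checking macro; the macro name CHECK is an illustrative choice, not something provided by CUDA or HIP.
#include <stdio.h>
#include <cuda_runtime.h>
#define CHECK(call)                                                  \
  do {                                                               \
    cudaError_t err_ = (call);                                       \
    if (err_ != cudaSuccess) {                                       \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                    \
              cudaGetErrorString(err_), __FILE__, __LINE__);         \
      return 1;                                                      \
    }                                                                \
  } while (0)
__global__ void add(int *a, int *b, int *c){
  *c = *a + *b;
}
int main(void){
  int a = 2, b = 7, c = 0;
  int *d_a, *d_b, *d_c;
  CHECK(cudaMalloc((void **) &d_a, sizeof(int)));
  CHECK(cudaMalloc((void **) &d_b, sizeof(int)));
  CHECK(cudaMalloc((void **) &d_c, sizeof(int)));
  CHECK(cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice));
  CHECK(cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice));
  add<<<1,1>>>(d_a, d_b, d_c);
  CHECK(cudaGetLastError());   // reports launch errors, which are otherwise silent
  CHECK(cudaMemcpy(&c, d_c, sizeof(int), cudaMemcpyDeviceToHost));
  printf("a(%d) + b(%d) = %d\n", a, b, c);
  CHECK(cudaFree(d_a)); CHECK(cudaFree(d_b)); CHECK(cudaFree(d_c));
  return 0;
}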
9b32d060522b4a756bf3b0069da98c34c564f170.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
/************************************************************************/
/* Init CUDA */
/************************************************************************/
#if __DEVICE_EMULATION__
bool InitCUDA(void){return true;}
#else
bool InitCUDA(void)
{
int count = 0;
int i = 0;
hipGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
for(i = 0; i < count; i++) {
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
hipSetDevice(i);
printf("CUDA initialized.\n");
return true;
}
#endif
__global__ static void findsmallest(int * primes,int f)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > 1) {
for(int i=idx+idx;i < f;i+=idx)
primes[i] = 1;
}
}
int main(int argc, char* argv[])
{
int *primes;
int sieve[100000];
double n = sizeof(sieve)/sizeof(int);
if(!InitCUDA()) {
return 0;
}
hipMalloc((void**) &primes, sizeof(int) * n);
hipLaunchKernelGGL(( findsmallest), dim3(1), dim3(512), 16000, 0, primes, n);
hipDeviceSynchronize();
hipMemcpy(&sieve, primes, sizeof(int) * n, hipMemcpyDeviceToHost);
hipFree(primes);
for(int k=2;k < n;++k)
if (sieve[k] == 0)
printf("%d is prime\n",k);
return 0;
}
|
9b32d060522b4a756bf3b0069da98c34c564f170.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
/************************************************************************/
/* Init CUDA */
/************************************************************************/
#if __DEVICE_EMULATION__
bool InitCUDA(void){return true;}
#else
bool InitCUDA(void)
{
int count = 0;
int i = 0;
cudaGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
for(i = 0; i < count; i++) {
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
cudaSetDevice(i);
printf("CUDA initialized.\n");
return true;
}
#endif
__global__ static void findsmallest(int * primes,int f)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > 1) {
for(int i=idx+idx;i < f;i+=idx)
primes[i] = 1;
}
}
int main(int argc, char* argv[])
{
int *primes;
int sieve[100000];
double n = sizeof(sieve)/sizeof(int);
if(!InitCUDA()) {
return 0;
}
cudaMalloc((void**) &primes, sizeof(int) * n);
findsmallest<<<1, 512, 16000>>>(primes, n);
cudaThreadSynchronize();
cudaMemcpy(&sieve, primes, sizeof(int) * n, cudaMemcpyDeviceToHost);
cudaFree(primes);
for(int k=2;k < n;++k)
if (sieve[k] == 0)
printf("%d is prime\n",k);
return 0;
}
|
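One detail worth flagging in the sieve pair above: the device buffer primes is never zeroed before the kernel marks composites, so the later sieve[k] == 0 test reads uninitialized memory, and n is a double passed where sizes and an int kernel argument are expected. Below is a hedged sketch of the same kernel with those two points addressed, assuming the intent was a standard Sieve of Eratosthenes; it illustrates the fix rather than claiming to reflect the original author's intent.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
__global__ static void findsmallest(int *primes, int f)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx > 1) {
    for (int i = idx + idx; i < f; i += idx)
      primes[i] = 1;   // mark multiples of idx as composite
  }
}
int main(void)
{
  const int n = 100000;                       // int size instead of a double
  int *sieve = (int *) malloc(n * sizeof(int));
  int *primes = NULL;
  cudaMalloc((void **) &primes, n * sizeof(int));
  cudaMemset(primes, 0, n * sizeof(int));     // zero the flags before marking
  // 512 divisor threads suffice: every composite < 100000 has a factor <= 316
  findsmallest<<<1, 512>>>(primes, n);
  cudaDeviceSynchronize();
  cudaMemcpy(sieve, primes, n * sizeof(int), cudaMemcpyDeviceToHost);
  cudaFree(primes);
  for (int k = 2; k < n; ++k)
    if (sieve[k] == 0)
      printf("%d is prime\n", k);
  free(sieve);
  return 0;
}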
90ef7961bfc9b291c83606071e376d1afa816c69.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace crfasrnn_caffe {
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(), // a
bottom[1]->gpu_data(), // b
diff_.mutable_gpu_data()); // a_i-b_i
caffe_gpu_powx(
count,
diff_.mutable_gpu_data(), // a_i-b_i
Dtype(2),
diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
caffe_gpu_gemv(
CblasNoTrans,
bottom[0]->num(),
bottom[0]->channels(),
Dtype(1.0),
diff_sq_.gpu_data(), // (a_i-b_i)^2
summer_vec_.gpu_data(),
Dtype(0.0),
dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
Dtype loss(0.0);
for (int i = 0; i < bottom[0]->num(); ++i) {
if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs
loss += dist_sq_.cpu_data()[i];
} else { // dissimilar pairs
loss += ::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0));
}
}
loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
__global__ void CLLForward(const int count, const int channels,
const Dtype margin, const Dtype alpha,
const Dtype* y, const Dtype* diff, const Dtype* dist_sq,
Dtype *bottom_diff) {
CUDA_KERNEL_LOOP(i, count) {
int n = i / channels; // the num index, to access y and dist_sq
if (static_cast<int>(y[n])) { // similar pairs
bottom_diff[i] = alpha * diff[i];
} else { // dissimilar pairs
if ((margin-dist_sq[n]) > 0.0) {
bottom_diff[i] = -alpha * diff[i];
} else {
bottom_diff[i] = 0;
}
}
}
}
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const int count = bottom[0]->count();
const int channels = bottom[0]->channels();
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(bottom[0]->num());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( CLLForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, margin, alpha,
bottom[2]->gpu_data(), // pair similarity 0 or 1
diff_.gpu_data(), // the cached eltwise difference between a and b
dist_sq_.gpu_data(), // the cached square distance between a and b
bottom[i]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveLossLayer);
} // namespace crfasrnn_caffe
|
90ef7961bfc9b291c83606071e376d1afa816c69.cu
|
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace crfasrnn_caffe {
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(), // a
bottom[1]->gpu_data(), // b
diff_.mutable_gpu_data()); // a_i-b_i
caffe_gpu_powx(
count,
diff_.mutable_gpu_data(), // a_i-b_i
Dtype(2),
diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
caffe_gpu_gemv(
CblasNoTrans,
bottom[0]->num(),
bottom[0]->channels(),
Dtype(1.0),
diff_sq_.gpu_data(), // (a_i-b_i)^2
summer_vec_.gpu_data(),
Dtype(0.0),
dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
Dtype loss(0.0);
for (int i = 0; i < bottom[0]->num(); ++i) {
if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs
loss += dist_sq_.cpu_data()[i];
} else { // dissimilar pairs
loss += std::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0));
}
}
loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
__global__ void CLLForward(const int count, const int channels,
const Dtype margin, const Dtype alpha,
const Dtype* y, const Dtype* diff, const Dtype* dist_sq,
Dtype *bottom_diff) {
CUDA_KERNEL_LOOP(i, count) {
int n = i / channels; // the num index, to access y and dist_sq
if (static_cast<int>(y[n])) { // similar pairs
bottom_diff[i] = alpha * diff[i];
} else { // dissimilar pairs
if ((margin-dist_sq[n]) > 0.0) {
bottom_diff[i] = -alpha * diff[i];
} else {
bottom_diff[i] = 0;
}
}
}
}
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const int count = bottom[0]->count();
const int channels = bottom[0]->channels();
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(bottom[0]->num());
// NOLINT_NEXT_LINE(whitespace/operators)
CLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, margin, alpha,
bottom[2]->gpu_data(), // pair similarity 0 or 1
diff_.gpu_data(), // the cached eltwise difference between a and b
dist_sq_.gpu_data(), // the cached square distance between a and b
bottom[i]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveLossLayer);
} // namespace crfasrnn_caffe
|
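As the forward pass above shows, this layer applies the margin to the squared distance: similar pairs contribute d^2, dissimilar pairs contribute max(margin - d^2, 0), and the batch loss is the mean of those terms divided by 2. The CPU-only sketch below re-implements just that formula as a reference for checking GPU output; the feature values and the function name contrastive_loss_ref are made up for illustration.
#include <algorithm>
#include <cstdio>
#include <vector>
// Reference loss for a batch of feature pairs, using the same convention as
// above: the margin is compared against the squared Euclidean distance.
float contrastive_loss_ref(const std::vector<std::vector<float> > &a,
                           const std::vector<std::vector<float> > &b,
                           const std::vector<int> &similar, float margin) {
  float loss = 0.f;
  for (size_t i = 0; i < a.size(); ++i) {
    float dist_sq = 0.f;
    for (size_t c = 0; c < a[i].size(); ++c) {
      float d = a[i][c] - b[i][c];
      dist_sq += d * d;
    }
    loss += similar[i] ? dist_sq : std::max(margin - dist_sq, 0.f);
  }
  return loss / static_cast<float>(a.size()) / 2.f;
}
int main() {
  std::vector<std::vector<float> > a = {{1.f, 0.f}, {0.f, 1.f}};
  std::vector<std::vector<float> > b = {{1.f, 0.5f}, {2.f, 1.f}};
  std::vector<int> similar = {1, 0};          // pair 0 similar, pair 1 dissimilar
  printf("loss = %f\n", contrastive_loss_ref(a, b, similar, 1.0f)); // 0.0625
  return 0;
}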
6a96995a1b9c48b9bd4c5f46c47f8150bcd0192a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/dropout_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void DropoutKernel(const int N, const float ratio,
const float* Xdata, float* Ydata,
bool* maskdata) {
const float scale = 1. / (1. - ratio);
CUDA_1D_KERNEL_LOOP(i, N) {
maskdata[i] = (Ydata[i] > ratio);
Ydata[i] = Xdata[i] * scale * maskdata[i];
}
}
} // namespace
template <>
bool DropoutOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
auto* mask = Output(1);
Y->Resize(X.dims());
mask->Resize(X.dims());
DCHECK_GT(X.size(), 0);
if (is_test_) {
if (Y != &X) {
context_.Copy<float, CUDAContext, CUDAContext>(
X.size(), X.data<float>(), Y->mutable_data<float>());
}
return true;
} else {
// We do a simple trick here: since hiprand cannot generate random
// boolean numbers, we will generate into dY and write the result to
// mask.
float* Ydata = Y->mutable_data<float>();
CURAND_CHECK(hiprandGenerateUniform(
context_.curand_generator(), Ydata, X.size()));
hipLaunchKernelGGL(( DropoutKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
X.size(), ratio_, X.data<float>(), Ydata, mask->mutable_data<bool>());
return true;
}
}
namespace {
__global__ void DropoutGradientKernel(const int N, const float* dYdata,
const bool* maskdata,
const float scale, float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
dXdata[i] = dYdata[i] * maskdata[i] * scale;
}
}
} // namespace
template <>
bool DropoutGradientOp<float, CUDAContext>::RunOnDevice() {
auto& dY = Input(0);
auto& mask = Input(1);
auto* dX = Output(0);
DCHECK_GT(dY.size(), 0);
DCHECK_EQ(dY.size(), mask.size());
dX->Resize(dY.dims());
if (is_test_) {
if (dX != &dY) {
context_.Copy<float, CUDAContext, CUDAContext>(
dY.size(), dY.data<float>(), dX->mutable_data<float>());
}
return true;
} else {
const float scale = 1. / (1. - ratio_);
hipLaunchKernelGGL(( DropoutGradientKernel), dim3(CAFFE_GET_BLOCKS(dY.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
dY.size(), dY.data<float>(), mask.data<bool>(), scale,
dX->mutable_data<float>());
return true;
}
}
namespace {
REGISTER_CUDA_OPERATOR(Dropout, DropoutOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(DropoutGrad, DropoutGradientOp<float, CUDAContext>);
} // namespace
} // namespace caffe2
|
6a96995a1b9c48b9bd4c5f46c47f8150bcd0192a.cu
|
#include "caffe2/operators/dropout_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void DropoutKernel(const int N, const float ratio,
const float* Xdata, float* Ydata,
bool* maskdata) {
const float scale = 1. / (1. - ratio);
CUDA_1D_KERNEL_LOOP(i, N) {
maskdata[i] = (Ydata[i] > ratio);
Ydata[i] = Xdata[i] * scale * maskdata[i];
}
}
} // namespace
template <>
bool DropoutOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
auto* mask = Output(1);
Y->Resize(X.dims());
mask->Resize(X.dims());
DCHECK_GT(X.size(), 0);
if (is_test_) {
if (Y != &X) {
context_.Copy<float, CUDAContext, CUDAContext>(
X.size(), X.data<float>(), Y->mutable_data<float>());
}
return true;
} else {
// We do a simple trick here: since curand cannot generate random
// boolean numbers, we will generate into dY and write the result to
// mask.
float* Ydata = Y->mutable_data<float>();
CURAND_CHECK(curandGenerateUniform(
context_.curand_generator(), Ydata, X.size()));
DropoutKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
X.size(), ratio_, X.data<float>(), Ydata, mask->mutable_data<bool>());
return true;
}
}
namespace {
__global__ void DropoutGradientKernel(const int N, const float* dYdata,
const bool* maskdata,
const float scale, float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
dXdata[i] = dYdata[i] * maskdata[i] * scale;
}
}
} // namespace
template <>
bool DropoutGradientOp<float, CUDAContext>::RunOnDevice() {
auto& dY = Input(0);
auto& mask = Input(1);
auto* dX = Output(0);
DCHECK_GT(dY.size(), 0);
DCHECK_EQ(dY.size(), mask.size());
dX->Resize(dY.dims());
if (is_test_) {
if (dX != &dY) {
context_.Copy<float, CUDAContext, CUDAContext>(
dY.size(), dY.data<float>(), dX->mutable_data<float>());
}
return true;
} else {
const float scale = 1. / (1. - ratio_);
DropoutGradientKernel<<<CAFFE_GET_BLOCKS(dY.size()),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
dY.size(), dY.data<float>(), mask.data<bool>(), scale,
dX->mutable_data<float>());
return true;
}
}
namespace {
REGISTER_CUDA_OPERATOR(Dropout, DropoutOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(DropoutGrad, DropoutGradientOp<float, CUDAContext>);
} // namespace
} // namespace caffe2
|
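Both dropout kernels iterate with CUDA_1D_KERNEL_LOOP, Caffe2's grid-stride loop macro, so a fixed launch configuration can cover tensors of any size. Up to the exact index type the macro uses, the pattern is the one sketched below; the kernel name scale_kernel and the launch shape are illustrative only.
#include <cstdio>
#include <cuda_runtime.h>
// Grid-stride loop: each thread steps through the range by gridDim.x * blockDim.x,
// so the grid does not need to be sized to cover n exactly.
__global__ void scale_kernel(const float *x, float *y, float s, int n) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    y[i] = s * x[i];
  }
}
int main() {
  const int n = 1 << 20;
  float *x, *y;
  cudaMallocManaged(&x, n * sizeof(float));
  cudaMallocManaged(&y, n * sizeof(float));
  for (int i = 0; i < n; ++i) x[i] = 1.0f;
  scale_kernel<<<256, 256>>>(x, y, 2.0f, n);  // far fewer threads than n is fine
  cudaDeviceSynchronize();
  printf("y[0]=%f y[n-1]=%f\n", y[0], y[n - 1]);
  cudaFree(x); cudaFree(y);
  return 0;
}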
e2a1338db66309b05256570e430e19af04527b8a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------
#include "caffe/FRCNN/util/frcnn_gpu_nms.hpp"
#include <vector>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
// fyk: dev_boxes dim is 4 or 5
// boxes_dim must be a compile-time constant, not a variable, or NVCC will not compile
#define box_dim 4
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * box_dim];
if (threadIdx.x < col_size) {
for (int d = 0; d < box_dim; d ++) {
block_boxes[threadIdx.x * box_dim + d] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * box_dim + d];
}
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * box_dim;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * box_dim) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
CUDA_CHECK(hipGetDevice(&current_device));
if (current_device == device_id) {
return;
}
// The call to hipSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(hipSetDevice(device_id));
}
namespace caffe {
namespace Frcnn {
// code from https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/nms_kernel.cu
// fyk: all params are host (CPU) memory; boxes_dim should be 4 instead of 5 (x1,y1,x2,y2,confidence); boxes_host must already be sorted by confidence in descending order
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CUDA_CHECK(hipMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CUDA_CHECK(hipMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CUDA_CHECK(hipFree(boxes_dev));
CUDA_CHECK(hipFree(mask_dev));
}
__global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes,
const float * dev_query_boxes, float* dev_overlaps) {
const int row_start = blockIdx.x;
const int col_start = blockIdx.y;
// note that this statement differs from nms_kernel; col_start could also be assigned blockIdx.x, and the order does not matter as long as the following statements are consistent.
// if (row_start > col_start) return;
const int row_size =
min(N - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(K - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 4];
__shared__ float block_query_boxes[threadsPerBlock * 4];
if (threadIdx.x < col_size) {
block_query_boxes[threadIdx.x * 4 + 0] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0];
block_query_boxes[threadIdx.x * 4 + 1] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1];
block_query_boxes[threadIdx.x * 4 + 2] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2];
block_query_boxes[threadIdx.x * 4 + 3] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3];
}
if (threadIdx.x < row_size) {
block_boxes[threadIdx.x * 4 + 0] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 0];
block_boxes[threadIdx.x * 4 + 1] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 1];
block_boxes[threadIdx.x * 4 + 2] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 2];
block_boxes[threadIdx.x * 4 + 3] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 3];
}
__syncthreads();
if (threadIdx.x < row_size) {
for(int i = 0;i < col_size; i++) {
int offset = row_start*threadsPerBlock * K + col_start*threadsPerBlock + threadIdx.x*K+ i ;
dev_overlaps[offset] = devIoU(block_boxes + threadIdx.x * 4, block_query_boxes + i * 4);
}
}
}
// fyk: all params are host (CPU) memory; boxes_dim should be 4 (x1,y1,x2,y2)
void _overlaps(float* overlaps,const float* boxes,const float* query_boxes, int n, int k, int device_id) {
_set_device(device_id);
float* overlaps_dev = NULL;
float* boxes_dev = NULL;
float* query_boxes_dev = NULL;
CUDA_CHECK(hipMalloc(&boxes_dev,
n * 4 * sizeof(float)));
CUDA_CHECK(hipMemcpy(boxes_dev,
boxes,
n * 4 * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&query_boxes_dev,
k * 4 * sizeof(float)));
CUDA_CHECK(hipMemcpy(query_boxes_dev,
query_boxes,
k * 4 * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&overlaps_dev,
n * k * sizeof(float)));
dim3 blocks(DIVUP(n, threadsPerBlock),
DIVUP(k, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( overlaps_kernel), dim3(blocks), dim3(threads), 0, 0, n, k,
boxes_dev,
query_boxes_dev,
overlaps_dev);
CUDA_CHECK(hipMemcpy(overlaps,
overlaps_dev,
n * k * sizeof(float),
hipMemcpyDeviceToHost));
CUDA_CHECK(hipFree(overlaps_dev));
CUDA_CHECK(hipFree(boxes_dev));
CUDA_CHECK(hipFree(query_boxes_dev));
}
} // namespace frcnn
} // namespace caffe
|
e2a1338db66309b05256570e430e19af04527b8a.cu
|
// ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------
#include "caffe/FRCNN/util/frcnn_gpu_nms.hpp"
#include <vector>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
// fyk: dev_boxes dim is 4 or 5
// boxes_dim must be a compile-time constant, not a variable, or NVCC will not compile
#define box_dim 4
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * box_dim];
if (threadIdx.x < col_size) {
for (int d = 0; d < box_dim; d ++) {
block_boxes[threadIdx.x * box_dim + d] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * box_dim + d];
}
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * box_dim;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * box_dim) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
CUDA_CHECK(cudaGetDevice(&current_device));
if (current_device == device_id) {
return;
}
// The call to cudaSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(cudaSetDevice(device_id));
}
namespace caffe {
namespace Frcnn {
// code from https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/nms_kernel.cu
// fyk: all params are host (CPU) memory; boxes_dim should be 4 instead of 5 (x1,y1,x2,y2,confidence); boxes_host must already be sorted by confidence in descending order
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CUDA_CHECK(cudaMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CUDA_CHECK(cudaMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CUDA_CHECK(cudaFree(boxes_dev));
CUDA_CHECK(cudaFree(mask_dev));
}
__global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes,
const float * dev_query_boxes, float* dev_overlaps) {
const int row_start = blockIdx.x;
const int col_start = blockIdx.y;
// note that this statement differs from nms_kernel; col_start could also be assigned blockIdx.x, and the order does not matter as long as the following statements are consistent.
// if (row_start > col_start) return;
const int row_size =
min(N - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(K - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 4];
__shared__ float block_query_boxes[threadsPerBlock * 4];
if (threadIdx.x < col_size) {
block_query_boxes[threadIdx.x * 4 + 0] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0];
block_query_boxes[threadIdx.x * 4 + 1] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1];
block_query_boxes[threadIdx.x * 4 + 2] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2];
block_query_boxes[threadIdx.x * 4 + 3] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3];
}
if (threadIdx.x < row_size) {
block_boxes[threadIdx.x * 4 + 0] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 0];
block_boxes[threadIdx.x * 4 + 1] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 1];
block_boxes[threadIdx.x * 4 + 2] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 2];
block_boxes[threadIdx.x * 4 + 3] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 4 + 3];
}
__syncthreads();
if (threadIdx.x < row_size) {
for(int i = 0;i < col_size; i++) {
int offset = row_start*threadsPerBlock * K + col_start*threadsPerBlock + threadIdx.x*K+ i ;
dev_overlaps[offset] = devIoU(block_boxes + threadIdx.x * 4, block_query_boxes + i * 4);
}
}
}
// fyk: all params are host (CPU) memory; boxes_dim should be 4 (x1,y1,x2,y2)
void _overlaps(float* overlaps,const float* boxes,const float* query_boxes, int n, int k, int device_id) {
_set_device(device_id);
float* overlaps_dev = NULL;
float* boxes_dev = NULL;
float* query_boxes_dev = NULL;
CUDA_CHECK(cudaMalloc(&boxes_dev,
n * 4 * sizeof(float)));
CUDA_CHECK(cudaMemcpy(boxes_dev,
boxes,
n * 4 * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&query_boxes_dev,
k * 4 * sizeof(float)));
CUDA_CHECK(cudaMemcpy(query_boxes_dev,
query_boxes,
k * 4 * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&overlaps_dev,
n * k * sizeof(float)));
dim3 blocks(DIVUP(n, threadsPerBlock),
DIVUP(k, threadsPerBlock));
dim3 threads(threadsPerBlock);
overlaps_kernel<<<blocks, threads>>>(n, k,
boxes_dev,
query_boxes_dev,
overlaps_dev);
CUDA_CHECK(cudaMemcpy(overlaps,
overlaps_dev,
n * k * sizeof(float),
cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaFree(overlaps_dev));
CUDA_CHECK(cudaFree(boxes_dev));
CUDA_CHECK(cudaFree(query_boxes_dev));
}
} // namespace frcnn
} // namespace caffe
|
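devIoU above treats boxes as inclusive pixel coordinates, which is why widths and heights get the +1. The host-side mirror below computes the same overlap ratio and is handy for spot-checking _nms / _overlaps results on hand-made boxes; the example boxes are arbitrary.
#include <algorithm>
#include <cstdio>
// Host mirror of devIoU: boxes are (x1, y1, x2, y2) with inclusive coordinates.
static float host_iou(const float a[4], const float b[4]) {
  float left = std::max(a[0], b[0]), right = std::min(a[2], b[2]);
  float top = std::max(a[1], b[1]), bottom = std::min(a[3], b[3]);
  float width = std::max(right - left + 1, 0.f);
  float height = std::max(bottom - top + 1, 0.f);
  float interS = width * height;
  float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}
int main() {
  const float a[4] = {0, 0, 9, 9};       // 10x10 box
  const float b[4] = {5, 5, 14, 14};     // 10x10 box shifted by (5, 5)
  printf("IoU = %f\n", host_iou(a, b));  // 25 / (100 + 100 - 25) ~= 0.142857
  return 0;
}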
e3e991752f6de7cd39824ca99337bef34600bba6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include "textures.cuh"
namespace lequiv {
#define LEQUIV_BLOCK_SIZE_X 16
#define LEQUIV_BLOCK_SIZE_Y 16
__global__ void LEQUIV_prescan(int* L, int* R, int w, int h) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = x+y*w;
if (x < w && y < h) {
L[index] = index;
R[index] = index;
}
}
__global__ void LEQUIV_scan(int* R, int w, int h, int* d_stop) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = x+y*w;
if (x < w && y < h) {
unsigned char v = tex2D(imgtex, x, y);
int label = tex1Dfetch(Ltex, index);
int newlabel = w*h;
if (y>0 && tex2D(imgtex, x, y-1) == v) {
newlabel = min(newlabel, tex1Dfetch(Ltex, index-w));
}
if (y<h-1 && tex2D(imgtex, x, y+1) == v) {
newlabel = min(newlabel, tex1Dfetch(Ltex, index+w));
}
if (x>0 && tex2D(imgtex, x-1, y) == v) {
newlabel = min(newlabel, tex1Dfetch(Ltex, index-1));
}
if (x<w-1 && tex2D(imgtex, x+1, y) == v) {
newlabel = min(newlabel, tex1Dfetch(Ltex, index+1));
}
if (newlabel< label) {
R[label] = newlabel;
*d_stop = 0;
}
}
}
__global__ void LEQUIV_analysis(int* L, int* R, int w, int h) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = x+y*w;
int label;
if (x < w && y < h) {
label = L[index];
if (label == index) {
int deep = 128;
int rf = label;
//label = tex1Dfetch(Rtex, rf);
label = R[rf];
while (rf!=label && deep>0) {
rf = label;
label = tex1Dfetch(Rtex, rf);
deep--;
}
//texture will be invalid
R[index] = label;
}
}
}
__global__ void LEQUIV_labeling(int* L, int w, int h) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = x+y*w;
if (x < w && y < h) {
int label = L[index];
int cc = tex1Dfetch(Rtex, label);
L[index] = tex1Dfetch(Rtex, cc);
}
}
void CCL(unsigned char* img, int w, int h, int* label) {
hipError_t err;
hipArray* imgarray;
hipChannelFormatDesc uchardesc =
hipCreateChannelDesc<unsigned char>();
hipMallocArray(&imgarray, &uchardesc, w, h);
int* L;
hipMalloc((void**)&L, w*h*sizeof(int));
int* R;
hipMalloc((void**)&R, w*h*sizeof(int));
err = hipGetLastError();
if (err != hipSuccess) {
printf("startERROR: %s\n", hipGetErrorString(err));
return;
}
hipChannelFormatDesc intdesc =
hipCreateChannelDesc<int>();
hipBindTextureToArray(imgtex, imgarray, uchardesc);
hipBindTexture(NULL, Ltex, L, intdesc, w*h*sizeof(int));
hipBindTexture(NULL, Rtex, R, intdesc, w*h*sizeof(int));
int stop;
int* d_stop;
hipMalloc((void**)&d_stop, sizeof(int));
dim3 block (LEQUIV_BLOCK_SIZE_X, LEQUIV_BLOCK_SIZE_Y);
dim3 grid ((w+LEQUIV_BLOCK_SIZE_X-1)/LEQUIV_BLOCK_SIZE_X,
(h+LEQUIV_BLOCK_SIZE_Y-1)/LEQUIV_BLOCK_SIZE_Y);
hipMemcpyToArray(imgarray, 0, 0, img,
w*h*sizeof(unsigned char),
hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess) {
printf("midERROR: %s\n", hipGetErrorString(err));
return;
}
hipLaunchKernelGGL(( LEQUIV_prescan) , dim3(grid), dim3(block), 0, 0,
L, R, w, h);
stop = 0;
while (stop == 0) {
hipMemset(d_stop, 0xFF, sizeof(int));
hipLaunchKernelGGL(( LEQUIV_scan) , dim3(grid), dim3(block), 0, 0,
R, w, h, d_stop);
hipLaunchKernelGGL(( LEQUIV_analysis) , dim3(grid), dim3(block), 0, 0,
L, R, w, h);
hipLaunchKernelGGL(( LEQUIV_labeling) , dim3(grid), dim3(block), 0, 0,
L, w, h);
hipMemcpy(&stop, d_stop, sizeof(int),
hipMemcpyDeviceToHost);
}
hipMemcpy(label, L, w*h*sizeof(int),
hipMemcpyDeviceToHost);
hipFree(d_stop);
hipFree(L);
hipFree(R);
hipFreeArray(imgarray);
err = hipGetLastError();
if (err != hipSuccess) {
printf("endERROR: %s\n", hipGetErrorString(err));
return;
}
}
}
|
e3e991752f6de7cd39824ca99337bef34600bba6.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
#include "textures.cuh"
namespace lequiv {
#define LEQUIV_BLOCK_SIZE_X 16
#define LEQUIV_BLOCK_SIZE_Y 16
__global__ void LEQUIV_prescan(int* L, int* R, int w, int h) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = x+y*w;
if (x < w && y < h) {
L[index] = index;
R[index] = index;
}
}
__global__ void LEQUIV_scan(int* R, int w, int h, int* d_stop) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = x+y*w;
if (x < w && y < h) {
unsigned char v = tex2D(imgtex, x, y);
int label = tex1Dfetch(Ltex, index);
int newlabel = w*h;
if (y>0 && tex2D(imgtex, x, y-1) == v) {
newlabel = min(newlabel, tex1Dfetch(Ltex, index-w));
}
if (y<h-1 && tex2D(imgtex, x, y+1) == v) {
newlabel = min(newlabel, tex1Dfetch(Ltex, index+w));
}
if (x>0 && tex2D(imgtex, x-1, y) == v) {
newlabel = min(newlabel, tex1Dfetch(Ltex, index-1));
}
if (x<w-1 && tex2D(imgtex, x+1, y) == v) {
newlabel = min(newlabel, tex1Dfetch(Ltex, index+1));
}
if (newlabel< label) {
R[label] = newlabel;
*d_stop = 0;
}
}
}
__global__ void LEQUIV_analysis(int* L, int* R, int w, int h) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = x+y*w;
int label;
if (x < w && y < h) {
label = L[index];
if (label == index) {
int deep = 128;
int rf = label;
//label = tex1Dfetch(Rtex, rf);
label = R[rf];
while (rf!=label && deep>0) {
rf = label;
label = tex1Dfetch(Rtex, rf);
deep--;
}
//texture will be invalid
R[index] = label;
}
}
}
__global__ void LEQUIV_labeling(int* L, int w, int h) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = x+y*w;
if (x < w && y < h) {
int label = L[index];
int cc = tex1Dfetch(Rtex, label);
L[index] = tex1Dfetch(Rtex, cc);
}
}
void CCL(unsigned char* img, int w, int h, int* label) {
cudaError_t err;
cudaArray* imgarray;
cudaChannelFormatDesc uchardesc =
cudaCreateChannelDesc<unsigned char>();
cudaMallocArray(&imgarray, &uchardesc, w, h);
int* L;
cudaMalloc((void**)&L, w*h*sizeof(int));
int* R;
cudaMalloc((void**)&R, w*h*sizeof(int));
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("startERROR: %s\n", cudaGetErrorString(err));
return;
}
cudaChannelFormatDesc intdesc =
cudaCreateChannelDesc<int>();
cudaBindTextureToArray(imgtex, imgarray, uchardesc);
cudaBindTexture(NULL, Ltex, L, intdesc, w*h*sizeof(int));
cudaBindTexture(NULL, Rtex, R, intdesc, w*h*sizeof(int));
int stop;
int* d_stop;
cudaMalloc((void**)&d_stop, sizeof(int));
dim3 block (LEQUIV_BLOCK_SIZE_X, LEQUIV_BLOCK_SIZE_Y);
dim3 grid ((w+LEQUIV_BLOCK_SIZE_X-1)/LEQUIV_BLOCK_SIZE_X,
(h+LEQUIV_BLOCK_SIZE_Y-1)/LEQUIV_BLOCK_SIZE_Y);
cudaMemcpyToArray(imgarray, 0, 0, img,
w*h*sizeof(unsigned char),
cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("midERROR: %s\n", cudaGetErrorString(err));
return;
}
LEQUIV_prescan <<<grid, block>>>
(L, R, w, h);
stop = 0;
while (stop == 0) {
cudaMemset(d_stop, 0xFF, sizeof(int));
LEQUIV_scan <<<grid, block>>>
(R, w, h, d_stop);
LEQUIV_analysis <<<grid, block>>>
(L, R, w, h);
LEQUIV_labeling <<<grid, block>>>
(L, w, h);
cudaMemcpy(&stop, d_stop, sizeof(int),
cudaMemcpyDeviceToHost);
}
cudaMemcpy(label, L, w*h*sizeof(int),
cudaMemcpyDeviceToHost);
cudaFree(d_stop);
cudaFree(L);
cudaFree(R);
cudaFreeArray(imgarray);
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("endERROR: %s\n", cudaGetErrorString(err));
return;
}
}
}
|
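The CCL pair above depends on the legacy texture-reference API (imgtex/Ltex/Rtex from textures.cuh, cudaBindTexture, tex1Dfetch), which newer CUDA toolkits have removed. The sketch below shows the texture-object replacement for the 1D integer fetches: a cudaTextureObject_t bound to a linear int buffer and passed to the kernel as an argument. It is a standalone illustration under that assumption, not a drop-in patch for this file.
#include <cstdio>
#include <cuda_runtime.h>
__global__ void read_through_tex(cudaTextureObject_t t, int *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = tex1Dfetch<int>(t, i);  // texture-object form of tex1Dfetch
}
int main() {
  const int n = 8;
  int h[n], *d_in, *d_out;
  for (int i = 0; i < n; ++i) h[i] = i * i;
  cudaMalloc((void **) &d_in, n * sizeof(int));
  cudaMalloc((void **) &d_out, n * sizeof(int));
  cudaMemcpy(d_in, h, n * sizeof(int), cudaMemcpyHostToDevice);
  cudaResourceDesc res = {};
  res.resType = cudaResourceTypeLinear;       // plain device buffer, like Ltex/Rtex
  res.res.linear.devPtr = d_in;
  res.res.linear.desc = cudaCreateChannelDesc<int>();
  res.res.linear.sizeInBytes = n * sizeof(int);
  cudaTextureDesc td = {};
  td.readMode = cudaReadModeElementType;
  cudaTextureObject_t tex = 0;
  cudaCreateTextureObject(&tex, &res, &td, NULL);
  read_through_tex<<<1, n>>>(tex, d_out, n);
  cudaMemcpy(h, d_out, n * sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%d ", h[i]);
  printf("\n");
  cudaDestroyTextureObject(tex);
  cudaFree(d_in); cudaFree(d_out);
  return 0;
}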
db75edfd475ab731f21b9a2f37ba424e2dd40179.hip
|
// !!! This is a file automatically generated by hipify!!!
#if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__)
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#endif
#include <opencv2/opencv.hpp>
#include <vector>
#include <chrono>
#include <string>
using namespace std;
__global__ void greyscale(unsigned char* rgb, unsigned char* g, const size_t cols, const size_t rows, const int mult)
{
auto tidx = blockIdx.x * blockDim.x + threadIdx.x;
auto tidy = blockIdx.y * blockDim.y + threadIdx.y;
if (tidx < cols && tidy < rows)
{
g[tidy * cols + tidx] = (
mult * rgb[3 * (tidy * cols + tidx)]
+ mult * rgb[3 * (tidy * cols + tidx) + 1]
+ mult * rgb[3 * (tidy * cols + tidx) + 2]
) / 1024;
}
}
void grayscaleStains(std::string file)
{
cv::Mat m_in = cv::imread(file, cv::IMREAD_UNCHANGED);
const int mult = 550;
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
srand(time(0));
std::vector< unsigned char > g(rows * cols);
cv::Mat m_out(rows, cols, CV_8UC1, g.data());
unsigned char* rgb_d;
unsigned char* g_d;
auto start = std::chrono::system_clock::now();
hipEvent_t cudaStart, cudaStop;
hipEventCreate(&cudaStart);
hipEventCreate(&cudaStop);
hipEventRecord(cudaStart);
hipMalloc(&rgb_d, 3 * rows * cols);
hipMalloc(&g_d, rows * cols);
hipMemcpy(rgb_d, rgb, 3 * rows * cols, hipMemcpyHostToDevice);
dim3 block(32, 32);
dim3 grid((cols - 1) / block.x + 1, (rows - 1) / block.y + 1); //(4,4)
cout << "rows : " << rows << endl;
cout << "cols : " << cols << endl;
//Random-number test to get something viable
//int randomNumber = rand() % 1024;
//cout << "Random number : " << randomNumber << endl;
greyscale << <grid, block >> > (rgb_d, g_d, cols, rows, mult);
hipMemcpy(g.data(), g_d, rows * cols, hipMemcpyDeviceToHost);
hipEventRecord(cudaStop);
hipEventSynchronize(cudaStop);
auto stop = std::chrono::system_clock::now();
auto duration = stop - start;
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count();
float elapsedTime;
hipEventElapsedTime(&elapsedTime, cudaStart, cudaStop);
std::cout << "Temps kernel: " << elapsedTime << std::endl;
hipEventDestroy(cudaStart);
hipEventDestroy(cudaStop);
auto err = hipGetLastError();
std::cout << "Erreur: " << err << std::endl;
std::cout << ms << " ms" << std::endl;
cv::imwrite("gsCUDA.jpg", m_out);
cout << "Le fichier \"gsCUDA.jpg\" a bien ete genere. Toutes nos felicitations !" << endl;
hipFree(rgb_d);
hipFree(g_d);
}
|
db75edfd475ab731f21b9a2f37ba424e2dd40179.cu
|
#if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__)
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#endif
#include <opencv2/opencv.hpp>
#include <vector>
#include <chrono>
#include <string>
using namespace std;
__global__ void greyscale(unsigned char* rgb, unsigned char* g, const size_t cols, const size_t rows, const int mult)
{
auto tidx = blockIdx.x * blockDim.x + threadIdx.x;
auto tidy = blockIdx.y * blockDim.y + threadIdx.y;
if (tidx < cols && tidy < rows)
{
g[tidy * cols + tidx] = (
mult * rgb[3 * (tidy * cols + tidx)]
+ mult * rgb[3 * (tidy * cols + tidx) + 1]
+ mult * rgb[3 * (tidy * cols + tidx) + 2]
) / 1024;
}
}
void grayscaleStains(std::string file)
{
cv::Mat m_in = cv::imread(file, cv::IMREAD_UNCHANGED);
const int mult = 550;
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
srand(time(0));
std::vector< unsigned char > g(rows * cols);
cv::Mat m_out(rows, cols, CV_8UC1, g.data());
unsigned char* rgb_d;
unsigned char* g_d;
auto start = std::chrono::system_clock::now();
cudaEvent_t cudaStart, cudaStop;
cudaEventCreate(&cudaStart);
cudaEventCreate(&cudaStop);
cudaEventRecord(cudaStart);
cudaMalloc(&rgb_d, 3 * rows * cols);
cudaMalloc(&g_d, rows * cols);
cudaMemcpy(rgb_d, rgb, 3 * rows * cols, cudaMemcpyHostToDevice);
dim3 block(32, 32);
dim3 grid((cols - 1) / block.x + 1, (rows - 1) / block.y + 1); //(4,4)
cout << "rows : " << rows << endl;
cout << "cols : " << cols << endl;
//Random-number test to get something viable
//int randomNumber = rand() % 1024;
//cout << "Random number : " << randomNumber << endl;
greyscale << <grid, block >> > (rgb_d, g_d, cols, rows, mult);
cudaMemcpy(g.data(), g_d, rows * cols, cudaMemcpyDeviceToHost);
cudaEventRecord(cudaStop);
cudaEventSynchronize(cudaStop);
auto stop = std::chrono::system_clock::now();
auto duration = stop - start;
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count();
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, cudaStart, cudaStop);
std::cout << "Temps kernel: " << elapsedTime << std::endl;
cudaEventDestroy(cudaStart);
cudaEventDestroy(cudaStop);
auto err = cudaGetLastError();
std::cout << "Erreur: " << err << std::endl;
std::cout << ms << " ms" << std::endl;
cv::imwrite("gsCUDA.jpg", m_out);
cout << "Le fichier \"gsCUDA.jpg\" a bien ete genere. Toutes nos felicitations !" << endl;
cudaFree(rgb_d);
cudaFree(g_d);
}
|
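The greyscale kernel above intentionally weights all three channels by mult = 550 and lets the 8-bit store wrap around, which is what produces the "stains". For contrast, a conventional fixed-point luma conversion that cannot overflow is sketched below; the 306/601/117 weights are the usual 0.299/0.587/0.114 coefficients scaled to sum to 1024, and the B,G,R channel order is an assumption based on OpenCV's default layout.
#include <cstdio>
#include <cuda_runtime.h>
// Fixed-point luma: y = (306*R + 601*G + 117*B) >> 10; the weights sum to 1024,
// so the result stays in [0, 255] and the unsigned char store never wraps.
__global__ void luma(const unsigned char *bgr, unsigned char *grey,
                     size_t cols, size_t rows) {
  size_t x = blockIdx.x * blockDim.x + threadIdx.x;
  size_t y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < cols && y < rows) {
    size_t i = 3 * (y * cols + x);
    unsigned int b = bgr[i], g = bgr[i + 1], r = bgr[i + 2]; // assumed BGR order
    grey[y * cols + x] = (unsigned char)((306u * r + 601u * g + 117u * b) >> 10);
  }
}
int main() {
  const size_t cols = 4, rows = 1;
  unsigned char h_bgr[3 * cols * rows], h_grey[cols * rows];
  for (size_t i = 0; i < 3 * cols * rows; ++i) h_bgr[i] = (unsigned char)(20 * i);
  unsigned char *d_bgr, *d_grey;
  cudaMalloc((void **) &d_bgr, sizeof(h_bgr));
  cudaMalloc((void **) &d_grey, sizeof(h_grey));
  cudaMemcpy(d_bgr, h_bgr, sizeof(h_bgr), cudaMemcpyHostToDevice);
  dim3 block(32, 32);
  dim3 grid((cols + block.x - 1) / block.x, (rows + block.y - 1) / block.y);
  luma<<<grid, block>>>(d_bgr, d_grey, cols, rows);
  cudaMemcpy(h_grey, d_grey, sizeof(h_grey), cudaMemcpyDeviceToHost);
  for (size_t i = 0; i < cols * rows; ++i) printf("%d ", h_grey[i]);
  printf("\n");
  cudaFree(d_bgr); cudaFree(d_grey);
  return 0;
}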
71ea90c595baebcdc5d3280582b50313d381c557.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixSimpleMotionBlur.h"
#include "random.h"
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
static __forceinline__ __device__ float3 traceCamera(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float ray_time
)
{
unsigned int r, g, b;
optixTrace(
handle,
ray_origin,
ray_direction,
0.0f, // tmin
1e16f, // tmax
ray_time,
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_RADIANCE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
RAY_TYPE_RADIANCE, // missSBTIndex
r, g, b );
return make_float3(
__int_as_float( r ),
__int_as_float( g ),
__int_as_float( b )
);
}
static __forceinline__ __device__ void setPayload( float3 p )
{
optixSetPayload_0( float_as_int( p.x ) );
optixSetPayload_1( float_as_int( p.y ) );
optixSetPayload_2( float_as_int( p.z ) );
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__rg()
{
const int w = params.width;
const int h = params.height;
const float3 eye = params.eye;
const float3 U = params.U;
const float3 V = params.V;
const float3 W = params.W;
const uint3 idx = optixGetLaunchIndex();
const int subframe_index = params.subframe_index;
unsigned int seed = tea<4>( idx.y*w + idx.x, subframe_index );
// The center of each pixel is at fraction (0.5,0.5)
const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) );
const float2 d = 2.0f * make_float2(
( static_cast<float>( idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ),
( static_cast<float>( idx.y ) + subpixel_jitter.y ) / static_cast<float>( h )
) - 1.0f;
float3 ray_direction = normalize(d.x*U + d.y*V + W);
float3 ray_origin = eye;
const float3 result = traceCamera( params.handle, ray_origin, ray_direction, rnd( seed ) );
const int image_index = idx.y*w + idx.x;
float3 accum_color = result;
if( subframe_index > 0 )
{
const float a = 1.0f / static_cast<float>( subframe_index+1 );
const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]);
accum_color = lerp( accum_color_prev, accum_color, a );
}
params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f);
params.frame_buffer[ image_index ] = make_color ( accum_color );
}
extern "C" __global__ void __miss__camera()
{
MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
setPayload( rt_data->color );
}
extern "C" __global__ void __closesthit__camera()
{
HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer();
setPayload( rt_data->color );
}
extern "C" __global__ void __intersection__sphere()
{
HitGroupData* hg_data = reinterpret_cast<HitGroupData*>( optixGetSbtDataPointer() );
const float3 orig = optixGetObjectRayOrigin();
const float3 dir = optixGetObjectRayDirection();
const float3 center = hg_data->center;
const float radius = hg_data->radius;
const float3 O = orig - center;
const float l = 1 / length( dir );
const float3 D = dir * l;
const float b = dot( O, D );
const float c = dot( O, O ) - radius * radius;
const float disc = b * b - c;
if( disc > 0.0f )
{
const float sdisc = sqrtf( disc );
const float root1 = ( -b - sdisc );
const float root11 = 0.0f;
const float3 shading_normal = ( O + ( root1 + root11 ) * D ) / radius;
unsigned int p0, p1, p2;
p0 = float_as_int( shading_normal.x );
p1 = float_as_int( shading_normal.y );
p2 = float_as_int( shading_normal.z );
optixReportIntersection(
root1, // t hit
0, // user hit kind
p0, p1, p2
);
}
}
|
71ea90c595baebcdc5d3280582b50313d381c557.cu
|
//
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixSimpleMotionBlur.h"
#include "random.h"
#include <sutil/vec_math.h>
#include <cuda/helpers.h>
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
static __forceinline__ __device__ float3 traceCamera(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float ray_time
)
{
unsigned int r, g, b;
optixTrace(
handle,
ray_origin,
ray_direction,
0.0f, // tmin
1e16f, // tmax
ray_time,
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_RADIANCE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
RAY_TYPE_RADIANCE, // missSBTIndex
r, g, b );
return make_float3(
__int_as_float( r ),
__int_as_float( g ),
__int_as_float( b )
);
}
static __forceinline__ __device__ void setPayload( float3 p )
{
optixSetPayload_0( float_as_int( p.x ) );
optixSetPayload_1( float_as_int( p.y ) );
optixSetPayload_2( float_as_int( p.z ) );
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__rg()
{
const int w = params.width;
const int h = params.height;
const float3 eye = params.eye;
const float3 U = params.U;
const float3 V = params.V;
const float3 W = params.W;
const uint3 idx = optixGetLaunchIndex();
const int subframe_index = params.subframe_index;
unsigned int seed = tea<4>( idx.y*w + idx.x, subframe_index );
// The center of each pixel is at fraction (0.5,0.5)
const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) );
const float2 d = 2.0f * make_float2(
( static_cast<float>( idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ),
( static_cast<float>( idx.y ) + subpixel_jitter.y ) / static_cast<float>( h )
) - 1.0f;
float3 ray_direction = normalize(d.x*U + d.y*V + W);
float3 ray_origin = eye;
const float3 result = traceCamera( params.handle, ray_origin, ray_direction, rnd( seed ) );
const int image_index = idx.y*w + idx.x;
float3 accum_color = result;
if( subframe_index > 0 )
{
const float a = 1.0f / static_cast<float>( subframe_index+1 );
const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]);
accum_color = lerp( accum_color_prev, accum_color, a );
}
params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f);
params.frame_buffer[ image_index ] = make_color ( accum_color );
}
extern "C" __global__ void __miss__camera()
{
MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
setPayload( rt_data->color );
}
extern "C" __global__ void __closesthit__camera()
{
HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer();
setPayload( rt_data->color );
}
extern "C" __global__ void __intersection__sphere()
{
HitGroupData* hg_data = reinterpret_cast<HitGroupData*>( optixGetSbtDataPointer() );
const float3 orig = optixGetObjectRayOrigin();
const float3 dir = optixGetObjectRayDirection();
const float3 center = hg_data->center;
const float radius = hg_data->radius;
const float3 O = orig - center;
const float l = 1 / length( dir );
const float3 D = dir * l;
const float b = dot( O, D );
const float c = dot( O, O ) - radius * radius;
const float disc = b * b - c;
if( disc > 0.0f )
{
const float sdisc = sqrtf( disc );
const float root1 = ( -b - sdisc );
const float root11 = 0.0f;
const float3 shading_normal = ( O + ( root1 + root11 ) * D ) / radius;
unsigned int p0, p1, p2;
p0 = float_as_int( shading_normal.x );
p1 = float_as_int( shading_normal.y );
p2 = float_as_int( shading_normal.z );
optixReportIntersection(
root1, // t hit
0, // user hit kind
p0, p1, p2
);
}
}
|
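__intersection__sphere above normalizes the ray direction, solves the usual ray-sphere quadratic, and reports only the near root -b - sqrt(disc). The host-side sketch below reproduces that algebra for spot-checking t values outside OptiX; float3 is replaced by a tiny local Vec3 struct so the snippet has no OptiX or sutil dependency.
#include <cmath>
#include <cstdio>
struct Vec3 { float x, y, z; };
static float dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
// Near-hit distance along the *normalized* direction, using the same
// -b - sqrt(disc) root as the intersection program above.
static bool hit_sphere(Vec3 orig, Vec3 dir, Vec3 center, float radius, float *t) {
  float len = std::sqrt(dot(dir, dir));
  Vec3 D = {dir.x / len, dir.y / len, dir.z / len};
  Vec3 O = {orig.x - center.x, orig.y - center.y, orig.z - center.z};
  float b = dot(O, D);
  float c = dot(O, O) - radius * radius;
  float disc = b * b - c;
  if (disc <= 0.0f) return false;
  *t = -b - std::sqrt(disc);
  return true;
}
int main() {
  Vec3 orig = {0.f, 0.f, -5.f}, dir = {0.f, 0.f, 1.f}, center = {0.f, 0.f, 0.f};
  float t;
  if (hit_sphere(orig, dir, center, 1.0f, &t))
    printf("near hit at t = %f\n", t);  // expected 4.0
  return 0;
}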
1245aece651db258b17c52af3df415a54c7b82bc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2010-2012, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id: $
* @authors: Cedric Cagniart, Koen Buys, Anatoly Baksheev
*
*/
#include <pcl/gpu/people/tree.h>
#include <pcl/gpu/people/label_common.h>
#include <pcl/gpu/utils/safe_call.hpp>
#include <pcl/gpu/utils/texture_binder.hpp>
#include <stdio.h>
#include <limits>
#include <assert.h>
#include "internal.h"
using pcl::gpu::people::trees::Node;
using pcl::gpu::people::trees::Label;
using pcl::gpu::people::trees::AttribLocation;
using pcl::gpu::people::trees::Attrib;
using pcl::gpu::people::trees::focal;
using pcl::gpu::people::trees::NUM_LABELS;
using namespace std;
using uint = unsigned int;
#ifdef __CDT_PARSER__ // This is an eclipse specific hack, does nothing to the code
#define __global__
#define __device__
#define __shared__
#define __forceinline__
#define __constant__
#endif
namespace pcl
{
namespace device
{
texture<unsigned short, 2, hipReadModeElementType> depthTex;
texture<char4, 2, hipReadModeElementType> multilabelTex;
__constant__ int constFGThresh;
template<bool testFG> __device__ __forceinline__ Label
evaluateTree(int u, int v, float f, int treeHeight, int numNodes, const Node* nodes, const Label* leaves)
{
int depth = tex2D(depthTex, u, v);
float scale = f / depth;
// go down the tree
int nid = 0;
for(int nodeDepth = 0; nodeDepth < treeHeight; ++nodeDepth)
{
const Node node = nodes[nid];
const AttribLocation& loc = node.loc;
int d1 = tex2D (depthTex, u + loc.du1 * scale, v + loc.dv1 * scale);
int d2 = tex2D (depthTex, u + loc.du2 * scale, v + loc.dv2 * scale);
if (testFG)
{
if( d1 - depth > constFGThresh )
d1 = std::numeric_limits<short>::max();
if( d2 - depth > constFGThresh )
d2 = std::numeric_limits<short>::max();
}
int delta = d1-d2;
bool test = delta > (int)node.thresh;
if( test ) nid = nid*2+2;
else nid = nid*2+1;
}
return leaves[nid-numNodes];
}
/** \brief This is the CUDA kernel doing the actual RDF evaluation */
__global__ void
KernelCUDA_runTree( const float f,
const int treeHeight,
const int numNodes,
const Node* nodes,
const Label* leaves,
PtrStepSz<Label> labels)
{
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if( u < labels.cols && v < labels.rows)
labels.ptr(v)[u] = evaluateTree<false>(u, v, f, treeHeight, numNodes, nodes, leaves);
}
template<bool testFG> __global__ void
KernelCUDA_MultiTreePass( const int treeId,
const float f,
const int treeHeight,
const int numNodes,
const Node* nodes,
const Label* leaves,
PtrStepSz<unsigned short> depth,
PtrStepSz<char4> multiLabels)
{
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if(u < multiLabels.cols && v < multiLabels.rows)
{
// This maps a char4 pointer on a char pointer
char* pixel = (char*)&multiLabels.ptr(v)[u];
// This test ensures that the FG preparation (see utils.cu) is taken into account in later iterations
if(depth.ptr(v)[u] == std::numeric_limits<unsigned short>::max())
pixel[treeId] = 29; // see label_common.h for Background label (=29)
// TODO: replace this hardcoded label with the part_t enum label
else
pixel[treeId] = evaluateTree<testFG>(u, v, f, treeHeight, numNodes, nodes, leaves);
}
}
/** \brief This function wraps the actual CUDA kernel doing the RDF evaluation */
void CUDA_runTree ( float focal, int treeHeight, int numNodes, const Node* nodes, const Label* leaves, const Depth& depth, Labels& labels )
{
labels.create( depth.rows(), depth.cols() );
depthTex.addressMode[0] = hipAddressModeClamp;
TextureBinder binder(depth, depthTex);
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
hipLaunchKernelGGL(( KernelCUDA_runTree), dim3(grid), dim3(block) , 0, 0, focal, treeHeight, numNodes, nodes, leaves, labels);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
void CUDA_runMultiTreePass ( int FGThresh,
int treeId,
float focal,
int treeHeight,
int numNodes,
const Node* nodes_device,
const Label* leaves_device,
const Depth& depth,
MultiLabels& multilabel )
{
//std::cout << "(I) : CUDA_runMultiTreePass() called" << std::endl;
depthTex.addressMode[0] = hipAddressModeClamp;
TextureBinder binder(depth, depthTex);
dim3 block(32, 8);
dim3 grid( divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
if(FGThresh == std::numeric_limits<int>::max())
{
hipLaunchKernelGGL(( KernelCUDA_MultiTreePass<false>), dim3(grid), dim3(block) , 0, 0, treeId, focal, treeHeight,
numNodes, nodes_device, leaves_device, depth, multilabel);
}
else
{
cudaSafeCall( hipMemcpyToSymbol(constFGThresh, &FGThresh, sizeof(FGThresh)) );
hipLaunchKernelGGL(( KernelCUDA_MultiTreePass<true>), dim3(grid), dim3(block) , 0, 0, treeId, focal, treeHeight,
numNodes, nodes_device, leaves_device, depth, multilabel);
}
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////////////
__device__ int findMaxId( int numBins, char* bins )
{
// HACK .. not testing against numBins = 0
int maxId = 0;
char maxVal = bins[0];
for(int i=1;i<numBins;++i)
{
char val = bins[i];
if( val > maxVal ) { maxId = i; maxVal = val; }
}
return maxId;
}
//this will find the max Index but return -1 if there is a tie
__device__ int findMaxId_testTie(int numBins, char* bins)
{
int maxId = 0;
int maxId_other = -1;
char maxVal = bins[0];
for(int i=1;i<numBins;++i) {
char val = bins[i];
if( val == maxVal ) { maxId_other = i; }
if( val > maxVal ) { maxId = i; maxId_other = -1; maxVal = val; }
}
if( maxId_other != -1) return -1;
else return maxId;
}
__global__ void KernelCUDA_MultiTreeMerge( const int numTrees, PtrStepSz<Label> labels )
{
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if( u >= labels.cols || v >= labels.rows)
return;
// reset the bins
char bins[NUM_LABELS];
for(int li = 0; li < NUM_LABELS; ++li)
bins[li] = 0;
// find a consensus with the current trees
{
char4 pixlabels = tex2D(multilabelTex, u ,v);
char* bob = (char*)&pixlabels; //horrible but char4's have xyzw members
for(int ti = 0; ti < numTrees; ++ti)
bins[ bob[ti] ]++;
}
int res = findMaxId_testTie(NUM_LABELS, bins);
// if this fails... find a consensus in a 1 neighbourhood
if( res < 0 )
{
int depth = tex2D(depthTex, u,v);
for(int i = -1 ; i <= 1; ++i)
{
for(int j = -1; j <= 1; ++j)
{
int depth_neighbor = tex2D(depthTex,u+i,v+j);
char4 labels_neighbor = tex2D(multilabelTex, u+i,v+j);
char* bob = (char*)&labels_neighbor; //horrible but char4's have xyzw members
//TODO: redo this part
int weight = std::abs(depth-depth_neighbor) < 50 ? 1:0; // 5cms
for(int ti = 0; ti < numTrees; ++ti)
bins[ bob[ti] ] += weight;
}
}
res = findMaxId( NUM_LABELS, bins );
}
labels.ptr(v)[u] = res;
}
/** \brief This merges the labels from all trees into a histogram of probabilities **/
__global__ void KernelCUDA_MultiTreeCreateProb (const int numTrees, PtrStepSz<prob_histogram> prob)
{
// map block and thread onto image coordinates
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if( u >= prob.cols || v >= prob.rows )
return;
char4 pixlabels = tex2D (multilabelTex, u ,v);
char* bob = (char*)&pixlabels; //horrible but char4's have xyzw members
// Reset the probability histogram (all NUM_LABELS bins) first
for(int in = 0; in < NUM_LABELS; in++)
{
prob.ptr(v)[u].probs[in] = 0;
}
for(int ti = 0; ti < numTrees; ++ti)
{
// Each tree casts a vote to the probability
// TODO: replace this with a histogram copy
prob.ptr(v)[u].probs[bob[ti]] += 0.25;
}
}
/** \brief This will merge the votes from the different trees into one final vote */
void CUDA_runMultiTreeMerge( int numTrees, const Depth& depth, const MultiLabels& multilabel, Labels& labels)
{
//std::cout << "(I) : CUDA_runMultiTreeMerge() called" << std::endl;
labels.create(depth.rows(), depth.cols());
depthTex.addressMode[0] = hipAddressModeClamp;
TextureBinder binder(depth, depthTex);
multilabelTex.addressMode[0] = hipAddressModeClamp;
TextureBinder mlabels_binder(multilabel, multilabelTex);
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
hipLaunchKernelGGL(( KernelCUDA_MultiTreeMerge), dim3(grid), dim3(block) , 0, 0, numTrees, labels );
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
/** \brief This will merge the votes from the different trees into one final vote, including per-label probabilities */
void CUDA_runMultiTreeProb ( int numTrees,
const Depth& depth,
const MultiLabels& multilabel,
Labels& labels,
LabelProbability& probabilities)
{
std::cout << "(I) : CUDA_runMultiTreeProb() called" << std::endl;
//labels.create(depth.rows(), depth.cols());
//depthTex.addressMode[0] = hipAddressModeClamp;
//TextureBinder binder(depth, depthTex);
multilabelTex.addressMode[0] = hipAddressModeClamp;
TextureBinder mlabels_binder(multilabel, multilabelTex);
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
hipLaunchKernelGGL(( KernelCUDA_MultiTreeCreateProb), dim3(grid), dim3(block) , 0, 0, numTrees, probabilities);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
pcl::device::CUDATree::CUDATree (int treeHeight_arg, const std::vector<Node>& nodes, const std::vector<Label>& leaves)
{
treeHeight = treeHeight_arg;
numNodes = (1 << treeHeight) - 1;
assert (static_cast<int> (nodes.size ()) == numNodes );
assert (static_cast<int> (leaves.size ()) == (1 << treeHeight) );
nodes_device.upload(nodes);
leaves_device.upload(leaves);
}
void
pcl::device::MultiTreeLiveProc::process (const Depth& dmap, Labels& lmap)
{
// TODO: is this assert needed if we only call process?
//assert(!trees.empty());
// TODO is this iteration needed when we call multitreepass in the process step?
/* if (trees.size() == 1)
{
const CUDATree& t = trees[0];
CUDA_runTree( focal, t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, lmap );
return;
}
*/
process(dmap, lmap, std::numeric_limits<int>::max());
}
void
pcl::device::MultiTreeLiveProc::process (const Depth& dmap, Labels& lmap, int FGThresh)
{
assert(!trees.empty());
unsigned int numTrees = static_cast<unsigned int> (trees.size ());
multilmap.create(dmap.rows(), dmap.cols());
// 1 - run the multi passes
for( int ti = 0; ti < numTrees; ++ti )
{
const CUDATree& t = trees[ti];
CUDA_runMultiTreePass ( FGThresh, ti, static_cast<float> (focal), t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, multilmap );
}
// 2 - run the merging
assert( numTrees <= 4 );
device::CUDA_runMultiTreeMerge(numTrees, dmap, multilmap, lmap);
}
void
pcl::device::MultiTreeLiveProc::processProb (const Depth& dmap, Labels& lmap, LabelProbability& prob, int FGThresh)
{
assert(!trees.empty());
unsigned int numTrees = static_cast<unsigned int> (trees.size ());
assert( numTrees <= 4 );
multilmap.create(dmap.rows(), dmap.cols());
// 1 - run the multi passes
for( int ti = 0; ti < numTrees; ++ti )
{
const CUDATree& t = trees[ti];
CUDA_runMultiTreePass ( FGThresh, ti, static_cast<float> (focal), t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, multilmap );
}
device::CUDA_runMultiTreeProb(numTrees, dmap, multilmap, lmap, prob);
}
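// --------------------------------------------------------------------------
// Editor's sketch (not part of the original file): evaluateTree() walks an
// implicit complete binary tree stored as a flat array. Node i has children
// 2*i+1 / 2*i+2, there are (1 << treeHeight) - 1 internal nodes (see the
// CUDATree constructor), and a node id past the internal nodes indexes the
// leaf array as nid - numNodes. A host-side illustration (names illustrative):
//
// inline int walkTreeHost (int treeHeight, const bool* goRight /* one decision per level */)
// {
//   const int numNodes = (1 << treeHeight) - 1;
//   int nid = 0;
//   for (int level = 0; level < treeHeight; ++level)
//     nid = goRight[level] ? nid * 2 + 2 : nid * 2 + 1;
//   return nid - numNodes; // index into the Label (leaf) array
// }
// --------------------------------------------------------------------------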
|
1245aece651db258b17c52af3df415a54c7b82bc.cu
|
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2010-2012, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id: $
* @authors: Cedric Cagniart, Koen Buys, Anatoly Baksheev
*
*/
#include <pcl/gpu/people/tree.h>
#include <pcl/gpu/people/label_common.h>
#include <pcl/gpu/utils/safe_call.hpp>
#include <pcl/gpu/utils/texture_binder.hpp>
#include <stdio.h>
#include <limits>
#include <assert.h>
#include "internal.h"
using pcl::gpu::people::trees::Node;
using pcl::gpu::people::trees::Label;
using pcl::gpu::people::trees::AttribLocation;
using pcl::gpu::people::trees::Attrib;
using pcl::gpu::people::trees::focal;
using pcl::gpu::people::trees::NUM_LABELS;
using namespace std;
using uint = unsigned int;
#ifdef __CDT_PARSER__ // This is an eclipse specific hack, does nothing to the code
#define __global__
#define __device__
#define __shared__
#define __forceinline__
#define __constant__
#endif
namespace pcl
{
namespace device
{
texture<unsigned short, 2, cudaReadModeElementType> depthTex;
texture<char4, 2, cudaReadModeElementType> multilabelTex;
__constant__ int constFGThresh;
template<bool testFG> __device__ __forceinline__ Label
evaluateTree(int u, int v, float f, int treeHeight, int numNodes, const Node* nodes, const Label* leaves)
{
int depth = tex2D(depthTex, u, v);
float scale = f / depth;
// go down the tree
int nid = 0;
for(int nodeDepth = 0; nodeDepth < treeHeight; ++nodeDepth)
{
const Node node = nodes[nid];
const AttribLocation& loc = node.loc;
int d1 = tex2D (depthTex, u + loc.du1 * scale, v + loc.dv1 * scale);
int d2 = tex2D (depthTex, u + loc.du2 * scale, v + loc.dv2 * scale);
if (testFG)
{
if( d1 - depth > constFGThresh )
d1 = std::numeric_limits<short>::max();
if( d2 - depth > constFGThresh )
d2 = std::numeric_limits<short>::max();
}
int delta = d1-d2;
bool test = delta > (int)node.thresh;
if( test ) nid = nid*2+2;
else nid = nid*2+1;
}
return leaves[nid-numNodes];
}
/** \brief This is the CUDA kernel doing the actual RDF evaluation */
__global__ void
KernelCUDA_runTree( const float f,
const int treeHeight,
const int numNodes,
const Node* nodes,
const Label* leaves,
PtrStepSz<Label> labels)
{
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if( u < labels.cols && v < labels.rows)
labels.ptr(v)[u] = evaluateTree<false>(u, v, f, treeHeight, numNodes, nodes, leaves);
}
template<bool testFG> __global__ void
KernelCUDA_MultiTreePass( const int treeId,
const float f,
const int treeHeight,
const int numNodes,
const Node* nodes,
const Label* leaves,
PtrStepSz<unsigned short> depth,
PtrStepSz<char4> multiLabels)
{
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if(u < multiLabels.cols && v < multiLabels.rows)
{
// This maps a char4 pointer on a char pointer
char* pixel = (char*)&multiLabels.ptr(v)[u];
// This test ensures that the FG preparation (see utils.cu) is taken into account in later iterations
if(depth.ptr(v)[u] == std::numeric_limits<unsigned short>::max())
pixel[treeId] = 29; // see label_common.h for Background label (=29)
// TODO: replace this hardcoded label with the part_t enum label
else
pixel[treeId] = evaluateTree<testFG>(u, v, f, treeHeight, numNodes, nodes, leaves);
}
}
/** \brief This function wraps the actual CUDA kernel doing the RDF evaluation */
void CUDA_runTree ( float focal, int treeHeight, int numNodes, const Node* nodes, const Label* leaves, const Depth& depth, Labels& labels )
{
labels.create( depth.rows(), depth.cols() );
depthTex.addressMode[0] = cudaAddressModeClamp;
TextureBinder binder(depth, depthTex);
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
KernelCUDA_runTree<<< grid, block >>>( focal, treeHeight, numNodes, nodes, leaves, labels);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
void CUDA_runMultiTreePass ( int FGThresh,
int treeId,
float focal,
int treeHeight,
int numNodes,
const Node* nodes_device,
const Label* leaves_device,
const Depth& depth,
MultiLabels& multilabel )
{
//std::cout << "(I) : CUDA_runMultiTreePass() called" << std::endl;
depthTex.addressMode[0] = cudaAddressModeClamp;
TextureBinder binder(depth, depthTex);
dim3 block(32, 8);
dim3 grid( divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
if(FGThresh == std::numeric_limits<int>::max())
{
KernelCUDA_MultiTreePass<false><<< grid, block >>>( treeId, focal, treeHeight,
numNodes, nodes_device, leaves_device, depth, multilabel);
}
else
{
cudaSafeCall( cudaMemcpyToSymbol(constFGThresh, &FGThresh, sizeof(FGThresh)) );
KernelCUDA_MultiTreePass<true><<< grid, block >>>( treeId, focal, treeHeight,
numNodes, nodes_device, leaves_device, depth, multilabel);
}
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////////////
__device__ int findMaxId( int numBins, char* bins )
{
// HACK .. not testing against numBins = 0
int maxId = 0;
char maxVal = bins[0];
for(int i=1;i<numBins;++i)
{
char val = bins[i];
if( val > maxVal ) { maxId = i; maxVal = val; }
}
return maxId;
}
//this will find the max Index but return -1 if there is a tie
__device__ int findMaxId_testTie(int numBins, char* bins)
{
int maxId = 0;
int maxId_other = -1;
char maxVal = bins[0];
for(int i=1;i<numBins;++i) {
char val = bins[i];
if( val == maxVal ) { maxId_other = i; }
if( val > maxVal ) { maxId = i; maxId_other = -1; maxVal = val; }
}
if( maxId_other != -1) return -1;
else return maxId;
}
__global__ void KernelCUDA_MultiTreeMerge( const int numTrees, PtrStepSz<Label> labels )
{
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if( u >= labels.cols || v >= labels.rows)
return;
// reset the bins
char bins[NUM_LABELS];
for(int li = 0; li < NUM_LABELS; ++li)
bins[li] = 0;
// find a consensus with the current trees
{
char4 pixlabels = tex2D(multilabelTex, u ,v);
char* bob = (char*)&pixlabels; //horrible but char4's have xyzw members
for(int ti = 0; ti < numTrees; ++ti)
bins[ bob[ti] ]++;
}
int res = findMaxId_testTie(NUM_LABELS, bins);
// if this fails... find a consensus in a 1 neighbourhood
if( res < 0 )
{
int depth = tex2D(depthTex, u,v);
for(int i = -1 ; i <= 1; ++i)
{
for(int j = -1; j <= 1; ++j)
{
int depth_neighbor = tex2D(depthTex,u+i,v+j);
char4 labels_neighbor = tex2D(multilabelTex, u+i,v+j);
char* bob = (char*)&labels_neighbor; //horrible but char4's have xyzw members
//TODO: redo this part
int weight = std::abs(depth-depth_neighbor) < 50 ? 1:0; // 5cms
for(int ti = 0; ti < numTrees; ++ti)
bins[ bob[ti] ] += weight;
}
}
res = findMaxId( NUM_LABELS, bins );
}
labels.ptr(v)[u] = res;
}
/** \brief This merges the labels from all trees into a histogram of probabilities **/
__global__ void KernelCUDA_MultiTreeCreateProb (const int numTrees, PtrStepSz<prob_histogram> prob)
{
// map block and thread onto image coordinates
int u = blockIdx.x * blockDim.x + threadIdx.x;
int v = blockIdx.y * blockDim.y + threadIdx.y;
if( u >= prob.cols || v >= prob.rows )
return;
char4 pixlabels = tex2D (multilabelTex, u ,v);
char* bob = (char*)&pixlabels; //horrible but char4's have xyzw members
// Reset the probability histogram (all NUM_LABELS bins) first
for(int in = 0; in < NUM_LABELS; in++)
{
prob.ptr(v)[u].probs[in] = 0;
}
for(int ti = 0; ti < numTrees; ++ti)
{
// Each tree casts a vote to the probability
// TODO: replace this with a histogram copy
prob.ptr(v)[u].probs[bob[ti]] += 0.25;
}
}
/** \brief This will merge the votes from the different trees into one final vote */
void CUDA_runMultiTreeMerge( int numTrees, const Depth& depth, const MultiLabels& multilabel, Labels& labels)
{
//std::cout << "(I) : CUDA_runMultiTreeMerge() called" << std::endl;
labels.create(depth.rows(), depth.cols());
depthTex.addressMode[0] = cudaAddressModeClamp;
TextureBinder binder(depth, depthTex);
multilabelTex.addressMode[0] = cudaAddressModeClamp;
TextureBinder mlabels_binder(multilabel, multilabelTex);
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
KernelCUDA_MultiTreeMerge<<< grid, block >>>( numTrees, labels );
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
/** \brief This will merge the votes from the different trees into one final vote, including per-label probabilities */
void CUDA_runMultiTreeProb ( int numTrees,
const Depth& depth,
const MultiLabels& multilabel,
Labels& labels,
LabelProbability& probabilities)
{
std::cout << "(I) : CUDA_runMultiTreeProb() called" << std::endl;
//labels.create(depth.rows(), depth.cols());
//depthTex.addressMode[0] = cudaAddressModeClamp;
//TextureBinder binder(depth, depthTex);
multilabelTex.addressMode[0] = cudaAddressModeClamp;
TextureBinder mlabels_binder(multilabel, multilabelTex);
dim3 block(32, 8);
dim3 grid(divUp(depth.cols(), block.x), divUp(depth.rows(), block.y) );
KernelCUDA_MultiTreeCreateProb<<< grid, block >>>( numTrees, probabilities);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
pcl::device::CUDATree::CUDATree (int treeHeight_arg, const std::vector<Node>& nodes, const std::vector<Label>& leaves)
{
treeHeight = treeHeight_arg;
numNodes = (1 << treeHeight) - 1;
assert (static_cast<int> (nodes.size ()) == numNodes );
assert (static_cast<int> (leaves.size ()) == (1 << treeHeight) );
nodes_device.upload(nodes);
leaves_device.upload(leaves);
}
void
pcl::device::MultiTreeLiveProc::process (const Depth& dmap, Labels& lmap)
{
// TODO: is this assert needed if we only call process?
//assert(!trees.empty());
// TODO is this iteration needed when we call multitreepass in the process step?
/* if (trees.size() == 1)
{
const CUDATree& t = trees[0];
CUDA_runTree( focal, t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, lmap );
return;
}
*/
process(dmap, lmap, std::numeric_limits<int>::max());
}
void
pcl::device::MultiTreeLiveProc::process (const Depth& dmap, Labels& lmap, int FGThresh)
{
assert(!trees.empty());
unsigned int numTrees = static_cast<unsigned int> (trees.size ());
multilmap.create(dmap.rows(), dmap.cols());
// 1 - run the multi passes
for( int ti = 0; ti < numTrees; ++ti )
{
const CUDATree& t = trees[ti];
CUDA_runMultiTreePass ( FGThresh, ti, static_cast<float> (focal), t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, multilmap );
}
// 2 - run the merging
assert( numTrees <= 4 );
device::CUDA_runMultiTreeMerge(numTrees, dmap, multilmap, lmap);
}
void
pcl::device::MultiTreeLiveProc::processProb (const Depth& dmap, Labels& lmap, LabelProbability& prob, int FGThresh)
{
assert(!trees.empty());
unsigned int numTrees = static_cast<unsigned int> (trees.size ());
assert( numTrees <= 4 );
multilmap.create(dmap.rows(), dmap.cols());
// 1 - run the multi passes
for( int ti = 0; ti < numTrees; ++ti )
{
const CUDATree& t = trees[ti];
CUDA_runMultiTreePass ( FGThresh, ti, static_cast<float> (focal), t.treeHeight, t.numNodes, t.nodes_device, t.leaves_device, dmap, multilmap );
}
device::CUDA_runMultiTreeProb(numTrees, dmap, multilmap, lmap, prob);
}
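// --------------------------------------------------------------------------
// Editor's sketch (not part of the original file): KernelCUDA_MultiTreeCreateProb()
// adds a hard-coded weight of 0.25 per tree, which is only a normalized
// probability when exactly four trees vote (the callers assert numTrees <= 4).
// A hedged generalization would weight each vote by 1/numTrees instead:
//
// const float vote = 1.0f / static_cast<float>( numTrees );
// for (int ti = 0; ti < numTrees; ++ti)
//   prob.ptr(v)[u].probs[ bob[ti] ] += vote;
//
// This matches the current behaviour for four trees and keeps the histogram
// normalized for other tree counts.
// --------------------------------------------------------------------------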
|
a76c0c6c63d4e01300ab8529df9f07d7ab16a138.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cuda_timing.h"
#include <cassert>
#include <numeric>
#include <cmath>
#ifdef USE_MPI
# include <mpi.h>
#endif
std::map<std::string, Timing::LoopData> Timing::loops;
std::vector<int> Timing::stack;
int Timing::counter = 0;
bool Timing::measure = true;
void Timing::pushRange(const std::string &_name) {
if (!measure) return;
roctxRangePushA(_name.c_str());
}
void Timing::popRange() {
if (!measure) return;
roctxRangePop();
}
void Timing::markStart(const std::string &_name) {
if (!measure) return;
roctxMarkA(_name.c_str());
}
void Timing::startTimer(const std::string &_name) {
if (!measure) return;
pushRange(_name);
if (loops.size() == 0) counter = 0;
int parent = stack.size() == 0 ? -1 : stack.back();
std::string fullname = _name + "(" + std::to_string(parent) + ")";
int index;
if (loops.find(fullname) != loops.end()) {
loops[fullname].current = clock::now();
index = loops[fullname].index;
} else {
index = counter;
loops[fullname] = LoopData(); // = {counter++, parent, 0.0, now, {}};
loops[fullname].index = counter++;
loops[fullname].parent = parent;
loops[fullname].current = clock::now();
}
stack.push_back(index);
}
void Timing::stopTimer(const std::string &_name) {
if (!measure) return;
auto now = clock::now();
stack.pop_back();
int parent = stack.empty() ? -1 : stack.back();
std::string fullname = _name + "(" + std::to_string(parent) + ")";
loops[fullname].time +=
std::chrono::duration_cast<std::chrono::duration<double>>(
now - loops[fullname].current)
.count();
popRange();
}
void Timing::startTimerCUDA(const std::string &_name, hipStream_t stream) {
if (!measure) return;
markStart(_name);
if (loops.size() == 0) counter = 0;
int parent = stack.size() == 0 ? -1 : stack.back();
std::string fullname = _name + "(" + std::to_string(parent) + ")";
hipEvent_t start;
cudaSafeCall(hipEventCreate(&start));
cudaSafeCall(hipEventRecord(start, stream));
int index;
if (loops.find(fullname) != loops.end()) {
loops[fullname].event_pairs.push_back(start);
index = loops[fullname].index;
} else {
// loops[fullname] = {counter++, parent, 0.0, clock::now(), {start}};
loops[fullname] = LoopData(); // = {counter++, parent, 0.0, now, {}};
loops[fullname].index = counter++;
loops[fullname].parent = parent;
// loops[fullname].event_pairs = {start};
loops[fullname].event_pairs.reserve(10);
loops[fullname].event_pairs.push_back(start);
index = counter - 1;
}
stack.push_back(index);
}
void Timing::stopTimerCUDA(const std::string &_name, hipStream_t stream) {
if (!measure) return;
stack.pop_back();
int parent = stack.empty() ? -1 : stack.back();
std::string fullname = _name + "(" + std::to_string(parent) + ")";
hipEvent_t stop;
cudaSafeCall(hipEventCreate(&stop));
cudaSafeCall(hipEventRecord(stop, stream));
loops[fullname].event_pairs.push_back(stop);
}
void Timing::reportWithParent(int parent, const std::string &indentation) {
for (const auto &element : loops) {
const LoopData &l = element.second;
if (l.parent == parent) {
#ifdef USE_MPI
int rank, nproc;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nproc);
std::vector<double> times(nproc, 0);
MPI_Gather(&l.time, 1, MPI_DOUBLE, times.data(), 1, MPI_DOUBLE, 0,
MPI_COMM_WORLD);
if (!rank) {
double mean = 0.0;
double max = times[0];
double min = times[0];
for (double t : times) {
mean += t;
max = ::max(max, t);
min = ::min(min, t);
}
mean = mean / nproc;
double stddev =
std::accumulate(times.begin(), times.end(), 0.0,
[&](const double &sum, const double &time) {
return sum + (time - mean) * (time - mean);
});
stddev = std::sqrt(stddev / nproc);
std::cout << indentation + element.first + ": ";
std::cout << min << "s; " << max << "s; " << mean << "s; " << stddev
<< "s;\n";
}
#else
std::cout << indentation + element.first + ": "
<< std::to_string(l.time) + " seconds\n";
#endif
reportWithParent(l.index, indentation + " ");
}
}
}
void Timing::sumCudaEvents() {
for (auto &element : loops) {
LoopData &loop = element.second;
assert(loop.event_pairs.size() % 2 == 0 &&
"CUDA event measurement not closed!");
for (int i = 0; 2 * i < loop.event_pairs.size(); ++i) {
float milliseconds = 0;
cudaSafeCall(hipEventElapsedTime(&milliseconds, loop.event_pairs[2 * i],
loop.event_pairs[2 * i + 1]));
loop.time += milliseconds / 1000;
}
loop.event_pairs.clear();
}
}
void Timing::reset() {
loops.clear();
stack.clear();
counter = 0;
}
void Timing::suspend_prof() { measure = false; }
void Timing::continue_prof() { measure = true; }
void Timing::report() {
sumCudaEvents();
reportWithParent(-1, " ");
}
|
a76c0c6c63d4e01300ab8529df9f07d7ab16a138.cu
|
#include "cuda_timing.h"
#include <cassert>
#include <numeric>
#include <cmath>
#ifdef USE_MPI
# include <mpi.h>
#endif
std::map<std::string, Timing::LoopData> Timing::loops;
std::vector<int> Timing::stack;
int Timing::counter = 0;
bool Timing::measure = true;
void Timing::pushRange(const std::string &_name) {
if (!measure) return;
nvtxRangePushA(_name.c_str());
}
void Timing::popRange() {
if (!measure) return;
nvtxRangePop();
}
void Timing::markStart(const std::string &_name) {
if (!measure) return;
nvtxMarkA(_name.c_str());
}
void Timing::startTimer(const std::string &_name) {
if (!measure) return;
pushRange(_name);
if (loops.size() == 0) counter = 0;
int parent = stack.size() == 0 ? -1 : stack.back();
std::string fullname = _name + "(" + std::to_string(parent) + ")";
int index;
if (loops.find(fullname) != loops.end()) {
loops[fullname].current = clock::now();
index = loops[fullname].index;
} else {
index = counter;
loops[fullname] = LoopData(); // = {counter++, parent, 0.0, now, {}};
loops[fullname].index = counter++;
loops[fullname].parent = parent;
loops[fullname].current = clock::now();
}
stack.push_back(index);
}
void Timing::stopTimer(const std::string &_name) {
if (!measure) return;
auto now = clock::now();
stack.pop_back();
int parent = stack.empty() ? -1 : stack.back();
std::string fullname = _name + "(" + std::to_string(parent) + ")";
loops[fullname].time +=
std::chrono::duration_cast<std::chrono::duration<double>>(
now - loops[fullname].current)
.count();
popRange();
}
void Timing::startTimerCUDA(const std::string &_name, cudaStream_t stream) {
if (!measure) return;
markStart(_name);
if (loops.size() == 0) counter = 0;
int parent = stack.size() == 0 ? -1 : stack.back();
std::string fullname = _name + "(" + std::to_string(parent) + ")";
cudaEvent_t start;
cudaSafeCall(cudaEventCreate(&start));
cudaSafeCall(cudaEventRecord(start, stream));
int index;
if (loops.find(fullname) != loops.end()) {
loops[fullname].event_pairs.push_back(start);
index = loops[fullname].index;
} else {
// loops[fullname] = {counter++, parent, 0.0, clock::now(), {start}};
loops[fullname] = LoopData(); // = {counter++, parent, 0.0, now, {}};
loops[fullname].index = counter++;
loops[fullname].parent = parent;
// loops[fullname].event_pairs = {start};
loops[fullname].event_pairs.reserve(10);
loops[fullname].event_pairs.push_back(start);
index = counter - 1;
}
stack.push_back(index);
}
void Timing::stopTimerCUDA(const std::string &_name, cudaStream_t stream) {
if (!measure) return;
stack.pop_back();
int parent = stack.empty() ? -1 : stack.back();
std::string fullname = _name + "(" + std::to_string(parent) + ")";
cudaEvent_t stop;
cudaSafeCall(cudaEventCreate(&stop));
cudaSafeCall(cudaEventRecord(stop, stream));
loops[fullname].event_pairs.push_back(stop);
}
void Timing::reportWithParent(int parent, const std::string &indentation) {
for (const auto &element : loops) {
const LoopData &l = element.second;
if (l.parent == parent) {
#ifdef USE_MPI
int rank, nproc;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nproc);
std::vector<double> times(nproc, 0);
MPI_Gather(&l.time, 1, MPI_DOUBLE, times.data(), 1, MPI_DOUBLE, 0,
MPI_COMM_WORLD);
if (!rank) {
double mean = 0.0;
double max = times[0];
double min = times[0];
for (double t : times) {
mean += t;
max = std::max(max, t);
min = std::min(min, t);
}
mean = mean / nproc;
double stddev =
std::accumulate(times.begin(), times.end(), 0.0,
[&](const double &sum, const double &time) {
return sum + (time - mean) * (time - mean);
});
stddev = std::sqrt(stddev / nproc);
std::cout << indentation + element.first + ": ";
std::cout << min << "s; " << max << "s; " << mean << "s; " << stddev
<< "s;\n";
}
#else
std::cout << indentation + element.first + ": "
<< std::to_string(l.time) + " seconds\n";
#endif
reportWithParent(l.index, indentation + " ");
}
}
}
void Timing::sumCudaEvents() {
for (auto &element : loops) {
LoopData &loop = element.second;
assert(loop.event_pairs.size() % 2 == 0 &&
"CUDA event measurement not closed!");
for (int i = 0; 2 * i < loop.event_pairs.size(); ++i) {
float milliseconds = 0;
cudaSafeCall(cudaEventElapsedTime(&milliseconds, loop.event_pairs[2 * i],
loop.event_pairs[2 * i + 1]));
loop.time += milliseconds / 1000;
}
loop.event_pairs.clear();
}
}
void Timing::reset() {
loops.clear();
stack.clear();
counter = 0;
}
void Timing::suspend_prof() { measure = false; }
void Timing::continue_prof() { measure = true; }
void Timing::report() {
sumCudaEvents();
reportWithParent(-1, " ");
}
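// --------------------------------------------------------------------------
// Editor's sketch (not part of the original file): typical usage of the Timing
// helpers above. Host timers nest through the stack of parent indices; CUDA
// timers record start/stop event pairs that report() folds in via
// sumCudaEvents(). my_kernel, grid, block and stream are illustrative only.
//
// Timing::startTimer("solve");
// Timing::startTimerCUDA("my_kernel", stream);
// my_kernel<<<grid, block, 0, stream>>>(/* ... */);
// Timing::stopTimerCUDA("my_kernel", stream);
// Timing::stopTimer("solve");
// Timing::report();   // prints "solve(-1)" and, indented below it, "my_kernel(0)"
// --------------------------------------------------------------------------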
|
8b490148b337f372254fed1bde28a092833c8caa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "robgpu_settings.h"
#include <R.h>
#include <Rdefines.h>
#include "gpuMCD.h" // needs MYTYPE
#include "compute_det.h" // needs MYTYPE
#include "compute_inverse.h" // needs MYTYPE
#include "mahalanobis_distance.h"
#include "colwisesd.h"
#include "colwisemean.h"
#include "cuseful.h"
#include "tools.h"
#include "reduction.h"
#include <rocblas.h>
// divide covariance matrix elements by (dim - 1); the sd(x)*sd(y) normalization below is kept commented out
__global__ void divide_by_value_indexed_kernel(MYTYPE * d_cors, int dim)
{
d_cors[blockIdx.y*gridDim.x + blockIdx.x] /= ((MYTYPE)(dim - 1));
// d_cors[blockIdx.y*gridDim.x + blockIdx.x] /= (d_x_sds[blockIdx.x] * d_y_sds[blockIdx.y]);
}
__host__ void gpuMCD(MYTYPE * h_x,
int n,
int p,
MYTYPE * h_covMat,
SEXP p_covMat,
MYTYPE * h_covMat_det,
int nsamp,
int sample_size,
int * p_sample_index,
unsigned int gpuID)
{
// GPU device memory
MYTYPE * d_x = NULL;
MYTYPE * d_x_means = NULL;
MYTYPE * d_x_subsample = NULL;
int * d_sample_index = NULL;
MYTYPE * d_cov = NULL;
MYTYPE * d_mh_dist = NULL;
// host memory
MYTYPE * h_covMat_inverse = NULL;
h_covMat_inverse = (MYTYPE *) calloc (p * p, sizeof(MYTYPE));
// printf("n: %d p: %d nsamp: %d sample_size: %d\n", n, p, nsamp, sample_size);
hipSetDevice(gpuID);
hipblasInit();
checkCublasError("mcd cublas init...");
// allocate device memory
hipblasAlloc(p * n, sizeof(MYTYPE), (void**) &d_x);
hipblasAlloc(p, sizeof(MYTYPE), (void**) &d_x_means);
hipblasAlloc(p * sample_size, sizeof(MYTYPE), (void**) &d_x_subsample);
hipblasAlloc(nsamp * sample_size, sizeof(int), (void**) &d_sample_index);
hipblasAlloc(p*p, sizeof(MYTYPE), (void**) &d_cov);
hipblasAlloc(p, sizeof(MYTYPE), (void**) &d_mh_dist);
checkCublasError("mcd gpu memory allocation");
// copy input data to gpu
hipblasSetMatrix(n, p, sizeof(MYTYPE), h_x, n, d_x, n);
hipblasSetMatrix(nsamp, sample_size, sizeof(int), p_sample_index, nsamp, d_sample_index, nsamp);
checkCublasError("mcd set matrix");
for (unsigned int i = 0; i < nsamp; i ++) {
// printf("%3d ", i);
// performance question:
// better to copy GPU -> GPU or HOST -> GPU
// problem: limited GPU RAM
// no subsample-wise -- colwise mean/center etc necessary ->
// when using dgemm JUST on the subsample... => GPU -> GPU
int maxBlocks = 1;
int maxThreads = 256;
int kernel = 7; // 7 is colwisemean
// to be computed
int numBlocks = 0;
int numThreads = 0;
// TODO resolve strange getNumBlocks... etc computation
getNumBlocksAndThreads(kernel, sample_size, maxBlocks, maxThreads, numBlocks, numThreads);
dim3 blocks(numBlocks, p, 1);
// printf("numBlocks: %d, numThreads: %d \n", numBlocks, numThreads);
hipLaunchKernelGGL(( subsample_kernel), dim3(blocks), dim3(numThreads), 0, 0, d_x, n, d_x_subsample, sample_size, d_sample_index, nsamp, i);
checkCublasError("mcd subsample kernel");
// compute colwise means
colwisemean_internal(d_x_subsample, p, sample_size, d_x_means);
checkCublasError("mcd colwise sd internal");
// center kernel
// center the input data columnwise
hipLaunchKernelGGL(( columnwisecenter_kernel), dim3(blocks), dim3(numThreads), 0, 0, d_x_subsample, sample_size, d_x_means);
checkCublasError("mcd colwise center internal");
// do the matrix multiplication
hipblasDgemm('T', 'N', p, p, sample_size, 1.0, d_x_subsample, sample_size, d_x_subsample, sample_size, 0.0, d_cov, p);
checkCublasError("mcd dgemm");
dim3 dimGrid(p, p);
dim3 dimBlock(1);
hipLaunchKernelGGL(( divide_by_value_indexed_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_cov, sample_size);
checkCublasError("mcd divide_by_value_indexed_kernel");
hipblasGetMatrix(p, p, sizeof(MYTYPE), d_cov, p, h_covMat, p);
checkCublasError("mcd get matrix");
// compute determinant
h_covMat_det[i] = compute_det(p_covMat);
// compute inverse of subsample covariance matrix
compute_inverse(p_covMat);
// copy inverse of covariance matrix to GPU
hipblasSetMatrix(p, p, sizeof(MYTYPE), h_covMat, p, d_cov, p);
// compute the Mahalanobis distance from each observation to the
// subsample center
getNumBlocksAndThreads(kernel, sample_size, maxBlocks, maxThreads, numBlocks, numThreads);
// mahalanobis_distance_wrapper(p, p, numThreads, numBlocks, d_x, d_x_means, d_cov, d_mh_dist);
mahalanobis_distance_wrapper(p, p, numThreads, numBlocks, d_cov, d_mh_dist);
}
free(h_covMat_inverse);
hipblasFree(d_x);
hipblasFree(d_x_means);
hipblasFree(d_x_subsample);
hipblasFree(d_sample_index);
hipblasFree(d_cov);
hipblasShutdown();
}
|
8b490148b337f372254fed1bde28a092833c8caa.cu
|
#include <stdio.h>
#include "robgpu_settings.h"
#include <R.h>
#include <Rdefines.h>
#include "gpuMCD.h" // needs MYTYPE
#include "compute_det.h" // needs MYTYPE
#include "compute_inverse.h" // needs MYTYPE
#include "mahalanobis_distance.h"
#include "colwisesd.h"
#include "colwisemean.h"
#include "cuseful.h"
#include "tools.h"
#include "reduction.h"
#include <cublas.h>
// divide covariance matrix elements by (dim - 1); the sd(x)*sd(y) normalization below is kept commented out
__global__ void divide_by_value_indexed_kernel(MYTYPE * d_cors, int dim)
{
d_cors[blockIdx.y*gridDim.x + blockIdx.x] /= ((MYTYPE)(dim - 1));
// d_cors[blockIdx.y*gridDim.x + blockIdx.x] /= (d_x_sds[blockIdx.x] * d_y_sds[blockIdx.y]);
}
__host__ void gpuMCD(MYTYPE * h_x,
int n,
int p,
MYTYPE * h_covMat,
SEXP p_covMat,
MYTYPE * h_covMat_det,
int nsamp,
int sample_size,
int * p_sample_index,
unsigned int gpuID)
{
// GPU device memory
MYTYPE * d_x = NULL;
MYTYPE * d_x_means = NULL;
MYTYPE * d_x_subsample = NULL;
int * d_sample_index = NULL;
MYTYPE * d_cov = NULL;
MYTYPE * d_mh_dist = NULL;
// host memory
MYTYPE * h_covMat_inverse = NULL;
h_covMat_inverse = (MYTYPE *) calloc (p * p, sizeof(MYTYPE));
// printf("n: %d p: %d nsamp: %d sample_size: %d\n", n, p, nsamp, sample_size);
cudaSetDevice(gpuID);
cublasInit();
checkCublasError("mcd cublas init...");
// allocate device memory
cublasAlloc(p * n, sizeof(MYTYPE), (void**) &d_x);
cublasAlloc(p, sizeof(MYTYPE), (void**) &d_x_means);
cublasAlloc(p * sample_size, sizeof(MYTYPE), (void**) &d_x_subsample);
cublasAlloc(nsamp * sample_size, sizeof(int), (void**) &d_sample_index);
cublasAlloc(p*p, sizeof(MYTYPE), (void**) &d_cov);
cublasAlloc(p, sizeof(MYTYPE), (void**) &d_mh_dist);
checkCublasError("mcd gpu memory allocation");
// copy input data to gpu
cublasSetMatrix(n, p, sizeof(MYTYPE), h_x, n, d_x, n);
cublasSetMatrix(nsamp, sample_size, sizeof(int), p_sample_index, nsamp, d_sample_index, nsamp);
checkCublasError("mcd set matrix");
for (unsigned int i = 0; i < nsamp; i ++) {
// printf("%3d ", i);
// performance question:
// better to copy GPU -> GPU or HOST -> GPU
// problem: limited GPU RAM
// no subsample-wise -- colwise mean/center etc necessary ->
// when using dgemm JUST on the subsample... => GPU -> GPU
int maxBlocks = 1;
int maxThreads = 256;
int kernel = 7; // 7 is colwisemean
// to be computed
int numBlocks = 0;
int numThreads = 0;
// TODO resolve strange getNumBlocks... etc computation
getNumBlocksAndThreads(kernel, sample_size, maxBlocks, maxThreads, numBlocks, numThreads);
dim3 blocks(numBlocks, p, 1);
// printf("numBlocks: %d, numThreads: %d \n", numBlocks, numThreads);
subsample_kernel<<< blocks, numThreads>>>(d_x, n, d_x_subsample, sample_size, d_sample_index, nsamp, i);
checkCublasError("mcd subsample kernel");
// compute colwise means
colwisemean_internal(d_x_subsample, p, sample_size, d_x_means);
checkCublasError("mcd colwise sd internal");
// center kernel
// center the input data columnwise
columnwisecenter_kernel<<< blocks, numThreads>>>(d_x_subsample, sample_size, d_x_means);
checkCublasError("mcd colwise center internal");
// do the matrix multiplication
cublasDgemm('T', 'N', p, p, sample_size, 1.0, d_x_subsample, sample_size, d_x_subsample, sample_size, 0.0, d_cov, p);
checkCublasError("mcd dgemm");
dim3 dimGrid(p, p);
dim3 dimBlock(1);
divide_by_value_indexed_kernel<<<dimGrid,dimBlock>>>(d_cov, sample_size);
checkCublasError("mcd divide_by_value_indexed_kernel");
cublasGetMatrix(p, p, sizeof(MYTYPE), d_cov, p, h_covMat, p);
checkCublasError("mcd get matrix");
// compute determinant
h_covMat_det[i] = compute_det(p_covMat);
// compute inverse of subsample covariance matrix
compute_inverse(p_covMat);
// copy inverse of covariance matrix to GPU
cublasSetMatrix(p, p, sizeof(MYTYPE), h_covMat, p, d_cov, p);
// compute the Mahalanobis distance from each observation to the
// subsample center
getNumBlocksAndThreads(kernel, sample_size, maxBlocks, maxThreads, numBlocks, numThreads);
// mahalanobis_distance_wrapper(p, p, numThreads, numBlocks, d_x, d_x_means, d_cov, d_mh_dist);
mahalanobis_distance_wrapper(p, p, numThreads, numBlocks, d_cov, d_mh_dist);
}
free(h_covMat_inverse);
cublasFree(d_x);
cublasFree(d_x_means);
cublasFree(d_x_subsample);
cublasFree(d_sample_index);
cublasFree(d_cov);
cublasShutdown();
}
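// --------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the per-subsample pipeline
// above computes an unbiased sample covariance -- columns are centered,
// cublasDgemm forms X^T * X, and divide_by_value_indexed_kernel divides every
// entry by (sample_size - 1). A host-side reference for a single entry,
// assuming a centered column-major n x p matrix Xc (names illustrative):
//
// double covEntryHost (const double* Xc, int n, int j, int k)
// {
//   double s = 0.0;
//   for (int i = 0; i < n; ++i)
//     s += Xc[j * n + i] * Xc[k * n + i];
//   return s / (n - 1);   // matches the (dim - 1) divisor in the kernel
// }
// --------------------------------------------------------------------------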
|
6834f0a4766da489cec4163141a2d048ed19e395.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
/*
* NXN Matrix Multiplication
*/
__global__ void
matMult(const int *A, const int *B, int *C, const int DIM)
{
extern __shared__ int shared[];
int *As = &shared[0]; // s stands for shared
int *Bs = &shared[DIM]; // s stands for shared
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < DIM)
{
for(int j = 0; j < DIM; j++){
int idx = (i*DIM)+j;
for(int k = 0; k < DIM; k++){
As[i] = A[(i*DIM)+k];
Bs[i] = B[(k*DIM)+j];
__syncthreads();
// C[idx] += A[(i*DIM)+k] * B[(k*DIM)+j];
C[idx] += As[k] * Bs[k];
__syncthreads();
}
}
}
}
void printMat(int * M, int XDIM){
int i;
int j;
for(i = 0; i < XDIM; i++){
for(j = 0; j < XDIM; j++){
printf(" %d ", M[i*XDIM+j]);
}
printf("\n");
}
}
int
main(void)
{
//STEP 1 : Allocate in host
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the mat size to be used, and compute its size
const int YDIM = 2;
const int XDIM = 2;
size_t size = sizeof(int)*YDIM*XDIM;
printf("[Mat multiplication of %d elements]\n", YDIM);
// Allocate the host input vector A
int * h_A = (int *)malloc(size);
// Allocate the host input vector B
int * h_B = (int *)malloc(size);
// Allocate the host output vector C
int * h_C = (int *)malloc(size);
// Initialize h_A and h_B with random numbers, h_C with 0's
for(int i = 0; i < XDIM*XDIM; i++){
h_A[i] = rand() & 0xF;
h_B[i] = rand() & 0xF;
h_C[i] = 0;
}
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
//STEP 2: ALLOCATE IN CUDA
// Allocate device memory
int *d_A = NULL;
int *d_B = NULL;
int *d_C = NULL;
hipError_t error;
error = hipMalloc((void **) &d_A, size);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_B, size);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_C, size);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_C, h_C, size, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Launch the Mat mult CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(XDIM + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( matMult), dim3(blocksPerGrid), dim3(threadsPerBlock), 2*XDIM*sizeof(int), 0, d_A, d_B, d_C, XDIM);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
/*for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}*/
printf("Test PASSED\n");
fflush(stdout);
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
printMat(h_A,XDIM);
printMat(h_B,XDIM);
printMat(h_C,XDIM);
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
6834f0a4766da489cec4163141a2d048ed19e395.cu
|
#include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
/*
* NXN Matrix Multiplication
*/
__global__ void
matMult(const int *A, const int *B, int *C, const int DIM)
{
extern __shared__ int shared[];
int *As = &shared[0]; // s stands for shared
int *Bs = &shared[DIM]; // s stands for shared
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < DIM)
{
for(int j = 0; j < DIM; j++){
int idx = (i*DIM)+j;
for(int k = 0; k < DIM; k++){
As[i] = A[(i*DIM)+k];
Bs[i] = B[(k*DIM)+j];
__syncthreads();
// C[idx] += A[(i*DIM)+k] * B[(k*DIM)+j];
C[idx] += As[k] * Bs[k];
__syncthreads();
}
}
}
}
void printMat(int * M, int XDIM){
int i;
int j;
for(i = 0; i < XDIM; i++){
for(j = 0; j < XDIM; j++){
printf(" %d ", M[i*XDIM+j]);
}
printf("\n");
}
}
int
main(void)
{
//STEP 1 : Allocate in host
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the mat size to be used, and compute its size
const int YDIM = 2;
const int XDIM = 2;
size_t size = sizeof(int)*YDIM*XDIM;
printf("[Mat multiplication of %d elements]\n", YDIM);
// Allocate the host input vector A
int * h_A = (int *)malloc(size);
// Allocate the host input vector B
int * h_B = (int *)malloc(size);
// Allocate the host output vector C
int * h_C = (int *)malloc(size);
// Initialize h_A and h_B with random numbers, h_C with 0's
for(int i = 0; i < XDIM*XDIM; i++){
h_A[i] = rand() & 0xF;
h_B[i] = rand() & 0xF;
h_C[i] = 0;
}
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
//STEP 2: ALLOCATE IN CUDA
// Allocate device memory
int *d_A = NULL;
int *d_B = NULL;
int *d_C = NULL;
cudaError_t error;
error = cudaMalloc((void **) &d_A, size);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_B, size);
if (error != cudaSuccess)
{
printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_C, size);
if (error != cudaSuccess)
{
printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Launch the Mat mult CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(XDIM + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
matMult<<<blocksPerGrid, threadsPerBlock, 2*XDIM*sizeof(int)>>>(d_A, d_B, d_C, XDIM);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
/*for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}*/
printf("Test PASSED\n");
fflush(stdout);
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
printMat(h_A,XDIM);
printMat(h_B,XDIM);
printMat(h_C,XDIM);
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
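// --------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the shared-memory path in
// matMult() writes As[i]/Bs[i] with the global row index i but reads As[k]/Bs[k],
// and it calls __syncthreads() inside loops guarded by "if (i < DIM)", so the
// results are unreliable. A minimal correct row-per-thread kernel (essentially
// the commented-out global-memory line) would look like:
//
// __global__ void matMultSimple(const int *A, const int *B, int *C, const int DIM)
// {
//     int i = blockDim.x * blockIdx.x + threadIdx.x;   // one row of C per thread
//     if (i < DIM)
//         for (int j = 0; j < DIM; ++j) {
//             int acc = 0;
//             for (int k = 0; k < DIM; ++k)
//                 acc += A[i * DIM + k] * B[k * DIM + j];
//             C[i * DIM + j] = acc;
//         }
// }
// --------------------------------------------------------------------------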
|
414e6cc0fbeee70d5db8cc1f064e309df6f60c31.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Author: Bohao Zhang
Oct. 29 2019
arm_planning mex
a cuda array for a cluster of rotatotopes
*/
#ifndef ROTATOTOPE_ARRAY_CPP
#define ROTATOTOPE_ARRAY_CPP
#include "rotatotopeArray.h"
rotatotopeArray::rotatotopeArray(uint32_t n_links_input, uint32_t n_time_steps_input, uint32_t joint_per_link_input, double* R_input, double* dev_R_input, uint32_t R_unit_length_input, uint8_t* dev_rot_axes_input, double* Z_input, uint32_t Z_width_input, uint32_t Z_length_input, uint32_t reduce_order_input, double* g_k_input) {
debugMode = false;
n_links = n_links_input;
n_time_steps = n_time_steps_input;
joint_per_link = joint_per_link_input;
dev_R = dev_R_input;
R_unit_length = R_unit_length_input;
dev_rot_axes = dev_rot_axes_input;
reduce_order = reduce_order_input;
if (n_links > 0) {
Z = Z_input;
Z_length = Z_length_input;
Z_width = Z_width_input;
Z_unit_length = Z_length / n_links;
hipMalloc((void**)&dev_Z, Z_width * Z_length * sizeof(double));
hipMemcpy(dev_Z, Z, Z_width * Z_length * sizeof(double), hipMemcpyHostToDevice);
c_k = new double[n_links * 2];
g_k = new double[n_links * 2];
for (uint32_t joint_id = 0; joint_id < n_links * 2; joint_id++) {
uint32_t R_id_start = ((joint_id + 1) * n_time_steps - 1) * R_unit_length;
c_k[joint_id] = R_input[R_id_start * 5 + k_dim];
g_k[joint_id] = g_k_input[joint_id];
}
double* dev_RZ_new;
hipMalloc((void**)&dev_RZ, n_links * n_time_steps * reduce_order * Z_width * sizeof(double));
hipMalloc((void**)&dev_RZ_new, n_links * n_time_steps * reduce_order * R_unit_length * Z_width * sizeof(double));
bool *dev_c_idx_new;
uint8_t *dev_k_idx_new, *dev_C_idx_new;
hipMalloc((void**)&dev_c_idx, n_links * n_time_steps * reduce_order * sizeof(bool));
hipMemset(dev_c_idx, 0, n_links * n_time_steps * reduce_order * sizeof(bool));
hipMalloc((void**)&dev_c_idx_new, n_links * n_time_steps * reduce_order * R_unit_length * sizeof(bool));
hipMemset(dev_c_idx_new, 0, n_links * n_time_steps * reduce_order * R_unit_length * sizeof(bool));
hipMalloc((void**)&dev_k_idx, n_links * (n_links + 1) * n_time_steps * reduce_order * sizeof(uint8_t));
hipMemset(dev_k_idx, 0, n_links * (n_links + 1) * n_time_steps * reduce_order * sizeof(uint8_t));
hipMalloc((void**)&dev_k_idx_new, n_links * (n_links + 1) * n_time_steps * reduce_order * R_unit_length * sizeof(uint8_t));
hipMemset(dev_k_idx_new, 0, n_links * (n_links + 1) * n_time_steps * reduce_order * R_unit_length * sizeof(uint8_t));
hipMalloc((void**)&dev_C_idx, n_links * (n_links + 1) * n_time_steps * reduce_order * sizeof(uint8_t));
hipMemset(dev_C_idx, 0, n_links * (n_links + 1) * n_time_steps * reduce_order * sizeof(uint8_t));
hipMalloc((void**)&dev_C_idx_new, n_links * (n_links + 1) * n_time_steps * reduce_order * R_unit_length * sizeof(uint8_t));
hipMemset(dev_C_idx_new, 0, n_links * (n_links + 1) * n_time_steps * reduce_order * R_unit_length * sizeof(uint8_t));
dim3 grid1(n_links, n_time_steps, 1);
dim3 block1(reduce_order, Z_width, 1);
initialize_RZ_kernel << < grid1, block1 >> > (dev_Z, Z_unit_length, reduce_order, dev_RZ, dev_c_idx);
for (int link = n_links; link > 0; link--) {
for (int joint_offset = joint_per_link - 1; joint_offset >= 0; joint_offset--) {
dim3 grid2(link, n_time_steps, 1);
dim3 block2(reduce_order, R_unit_length, 1);
multiply_kernel << < grid2, block2 >> > (dev_rot_axes, n_links - link, joint_offset, reduce_order, dev_RZ, dev_R, dev_c_idx, dev_k_idx, dev_C_idx, dev_RZ_new, dev_c_idx_new, dev_k_idx_new, dev_C_idx_new);
reduce_kernel << < grid2, (reduce_order * R_unit_length) >> > (dev_RZ_new, dev_c_idx_new, dev_k_idx_new, dev_C_idx_new, n_links - link, reduce_order, dev_RZ, dev_c_idx, dev_k_idx, dev_C_idx);
}
}
hipFree(dev_RZ_new);
hipFree(dev_c_idx_new);
hipFree(dev_k_idx_new);
hipFree(dev_C_idx_new);
}
else {
c_k = nullptr;
g_k = nullptr;
Z = nullptr;
dev_Z = nullptr;
dev_RZ = nullptr;
dev_c_idx = nullptr;
dev_k_idx = nullptr;
dev_C_idx = nullptr;
}
n_pairs = 0;
self_pairs = nullptr;
dev_RZ_stack = nullptr;
dev_c_idx_stack = nullptr;
dev_k_idx_stack = nullptr;
dev_C_idx_stack = nullptr;
RZ_length = nullptr;
n_obstacles = 0;
A_con = nullptr;
dev_A_con = nullptr;
d_con = nullptr;
dev_d_con = nullptr;
delta_con = nullptr;
dev_delta_con = nullptr;
k_con = nullptr;
dev_k_con = nullptr;
k_con_num = nullptr;
dev_k_con_num = nullptr;
max_k_con_num = nullptr;
A_con_self = nullptr;
dev_A_con_self = nullptr;
d_con_self = nullptr;
dev_d_con_self = nullptr;
delta_con_self = nullptr;
dev_delta_con_self = nullptr;
k_con_self = nullptr;
dev_k_con_self = nullptr;
k_con_num_self = nullptr;
dev_k_con_num_self = nullptr;
max_k_con_num_self = nullptr;
current_k_opt = new double[n_links * 2];
con = nullptr;
jaco_con = nullptr;
hess_con = nullptr;
con_self = nullptr;
jaco_con_self = nullptr;
hess_con_self = nullptr;
}
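// initialize_RZ_kernel: replicate each link's zonotope across all time steps, zero-padding up to
// reduce_order generators and marking the center generator in c_idx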
__global__ void initialize_RZ_kernel(double* link_Z, uint32_t link_Z_length, uint32_t reduce_order, double* RZ, bool* c_idx) {
uint32_t link_id = blockIdx.x;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
uint32_t z_id = threadIdx.x;
uint32_t w_id = threadIdx.y;
uint32_t Z_width = blockDim.y;
if (z_id < link_Z_length) {
RZ[((link_id * n_time_steps + time_id) * reduce_order + z_id) * Z_width + w_id] = link_Z[(link_id * link_Z_length + z_id) * Z_width + w_id];
}
else {
RZ[((link_id * n_time_steps + time_id) * reduce_order + z_id) * Z_width + w_id] = 0;
}
if (z_id == 0) c_idx[(link_id * n_time_steps + time_id) * reduce_order] = true;
}
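// multiply_kernel: multiply every generator of the link zonotope with every generator of the selected joint's
// rotation-matrix zonotope about rot_axes[joint_id]; k_idx records whether a product depends on the trajectory
// parameter k, C_idx records whether the rotation generator was the center, and flags of joints processed
// earlier are copied forward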
__global__ void multiply_kernel(uint8_t* rot_axes, uint32_t link_offset, uint32_t joint_offset, uint32_t reduce_order, double* RZ, double* R, bool* c_idx, uint8_t* k_idx, uint8_t* C_idx, double* RZ_new, bool* c_idx_new, uint8_t* k_idx_new, uint8_t* C_idx_new) {
uint32_t link_id = blockIdx.x + link_offset;
uint32_t joint_id = blockIdx.x * 2 + joint_offset;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
uint32_t z_id = threadIdx.x;
uint32_t r_id = threadIdx.y;
uint32_t R_unit_length = blockDim.y;
uint32_t mul_Z = (link_id * n_time_steps + time_id) * reduce_order + z_id;
uint32_t mul_R = (joint_id * n_time_steps + time_id) * R_unit_length + r_id;
uint32_t mul_RZ = ((link_id * n_time_steps + time_id) * reduce_order + z_id) * R_unit_length + r_id;
uint8_t rot_axis = rot_axes[joint_id];
bool if_center = (r_id == 0); // true if center, false if not
if (rot_axis == 1) {
RZ_new[mul_RZ * 3] = if_center ? RZ[mul_Z * 3] : 0;
RZ_new[mul_RZ * 3 + 1] = R[mul_R * 5] * RZ[mul_Z * 3 + 1] - R[mul_R * 5 + 1] * RZ[mul_Z * 3 + 2];
RZ_new[mul_RZ * 3 + 2] = R[mul_R * 5 + 1] * RZ[mul_Z * 3 + 1] + R[mul_R * 5] * RZ[mul_Z * 3 + 2];
}
else if (rot_axis == 2) {
RZ_new[mul_RZ * 3] = R[mul_R * 5] * RZ[mul_Z * 3] + R[mul_R * 5 + 1] * RZ[mul_Z * 3 + 2];
RZ_new[mul_RZ * 3 + 1] = if_center ? RZ[mul_Z * 3 + 1] : 0;
RZ_new[mul_RZ * 3 + 2] = R[mul_R * 5] * RZ[mul_Z * 3 + 2] - R[mul_R * 5 + 1] * RZ[mul_Z * 3];
}
else {
RZ_new[mul_RZ * 3] = R[mul_R * 5] * RZ[mul_Z * 3] - R[mul_R * 5 + 1] * RZ[mul_Z * 3 + 1];
RZ_new[mul_RZ * 3 + 1] = R[mul_R * 5 + 1] * RZ[mul_Z * 3] + R[mul_R * 5] * RZ[mul_Z * 3 + 1];
RZ_new[mul_RZ * 3 + 2] = if_center ? RZ[mul_Z * 3 + 2] : 0;
}
c_idx_new[mul_RZ] = c_idx[mul_Z];
// update k_idx for this joint
uint32_t k_id = link_id * (link_id + 1) + joint_id;
uint32_t mul_k = (k_id * n_time_steps + time_id) * reduce_order * R_unit_length + (z_id * R_unit_length + r_id);
if (R[mul_R * 5 + k_dim] != 0) {
k_idx_new[mul_k] = 2;
}
else {
k_idx_new[mul_k] = 1;
}
// update k_idx for previous joints
for (uint32_t joint_k_id = joint_id + 1; joint_k_id < (link_id + 1) * 2; joint_k_id++) {
k_id = link_id * (link_id + 1) + joint_k_id;
uint32_t mul_z = (k_id * n_time_steps + time_id) * reduce_order + z_id;
mul_k = (k_id * n_time_steps + time_id) * reduce_order * R_unit_length + (z_id * R_unit_length + r_id);
k_idx_new[mul_k] = k_idx[mul_z];
}
// update C_idx for this joint
uint32_t C_id = link_id * (link_id + 1) + joint_id;
uint32_t mul_C = (C_id * n_time_steps + time_id) * reduce_order * R_unit_length + (z_id * R_unit_length + r_id);
if (r_id == 0) {
C_idx_new[mul_C] = 2;
}
else {
C_idx_new[mul_C] = 1;
}
// update C_idx for previous joints
for (uint32_t joint_k_id = joint_id + 1; joint_k_id < (link_id + 1) * 2; joint_k_id++) {
C_id = link_id * (link_id + 1) + joint_k_id;
uint32_t mul_z = (C_id * n_time_steps + time_id) * reduce_order + z_id;
mul_C = (C_id * n_time_steps + time_id) * reduce_order * R_unit_length + (z_id * R_unit_length + r_id);
C_idx_new[mul_C] = C_idx[mul_z];
}
}
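// reduce_kernel: thread 0 partially sorts the expanded generators by squared norm (quickselect); the center and
// the largest generators (reduce_order - 3 entries in total) are kept, and the remainder is over-approximated by
// a 3-generator axis-aligned box whose c/k/C flags are reset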
__global__ void reduce_kernel(double* RZ_new, bool* c_idx_new, uint8_t* k_idx_new, uint8_t* C_idx_new, uint32_t link_offset, uint32_t reduce_order, double* RZ, bool* c_idx, uint8_t* k_idx, uint8_t* C_idx) {
uint32_t link_id = blockIdx.x + link_offset;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
uint32_t z_id = threadIdx.x;
uint32_t norm_length = blockDim.x;
uint32_t mul_Z = (link_id * n_time_steps + time_id) * norm_length + z_id; // we never reduce the center
__shared__ double RZ_norm[MAX_NORM_SIZE];
__shared__ uint32_t RZ_id[MAX_NORM_SIZE];
RZ_norm[z_id] = 0;
double norm;
for (uint32_t i = 0; i < 3; i++) {
norm = RZ_new[mul_Z * 3 + i];
RZ_norm[z_id] += norm * norm;
}
RZ_id[z_id] = z_id;
__syncthreads();
uint32_t base = (link_id * n_time_steps + time_id) * norm_length; // index offset for RZ_new
uint32_t k_start = ((link_id * (link_id + 1)) * n_time_steps + time_id) * norm_length;
uint32_t k_end = (((link_id + 1) * (link_id + 2)) * n_time_steps + time_id) * norm_length;
uint32_t k_step = n_time_steps * norm_length;
if (z_id == 0) {
// choose the vectors whose norm is among (reduce_order - 3) largest
uint32_t high = norm_length;
uint32_t low = 1;
uint32_t k = reduce_order - 3;
uint32_t i, j;
while (low < high) {
i = low;
j = high - 1;
double pivot = RZ_norm[low];
while (i <= j) {
while (i <= j && RZ_norm[i] >= pivot)
i++;
while (i <= j && RZ_norm[j] < pivot)
j--;
if (i < j) {
double temp_double = RZ_norm[i];
RZ_norm[i] = RZ_norm[j];
RZ_norm[j] = temp_double;
uint32_t temp = RZ_id[i];
RZ_id[i] = RZ_id[j];
RZ_id[j] = temp;
i++;
j--;
}
}
double temp_double = RZ_norm[low];
RZ_norm[low] = RZ_norm[j];
RZ_norm[j] = temp_double;
uint32_t temp = RZ_id[low];
RZ_id[low] = RZ_id[j];
RZ_id[j] = temp;
if (j == k - 1)
break;
else if (j < k - 1)
low = j + 1;
else
high = j;
}
}
__syncthreads();
// at this point, the first (reduce_order - 3) entries in RZ_new are the (reduce_order - 3) largest ones
// we choose them as entries for RZ after reduction.
// we compress the rest of the entries to a box with 3 generators
uint32_t base_ori = (link_id * n_time_steps + time_id) * reduce_order; // index offset for RZ
uint32_t k_start_ori = ((link_id * (link_id + 1)) * n_time_steps + time_id) * reduce_order;
uint32_t k_end_ori = (((link_id + 1) * (link_id + 2)) * n_time_steps + time_id) * reduce_order;
uint32_t k_step_ori = n_time_steps * reduce_order;
if (z_id < reduce_order - 3) { // copy these generators to RZ
uint32_t sorted_id = RZ_id[z_id];
c_idx[base_ori + z_id] = c_idx_new[base + sorted_id];
for (uint32_t h = 0; h < 3; h++) {
RZ[(base_ori + z_id) * 3 + h] = RZ_new[(base + sorted_id) * 3 + h];
}
uint32_t k_pivot = k_start, k_pivot_ori = k_start_ori;
while (k_pivot != k_end && k_pivot_ori != k_end_ori) {
k_idx[k_pivot_ori + z_id] = k_idx_new[k_pivot + sorted_id];
k_pivot += k_step;
k_pivot_ori += k_step_ori;
}
uint32_t C_pivot = k_start, C_pivot_ori = k_start_ori;
while (C_pivot != k_end && C_pivot_ori != k_end_ori) {
C_idx[C_pivot_ori + z_id] = C_idx_new[C_pivot + sorted_id];
C_pivot += k_step;
C_pivot_ori += k_step_ori;
}
}
else if (reduce_order - 3 <= z_id && z_id < reduce_order) { // construct a 3-d box for the rest of the generators
uint32_t box_id = (z_id + 3) - reduce_order;
double entry_sum = 0;
for (uint32_t h = reduce_order - 3; h < norm_length; h++) {
uint32_t sorted_id = RZ_id[h];
entry_sum += abs(RZ_new[(base + sorted_id) * 3 + box_id]);
}
for (uint32_t h = 0; h < 3; h++) {
if (h == box_id) {
RZ[(base_ori + z_id) * 3 + h] = entry_sum;
}
else {
RZ[(base_ori + z_id) * 3 + h] = 0;
}
}
c_idx[base_ori + z_id] = false;
for (uint32_t h = k_start_ori; h < k_end_ori; h += k_step_ori) {
k_idx[h + z_id] = 1;
}
for (uint32_t h = k_start_ori; h < k_end_ori; h += k_step_ori) {
C_idx[h + z_id] = 1;
}
}
}
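// stack: for each link, append the end-effector rotatotopes of the preceding links and the base rotatotope
// onto the link rotatotope (Minkowski sum: centers added, generators concatenated), then shift every
// stacked center by the fixed origin offset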
void rotatotopeArray::stack(rotatotopeArray &EEs, rotatotopeArray &base) {
RZ_stack = new double*[n_links];
dev_RZ_stack = new double*[n_links];
c_idx_stack = new bool*[n_links];
dev_c_idx_stack = new bool*[n_links];
k_idx_stack = new uint8_t*[n_links];
dev_k_idx_stack = new uint8_t*[n_links];
C_idx_stack = new uint8_t*[n_links];
dev_C_idx_stack = new uint8_t*[n_links];
RZ_length = new uint32_t[n_links];
for (uint32_t link_id = 0; link_id < n_links; link_id++) {
RZ_length[link_id] = reduce_order + link_id * (EEs.reduce_order - 1) + base.reduce_order - 1;
RZ_stack[link_id] = nullptr;
hipMalloc((void**)&(dev_RZ_stack[link_id]), n_time_steps * RZ_length[link_id] * Z_width * sizeof(double));
c_idx_stack[link_id] = nullptr;
hipMalloc((void**)&(dev_c_idx_stack[link_id]), n_time_steps * RZ_length[link_id] * sizeof(bool));
k_idx_stack[link_id] = nullptr;
hipMalloc((void**)&(dev_k_idx_stack[link_id]), 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(uint8_t));
hipMemset(dev_k_idx_stack[link_id], 0, 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(uint8_t));
C_idx_stack[link_id] = nullptr;
hipMalloc((void**)&(dev_C_idx_stack[link_id]), 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(uint8_t));
hipMemset(dev_C_idx_stack[link_id], 0, 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(uint8_t));
// copy dev_RZ to dev_RZ_stack
dim3 grid1(n_time_steps, 1, 1);
dim3 block1(reduce_order, Z_width, 1);
copy_kernel << < grid1, block1 >> > (link_id, dev_RZ, dev_c_idx, dev_k_idx, dev_C_idx, reduce_order, EEs.reduce_order, dev_RZ_stack[link_id], dev_c_idx_stack[link_id], dev_k_idx_stack[link_id], dev_C_idx_stack[link_id]);
// stack with EE
for (int EE_id = link_id - 1; EE_id >= 0; EE_id--) {
dim3 grid2(n_time_steps, 1, 1);
dim3 block2(EEs.reduce_order, Z_width, 1);
stack_kernel << < grid2, block2 >> > (link_id, EE_id, EE_id, reduce_order, EEs.reduce_order, dev_RZ_stack[link_id], EEs.dev_RZ, dev_c_idx_stack[link_id], EEs.dev_c_idx, dev_k_idx_stack[link_id], EEs.dev_k_idx, dev_C_idx_stack[link_id], EEs.dev_C_idx);
}
// stack with base
dim3 grid3(n_time_steps, 1, 1);
dim3 block3(base.reduce_order, Z_width, 1);
stack_kernel << < grid3, block3 >> > (link_id, 0, link_id, reduce_order, base.reduce_order, dev_RZ_stack[link_id], base.dev_RZ, dev_c_idx_stack[link_id], base.dev_c_idx, dev_k_idx_stack[link_id], base.dev_k_idx, dev_C_idx_stack[link_id], base.dev_C_idx);
// origin shift
hipLaunchKernelGGL(( origin_shift_kernel) , dim3(n_time_steps), dim3(1) , 0, 0, RZ_length[link_id], dev_RZ_stack[link_id]);
}
uint32_t link_id = 0;
if(debugMode){
debug_RZ = new double[n_time_steps * RZ_length[link_id] * Z_width];
hipMemcpy(debug_RZ, dev_RZ_stack[link_id], n_time_steps * RZ_length[link_id] * Z_width * sizeof(double), hipMemcpyDeviceToHost);
debug_c_idx = new bool[n_time_steps * RZ_length[link_id]];
hipMemcpy(debug_c_idx, dev_c_idx_stack[link_id], n_time_steps * RZ_length[link_id] * sizeof(bool), hipMemcpyDeviceToHost);
debug_k_idx = new uint8_t[2 * (link_id + 1) * n_time_steps * RZ_length[link_id]];
hipMemcpy(debug_k_idx, dev_k_idx_stack[link_id], 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(uint8_t), hipMemcpyDeviceToHost);
debug_C_idx = new uint8_t[2 * (link_id + 1) * n_time_steps * RZ_length[link_id]];
hipMemcpy(debug_C_idx, dev_C_idx_stack[link_id], 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(uint8_t), hipMemcpyDeviceToHost);
}
else{
debug_RZ = nullptr;
debug_c_idx = nullptr;
debug_k_idx = nullptr;
debug_C_idx = nullptr;
}
}
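// copy_kernel: copy a link's reduced rotatotope (generators plus c/k/C flags) into the front of its stacked buffers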
__global__ void copy_kernel(uint32_t link_id, double* RZ, bool* c_idx, uint8_t* k_idx, uint8_t* C_idx, uint32_t link_reduce_order, uint32_t point_reduce_order, double* RZ_stack, bool* c_idx_stack, uint8_t* k_idx_stack, uint8_t* C_idx_stack) {
uint32_t time_id = blockIdx.x;
uint32_t n_time_steps = gridDim.x;
uint32_t Z_id = threadIdx.x;
uint32_t z_id = threadIdx.y;
uint32_t RZ_length = link_reduce_order + (link_id + 1) * (point_reduce_order - 1);
uint32_t copy_Z = time_id * RZ_length + Z_id;
uint32_t copy_k_start = time_id * RZ_length + Z_id;
uint32_t copy_k_step = n_time_steps * RZ_length;
uint32_t link_Z = (link_id * n_time_steps + time_id) * link_reduce_order + Z_id;
uint32_t link_k_start = ((link_id * (link_id + 1)) * n_time_steps + time_id) * link_reduce_order + Z_id;
uint32_t link_k_end = (((link_id + 1) * (link_id + 2)) * n_time_steps + time_id) * link_reduce_order + Z_id;
uint32_t link_k_step = n_time_steps * link_reduce_order;
RZ_stack[copy_Z * 3 + z_id] = RZ[link_Z * 3 + z_id];
if (z_id == 0) {
c_idx_stack[copy_Z] = c_idx[link_Z];
uint32_t copy_k = copy_k_start;
for (uint32_t link_k = link_k_start; link_k < link_k_end; link_k += link_k_step) {
k_idx_stack[copy_k] = k_idx[link_k];
copy_k += copy_k_step;
}
uint32_t copy_C = copy_k_start;
for (uint32_t link_C = link_k_start; link_C < link_k_end; link_C += link_k_step) {
C_idx_stack[copy_C] = C_idx[link_C];
copy_C += copy_k_step;
}
}
}
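// stack_kernel: thread Z_id == 0 adds the EE/base center to the stacked center; the other threads append the
// EE/base generators after the generators already stacked and copy their flags, writing zeros for joints the
// appended rotatotope does not cover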
__global__ void stack_kernel(uint32_t link_id, uint32_t EE_id, uint32_t stack_offset, uint32_t link_reduce_order, uint32_t point_reduce_order, double* RZ_stack, double* EE_RZ, bool* c_idx_stack, bool* EE_c_idx, uint8_t* k_idx_stack, uint8_t* EE_k_idx, uint8_t* C_idx_stack, uint8_t* EE_C_idx) {
uint32_t time_id = blockIdx.x;
uint32_t n_time_steps = gridDim.x;
uint32_t Z_id = threadIdx.x;
uint32_t z_id = threadIdx.y;
uint32_t RZ_length = link_reduce_order + (link_id + 1) * (point_reduce_order - 1);
uint32_t stack_Z = time_id * RZ_length + Z_id;
uint32_t stack_k_start = time_id * RZ_length + Z_id;
uint32_t stack_k_end = (2 * (link_id + 1) * n_time_steps + time_id) * RZ_length + Z_id;
uint32_t stack_k_step = n_time_steps * RZ_length;
uint32_t EE_Z = (EE_id * n_time_steps + time_id) * point_reduce_order + Z_id;
uint32_t EE_k_start = ((EE_id * (EE_id + 1)) * n_time_steps + time_id) * point_reduce_order + Z_id;
uint32_t EE_k_end = (((EE_id + 1) * (EE_id + 2)) * n_time_steps + time_id) * point_reduce_order + Z_id;
uint32_t EE_k_step = n_time_steps * point_reduce_order;
if (Z_id == 0) { // add the center
RZ_stack[stack_Z * 3 + z_id] += EE_RZ[EE_Z * 3 + z_id];
if (z_id == 0) {
c_idx_stack[stack_Z] = true;
}
}
else { // stack the generators
uint32_t stack_offset_length = link_reduce_order - 1 + stack_offset * (point_reduce_order - 1);
RZ_stack[(stack_Z + stack_offset_length) * 3 + z_id] = EE_RZ[EE_Z * 3 + z_id];
if (z_id == 0) {
c_idx_stack[(stack_Z + stack_offset_length)] = EE_c_idx[EE_Z];
uint32_t EE_k = EE_k_start;
for (uint32_t stack_k = stack_k_start + stack_offset_length; stack_k < stack_k_end + stack_offset_length; stack_k += stack_k_step) {
if (EE_k < EE_k_end) {
k_idx_stack[stack_k] = EE_k_idx[EE_k];
}
else {
k_idx_stack[stack_k] = 0;
}
EE_k += EE_k_step;
}
uint32_t EE_C = EE_k_start;
for (uint32_t stack_C = stack_k_start + stack_offset_length; stack_C < stack_k_end + stack_offset_length; stack_C += stack_k_step) {
if (EE_C < EE_k_end) {
C_idx_stack[stack_C] = EE_C_idx[EE_C];
}
else {
C_idx_stack[stack_C] = 0;
}
EE_C += EE_k_step;
}
}
}
}
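// origin_shift_kernel: translate the stacked center of every time step by the fixed origin offset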
__global__ void origin_shift_kernel(uint32_t RZ_length, double* RZ_stack){
uint32_t time_id = blockIdx.x;
uint32_t stack_Z = time_id * RZ_length;
RZ_stack[stack_Z * 3 ] += ORIGIN_SHIFT_X;
RZ_stack[stack_Z * 3 + 1] += ORIGIN_SHIFT_Y;
RZ_stack[stack_Z * 3 + 2] += ORIGIN_SHIFT_Z;
}
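// generate_constraints: for every link and obstacle, buffer the obstacle with the link's k-independent
// generators, gather the k-dependent generators and their k_con flags, and build the half-space
// representation (A_con, d_con, delta_con) evaluated later in evaluate_constraints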
void rotatotopeArray::generate_constraints(uint32_t n_obstacles_in, double* OZ, uint32_t OZ_width, uint32_t OZ_length) {
// obstacle constraints
n_obstacles = n_obstacles_in;
if(n_obstacles == 0) return;
uint32_t OZ_unit_length = OZ_length / n_obstacles;
double* dev_OZ;
hipMalloc((void**)&dev_OZ, OZ_length * OZ_width * sizeof(double));
hipMemcpy(dev_OZ, OZ, OZ_length * OZ_width * sizeof(double), hipMemcpyHostToDevice);
A_con = new double*[n_links];
dev_A_con = new double*[n_links];
d_con = new double*[n_links];
dev_d_con = new double*[n_links];
delta_con = new double*[n_links];
dev_delta_con = new double*[n_links];
k_con = new bool*[n_links];
dev_k_con = new bool*[n_links];
k_con_num = new uint8_t*[n_links];
dev_k_con_num = new uint8_t*[n_links];
max_k_con_num = new uint32_t[n_links];
for (uint32_t link_id = 0; link_id < n_links; link_id++) {
uint32_t buff_obstacle_length = RZ_length[link_id] + (OZ_unit_length - 1);
uint32_t constraint_length = ((buff_obstacle_length - 1) * (buff_obstacle_length - 2)) / 2;
// buffer the obstacle by k-independent generators
k_con[link_id] = new bool[2 * (link_id + 1) * n_time_steps * RZ_length[link_id]];
hipMalloc((void**)&(dev_k_con[link_id]), 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(bool));
k_con_num[link_id] = new uint8_t[n_time_steps];
hipMalloc((void**)&(dev_k_con_num[link_id]), n_time_steps * sizeof(uint8_t));
double* dev_buff_obstacles;
hipMalloc((void**)&dev_buff_obstacles, n_obstacles * n_time_steps * buff_obstacle_length * 3 * sizeof(double));
hipMemset(dev_buff_obstacles, 0, n_obstacles * n_time_steps * buff_obstacle_length * 3 * sizeof(double));
double* dev_frs_k_dep_G;
hipMalloc((void**)&dev_frs_k_dep_G, n_time_steps * RZ_length[link_id] * 3 * sizeof(double));
hipMemset(dev_frs_k_dep_G, 0, n_time_steps * RZ_length[link_id] * 3 * sizeof(double));
dim3 grid2(n_obstacles, n_time_steps, 1);
buff_obstacles_kernel << < grid2, RZ_length[link_id] >> > (link_id, RZ_length[link_id], dev_RZ_stack[link_id], dev_c_idx_stack[link_id], dev_k_idx_stack[link_id], dev_C_idx_stack[link_id], dev_OZ, OZ_unit_length, dev_buff_obstacles, dev_frs_k_dep_G, dev_k_con[link_id], dev_k_con_num[link_id]);
if(debugMode){
hipMemcpy(k_con[link_id], dev_k_con[link_id], 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(bool), hipMemcpyDeviceToHost);
}
hipMemcpy(k_con_num[link_id], dev_k_con_num[link_id], n_time_steps * sizeof(uint8_t), hipMemcpyDeviceToHost);
// find the maximum width of A_con for memory allocation
max_k_con_num[link_id] = 0;
for (uint32_t i = 0; i < n_time_steps; i++) {
if (k_con_num[link_id][i] > max_k_con_num[link_id]) {
max_k_con_num[link_id] = k_con_num[link_id][i];
}
}
// generate obstacles polynomials
hipMalloc((void**)&(dev_A_con[link_id]), n_obstacles * n_time_steps * constraint_length * max_k_con_num[link_id] * sizeof(double));
hipMalloc((void**)&(dev_d_con[link_id]), n_obstacles * n_time_steps * constraint_length * sizeof(double));
hipMalloc((void**)&(dev_delta_con[link_id]), n_obstacles * n_time_steps * constraint_length * sizeof(double));
dim3 grid3(n_obstacles, n_time_steps, 1);
polytope << < grid2, constraint_length >> > (buff_obstacle_length, RZ_length[link_id], dev_buff_obstacles, dev_frs_k_dep_G, dev_k_con_num[link_id], max_k_con_num[link_id], dev_A_con[link_id], dev_d_con[link_id], dev_delta_con[link_id]);
if(debugMode){
A_con[link_id] = new double[n_obstacles * n_time_steps * constraint_length * max_k_con_num[link_id]];
hipMemcpy(A_con[link_id], dev_A_con[link_id], n_obstacles * n_time_steps * constraint_length * max_k_con_num[link_id] * sizeof(double), hipMemcpyDeviceToHost);
d_con[link_id] = new double[n_obstacles * n_time_steps * constraint_length];
hipMemcpy(d_con[link_id], dev_d_con[link_id], n_obstacles * n_time_steps * constraint_length * sizeof(double), hipMemcpyDeviceToHost);
delta_con[link_id] = new double[n_obstacles * n_time_steps * constraint_length];
hipMemcpy(delta_con[link_id], dev_delta_con[link_id], n_obstacles * n_time_steps * constraint_length * sizeof(double), hipMemcpyDeviceToHost);
}
else{
A_con[link_id] = nullptr;
d_con[link_id] = nullptr;
delta_con[link_id] = nullptr;
}
hipFree(dev_buff_obstacles);
hipFree(dev_frs_k_dep_G);
}
hipFree(dev_OZ);
}
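// buff_obstacles_kernel: kc_info flags the sliceable (k-dependent) generators; thread 0 subtracts the link
// center from the obstacle center and buffers the obstacle generators, thread 1 gathers the k-dependent
// generators into frs_k_dep_G and fills k_con / k_con_num, thread 2 appends the k-independent generators,
// collapsing very small ones into a box; note buff_obstacle_length is hard-coded as RZ_length + 3, which
// assumes each obstacle zonotope has exactly 3 generators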
__global__ void buff_obstacles_kernel(uint32_t link_id, uint32_t RZ_length, double* RZ, bool* c_idx, uint8_t* k_idx, uint8_t* C_idx, double* OZ, uint32_t OZ_unit_length, double* buff_obstacles, double* frs_k_dep_G, bool* k_con, uint8_t* k_con_num) {
uint32_t obstacle_id = blockIdx.x;
uint32_t obstacle_base = obstacle_id * OZ_unit_length;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
uint32_t z_id = threadIdx.x;
uint32_t buff_obstacle_length = RZ_length + 3;
uint32_t RZ_base = time_id * RZ_length;
uint32_t k_start = time_id * RZ_length;
uint32_t k_end = (2 * (link_id + 1) * n_time_steps + time_id) * RZ_length;
uint32_t k_step = n_time_steps * RZ_length;
uint32_t k_con_num_base = time_id;
uint32_t buff_base = (obstacle_id * n_time_steps + time_id) * buff_obstacle_length;
// first, find kc_col
__shared__ bool kc_info[MAX_RZ_LENGTH];
kc_info[z_id] = true;
for (uint32_t i = k_start; i < k_end; i += k_step) {
kc_info[z_id] &= (k_idx[i + z_id] != 1) || (C_idx[i + z_id] != 1);
}
kc_info[z_id] &= c_idx[RZ_base + z_id];
__syncthreads();
if (z_id == 0) { // process the original obstacle zonotope
for (uint32_t i = 0; i < 3; i++) {
buff_obstacles[buff_base * 3 + i] = OZ[obstacle_base * 3 + i] - RZ[RZ_base * 3 + i];
}
for (uint32_t obs_g = 1; obs_g < OZ_unit_length; obs_g++) {
for (uint32_t i = 0; i < 3; i++) {
buff_obstacles[(buff_base + obs_g) * 3 + i] = OZ[(obstacle_base + obs_g) * 3 + i];
// buffer the obstacle, assuming its generators form an identity (eye) matrix
if(i == obs_g - 1) buff_obstacles[(buff_base + obs_g) * 3 + i] += BUFFER_DIST / 2.0;
}
}
}
else if (z_id == 1) { // find k-dependent generators and complete k_con
if (obstacle_id == 0) {
uint8_t k_dep_num = 0;
for (uint32_t z = 1; z < RZ_length; z++) {
if (kc_info[z]) {
for (uint32_t j = k_start; j < k_end; j += k_step) {
k_con[j + k_dep_num] = (k_idx[j + z] == 2);
}
for (uint32_t i = 0; i < 3; i++) {
frs_k_dep_G[(RZ_base + k_dep_num) * 3 + i] = RZ[(RZ_base + z) * 3 + i];
}
k_dep_num++;
}
}
k_con_num[k_con_num_base] = k_dep_num;
}
}
else if (z_id == 2) { // find k-independent generators and complete buff_obstacles
uint8_t k_indep_num = OZ_unit_length;
// add a test here, reduce small generators to be a box
double reduced_generators[3];
reduced_generators[0] = 0;
reduced_generators[1] = 0;
reduced_generators[2] = 0;
for (uint32_t z = 1; z < RZ_length; z++) {
if (!kc_info[z]) {
double norm = 0;
for (uint32_t i = 0; i < 3; i++) {
norm += RZ[(RZ_base + z) * 3 + i] * RZ[(RZ_base + z) * 3 + i];
}
if(norm >= TOO_SMALL_POLYTOPE_JUDGE){
for (uint32_t i = 0; i < 3; i++) {
buff_obstacles[(buff_base + k_indep_num) * 3 + i] = RZ[(RZ_base + z) * 3 + i];
}
k_indep_num++;
}
else{
for (uint32_t i = 0; i < 3; i++) {
reduced_generators[i] += RZ[(RZ_base + z) * 3 + i];
}
}
}
}
for (uint32_t i = 0; i < 3; i++) {
for (uint32_t j = 0; j < 3; j++){
if(i == j){
buff_obstacles[(buff_base + k_indep_num) * 3 + j] = reduced_generators[i];
}
else{
buff_obstacles[(buff_base + k_indep_num) * 3 + j] = 0;
}
}
k_indep_num++;
}
}
}
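// polytope: each thread picks one pair of buffered-obstacle generators via a closed-form index map, uses their
// cross product (normalized) as a candidate facet normal, projects the k-dependent generators onto it to fill a
// row of A_con, and computes the facet offset d and support delta of the remaining generators; degenerate
// pairs get delta = A_BIG_NUMBER so the facet is ignored during evaluation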
__global__ void polytope(uint32_t buff_obstacle_length, uint32_t k_dep_G_length, double* buff_obstacles, double* frs_k_dep_G, uint8_t* k_con_num, uint32_t A_con_width, double* A_con, double* d_con, double* delta_con) {
uint32_t obstacle_id = blockIdx.x;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
double buff_obstacle_size = (double)buff_obstacle_length - 1.0;
uint32_t constraint_length = (buff_obstacle_length - 1) * (buff_obstacle_length - 2) / 2;
uint32_t k_con_base = time_id;
uint32_t k_dep_G_base = k_con_base * k_dep_G_length;
uint32_t obs_base = (obstacle_id * n_time_steps + time_id) * buff_obstacle_length;
uint32_t c_id = threadIdx.x;
uint32_t first = (uint32_t)floor(-0.5*sqrt(4 * buff_obstacle_size * buff_obstacle_size - 4 * buff_obstacle_size - 8.0 * ((double)c_id) + 1.0) + buff_obstacle_size - 0.5);
uint32_t first_base = (obs_base + first + 1) * 3;
uint32_t second = c_id + 1 - ((2 * (buff_obstacle_length - 1) - 3 - first) * first) / 2;
uint32_t second_base = (obs_base + second + 1) * 3;
uint32_t con_base = (obstacle_id * n_time_steps + time_id) * constraint_length + c_id;
double A_1 = buff_obstacles[first_base + 1] * buff_obstacles[second_base + 2] - buff_obstacles[first_base + 2] * buff_obstacles[second_base + 1];
double A_2 = buff_obstacles[first_base + 2] * buff_obstacles[second_base ] - buff_obstacles[first_base ] * buff_obstacles[second_base + 2];
double A_3 = buff_obstacles[first_base ] * buff_obstacles[second_base + 1] - buff_obstacles[first_base + 1] * buff_obstacles[second_base ];
double A_s_q = sqrt(A_1 * A_1 + A_2 * A_2 + A_3 * A_3);
if(A_s_q > 0){
A_1 /= A_s_q;
A_2 /= A_s_q;
A_3 /= A_s_q;
}
else{
A_1 = 0;
A_2 = 0;
A_3 = 0;
}
for (uint32_t i = 0; i < k_con_num[k_con_base]; i++) {
A_con[con_base * A_con_width + i] = A_1 * frs_k_dep_G[(k_dep_G_base + i) * 3] + A_2 * frs_k_dep_G[(k_dep_G_base + i) * 3 + 1] + A_3 * frs_k_dep_G[(k_dep_G_base + i) * 3 + 2];
}
double d = A_1 * buff_obstacles[obs_base * 3] + A_2 * buff_obstacles[obs_base * 3 + 1] + A_3 * buff_obstacles[obs_base * 3 + 2];
double deltaD = 0;
for (uint32_t i = 1; i < buff_obstacle_length - k_con_num[k_con_base]; i++) {
deltaD += abs(A_1 * buff_obstacles[(obs_base + i) * 3] + A_2 * buff_obstacles[(obs_base + i) * 3 + 1] + A_3 * buff_obstacles[(obs_base + i) * 3 + 2]);
}
if (A_s_q > 0) {
d_con[con_base] = d;
delta_con[con_base] = deltaD;
}
else {
d_con[con_base] = 0;
delta_con[con_base] = A_BIG_NUMBER;
}
}
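// generate_self_constraints: same construction as generate_constraints, but for each link pair the buffered
// obstacle is replaced by the zonotope of the difference of the two link rotatotopes (gen_zono)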
void rotatotopeArray::generate_self_constraints(uint32_t n_pairs_input, uint32_t* self_pairs_input){
n_pairs = n_pairs_input;
self_pairs = self_pairs_input;
A_con_self = new double*[n_pairs];
dev_A_con_self = new double*[n_pairs];
d_con_self = new double*[n_pairs];
dev_d_con_self = new double*[n_pairs];
delta_con_self = new double*[n_pairs];
dev_delta_con_self = new double*[n_pairs];
k_con_self = new bool*[n_pairs];
dev_k_con_self = new bool*[n_pairs];
k_con_num_self = new uint8_t*[n_pairs];
dev_k_con_num_self = new uint8_t*[n_pairs];
max_k_con_num_self = new uint32_t[n_pairs];
for(uint32_t pair_id = 0; pair_id < n_pairs; pair_id++){
uint32_t R1 = self_pairs[pair_id * 2];
uint32_t R2 = self_pairs[pair_id * 2 + 1];
uint32_t R1_length = RZ_length[R1];
uint32_t R2_length = RZ_length[R2];
uint32_t gen_zono_length = R2_length;
uint32_t constraint_length = ((gen_zono_length - 1) * (gen_zono_length - 2)) / 2;
uint32_t k_dep_length = R2_length;
// buffer the obstacle by k-independent generators
k_con_self[pair_id] = new bool[2 * (R2 + 1) * n_time_steps * k_dep_length];
hipMalloc((void**)&(dev_k_con_self[pair_id]), 2 * (R2 + 1) * n_time_steps * k_dep_length * sizeof(bool));
hipMemset(dev_k_con_self[pair_id], 0, 2 * (R2 + 1) * n_time_steps * k_dep_length * sizeof(bool));
k_con_num_self[pair_id] = new uint8_t[n_time_steps];
hipMalloc((void**)&(dev_k_con_num_self[pair_id]), n_time_steps * sizeof(uint8_t));
double* dev_gen_zono;
hipMalloc((void**)&dev_gen_zono, n_time_steps * gen_zono_length * Z_width * sizeof(double));
hipMemset(dev_gen_zono, 0, n_time_steps * gen_zono_length * Z_width * sizeof(double));
double* dev_k_dep_pt;
hipMalloc((void**)&dev_k_dep_pt, n_time_steps * k_dep_length * Z_width * sizeof(double));
hipMemset(dev_k_dep_pt, 0, n_time_steps * k_dep_length * Z_width * sizeof(double));
gen_zono_kernel << < n_time_steps, R2_length >> > (R1, R2, R1_length, R2_length, dev_RZ_stack[R1], dev_c_idx_stack[R1], dev_k_idx_stack[R1], dev_C_idx_stack[R1], dev_RZ_stack[R2], dev_c_idx_stack[R2], dev_k_idx_stack[R2], dev_C_idx_stack[R2], dev_gen_zono, dev_k_dep_pt, dev_k_con_self[pair_id], dev_k_con_num_self[pair_id]);
if(debugMode){
hipMemcpy(k_con_self[pair_id], dev_k_con_self[pair_id], 2 * (R2 + 1) * n_time_steps * k_dep_length * sizeof(bool), hipMemcpyDeviceToHost);
}
hipMemcpy(k_con_num_self[pair_id], dev_k_con_num_self[pair_id], n_time_steps * sizeof(uint8_t), hipMemcpyDeviceToHost);
// find the maximum width of A_con for memory allocation
max_k_con_num_self[pair_id] = 0;
for (uint32_t i = 0; i < n_time_steps; i++) {
if (k_con_num_self[pair_id][i] > max_k_con_num_self[pair_id]) {
max_k_con_num_self[pair_id] = k_con_num_self[pair_id][i];
}
}
// generate obstacles polynomials
hipMalloc((void**)&(dev_A_con_self[pair_id]), n_time_steps * constraint_length * max_k_con_num_self[pair_id] * sizeof(double));
hipMalloc((void**)&(dev_d_con_self[pair_id]), n_time_steps * constraint_length * sizeof(double));
hipMalloc((void**)&(dev_delta_con_self[pair_id]), n_time_steps * constraint_length * sizeof(double));
dim3 grid2(1, n_time_steps, 1);
polytope << < grid2, constraint_length >> > (gen_zono_length, k_dep_length, dev_gen_zono, dev_k_dep_pt, dev_k_con_num_self[pair_id], max_k_con_num_self[pair_id], dev_A_con_self[pair_id], dev_d_con_self[pair_id], dev_delta_con_self[pair_id]);
if(debugMode){
A_con_self[pair_id] = new double[n_time_steps * constraint_length * max_k_con_num_self[pair_id]];
hipMemcpy(A_con_self[pair_id], dev_A_con_self[pair_id], n_time_steps * constraint_length * max_k_con_num_self[pair_id] * sizeof(double), hipMemcpyDeviceToHost);
d_con_self[pair_id] = new double[n_time_steps * constraint_length];
hipMemcpy(d_con_self[pair_id], dev_d_con_self[pair_id], n_time_steps * constraint_length * sizeof(double), hipMemcpyDeviceToHost);
delta_con_self[pair_id] = new double[n_time_steps * constraint_length];
hipMemcpy(delta_con_self[pair_id], dev_delta_con_self[pair_id], n_time_steps * constraint_length * sizeof(double), hipMemcpyDeviceToHost);
}
else{
A_con_self[pair_id] = nullptr;
d_con_self[pair_id] = nullptr;
delta_con_self[pair_id] = nullptr;
}
hipFree(dev_gen_zono);
hipFree(dev_k_dep_pt);
}
}
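// gen_zono_kernel: build the difference zonotope of a link pair; the center is center(RZ_1) - center(RZ_2),
// k-dependent generators of both links are gathered into k_dep_pt (link 1 negated), and the k-independent
// generators are appended, with very small ones reduced to a box buffered by BUFFER_DIST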
__global__ void gen_zono_kernel(uint32_t link_id_1, uint32_t link_id_2, uint32_t RZ_length_1, uint32_t RZ_length_2, double* RZ_1, bool* c_idx_1, uint8_t* k_idx_1, uint8_t* C_idx_1, double* RZ_2, bool* c_idx_2, uint8_t* k_idx_2, uint8_t* C_idx_2, double* gen_zono, double* k_dep_pt, bool* k_con_self, uint8_t* k_con_num_self) {
uint32_t time_id = blockIdx.x;
uint32_t n_time_steps = gridDim.x;
uint32_t z_id = threadIdx.x;
uint32_t gen_zono_length = RZ_length_2;
uint32_t gen_zono_base = time_id * gen_zono_length;
uint32_t k_con_num_base = time_id;
uint32_t RZ_base_1 = time_id * RZ_length_1;
uint32_t k_start_1 = time_id * RZ_length_1;
uint32_t k_end_1 = (2 * (link_id_1 + 1) * n_time_steps + time_id) * RZ_length_1;
uint32_t k_step_1 = n_time_steps * RZ_length_1;
uint32_t RZ_base_2 = time_id * RZ_length_2;
uint32_t k_start_2 = time_id * RZ_length_2;
uint32_t k_end_2 = (2 * (link_id_2 + 1) * n_time_steps + time_id) * RZ_length_2;
uint32_t k_step_2 = n_time_steps * RZ_length_2;
// first, find kc_col for both links in a pair
__shared__ bool kc_info_1[MAX_RZ_LENGTH];
__shared__ bool kc_info_2[MAX_RZ_LENGTH];
if(z_id < RZ_length_1){
kc_info_1[z_id] = true; // start true and AND in every joint's flags, as in buff_obstacles_kernel
for (uint32_t i = k_start_1; i < k_end_1; i += k_step_1) {
kc_info_1[z_id] &= (k_idx_1[i + z_id] != 1) || (C_idx_1[i + z_id] != 1);
}
kc_info_1[z_id] &= c_idx_1[RZ_base_1 + z_id];
}
kc_info_2[z_id] = true;
for (uint32_t i = k_start_2; i < k_end_2; i += k_step_2) {
kc_info_2[z_id] &= (k_idx_2[i + z_id] != 1) || (C_idx_2[i + z_id] != 1);
}
kc_info_2[z_id] &= c_idx_2[RZ_base_2 + z_id];
__syncthreads();
if (z_id == 0) { // process the center
for (uint32_t i = 0; i < 3; i++) {
gen_zono[gen_zono_base * 3 + i] = RZ_1[RZ_base_1 * 3 + i] - RZ_2[RZ_base_2 * 3 + i];
}
}
else if (z_id == 1) { // find k-dependent generators and complete k_con
uint8_t k_dep_num = 0;
for (uint32_t z = 1; z < RZ_length_1; z++) {
if(k_dep_num >= RZ_length_2){
break;
}
if (kc_info_1[z]) {
for (uint32_t j = k_start_1; j < k_end_1; j += k_step_1) {
k_con_self[j + k_dep_num] = (k_idx_1[j + z] == 2);
}
for (uint32_t i = 0; i < 3; i++) {
k_dep_pt[(RZ_base_2 + k_dep_num) * 3 + i] = -RZ_1[(RZ_base_1 + z) * 3 + i];
}
k_dep_num++;
}
}
for (uint32_t z = 1; z < RZ_length_2; z++) {
if(k_dep_num >= RZ_length_2){
break;
}
if (kc_info_2[z]) {
uint32_t kj = k_start_1;
for (uint32_t j = k_start_2; j < k_end_2; j += k_step_2) {
k_con_self[kj + k_dep_num] = (k_idx_2[j + z] == 2);
kj += k_step_1;
}
for (uint32_t i = 0; i < 3; i++) {
k_dep_pt[(RZ_base_2 + k_dep_num) * 3 + i] = RZ_2[(RZ_base_2 + z) * 3 + i];
}
k_dep_num++;
}
}
k_con_num_self[k_con_num_base] = k_dep_num;
}
else if (z_id == 2) { // find k-independent generators and complete gen_zono
uint8_t k_indep_num = 1;
// add a test here, reduce small generators to be a box
double reduced_generators[3];
reduced_generators[0] = 0;
reduced_generators[1] = 0;
reduced_generators[2] = 0;
for (uint32_t z = 1; z < RZ_length_1; z++) {
if (!kc_info_1[z]) {
double norm = 0;
for (uint32_t i = 0; i < 3; i++) {
norm += RZ_1[(RZ_base_1 + z) * 3 + i] * RZ_1[(RZ_base_1 + z) * 3 + i];
}
if(norm >= TOO_SMALL_POLYTOPE_JUDGE){
for (uint32_t i = 0; i < 3; i++) {
gen_zono[(gen_zono_base + k_indep_num) * 3 + i] = RZ_1[(RZ_base_1 + z) * 3 + i];
}
k_indep_num++;
}
else{
for (uint32_t i = 0; i < 3; i++) {
reduced_generators[i] += RZ_1[(RZ_base_1 + z) * 3 + i];
}
}
}
}
for (uint32_t z = 1; z < RZ_length_2; z++) {
if (!kc_info_2[z]) {
double norm = 0;
for (uint32_t i = 0; i < 3; i++) {
norm += RZ_2[(RZ_base_2 + z) * 3 + i] * RZ_2[(RZ_base_2 + z) * 3 + i];
}
if(norm >= TOO_SMALL_POLYTOPE_JUDGE){
for (uint32_t i = 0; i < 3; i++) {
gen_zono[(gen_zono_base + k_indep_num) * 3 + i] = RZ_2[(RZ_base_2 + z) * 3 + i];
}
k_indep_num++;
}
else{
for (uint32_t i = 0; i < 3; i++) {
reduced_generators[i] += RZ_2[(RZ_base_2 + z) * 3 + i];
}
}
if(k_indep_num >= gen_zono_length - 3){
break;
}
}
}
for (uint32_t i = 0; i < 3; i++) {
for (uint32_t j = 0; j < 3; j++){
if(i == j){
gen_zono[(gen_zono_base + k_indep_num) * 3 + j] = reduced_generators[i] + BUFFER_DIST;
}
else{
gen_zono[(gen_zono_base + k_indep_num) * 3 + j] = 0;
}
}
k_indep_num++;
}
}
}
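// evaluate_constraints: form lambda from k_opt, launch the constraint and gradient kernels for every
// link/obstacle pair and every self-intersection pair, then copy values, jacobians, and hessians back to the host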
void rotatotopeArray::evaluate_constraints(double* k_opt) {
start_t = clock();
if(n_obstacles > 0 && con != nullptr){
delete[] con;
delete[] jaco_con;
delete[] hess_con;
}
if(con_self != nullptr){
delete[] con_self;
delete[] jaco_con_self;
delete[] hess_con_self;
}
memcpy(current_k_opt, k_opt, n_links * 2 * sizeof(double));
double* dev_con = nullptr;
double* dev_jaco_con = nullptr;
double* dev_hess_con = nullptr;
if(n_obstacles > 0){
con = new double[n_links * n_obstacles * n_time_steps];
hipMalloc((void**)&dev_con, n_links * n_obstacles * n_time_steps * sizeof(double));
jaco_con = new double[n_links * n_obstacles * n_time_steps * n_links * 2];
hipMalloc((void**)&dev_jaco_con, n_links * n_obstacles * n_time_steps * n_links * 2 * sizeof(double));
hipMemset(dev_jaco_con, 0, n_links * n_obstacles * n_time_steps * n_links * 2 * sizeof(double));
hess_con = new double[n_links * n_obstacles * n_time_steps * n_links * (n_links * 2 - 1)];
hipMalloc((void**)&dev_hess_con, n_links * n_obstacles * n_time_steps * n_links * (n_links * 2 - 1) * sizeof(double));
hipMemset(dev_hess_con, 0, n_links * n_obstacles * n_time_steps * n_links * (n_links * 2 - 1) * sizeof(double));
}
con_self = new double[n_pairs * n_time_steps];
double* dev_con_self;
hipMalloc((void**)&dev_con_self, n_pairs * n_time_steps * sizeof(double));
jaco_con_self = new double[n_pairs * n_time_steps * n_links * 2];
double* dev_jaco_con_self;
hipMalloc((void**)&dev_jaco_con_self, n_pairs * n_time_steps * n_links * 2 * sizeof(double));
hipMemset(dev_jaco_con_self, 0, n_pairs * n_time_steps * n_links * 2 * sizeof(double));
hess_con_self = new double[n_pairs * n_time_steps * n_links * (n_links * 2 - 1)];
double* dev_hess_con_self;
hipMalloc((void**)&dev_hess_con_self, n_pairs * n_time_steps * n_links * (n_links * 2 - 1) * sizeof(double));
hipMemset(dev_hess_con_self, 0, n_pairs * n_time_steps * n_links * (n_links * 2 - 1) * sizeof(double));
double* lambda = new double[n_links * 2];
for (uint32_t joint_id = 0; joint_id < n_links * 2; joint_id++) {
lambda[joint_id] = c_k[joint_id] + k_opt[joint_id] / g_k[joint_id];
}
double* dev_lambda;
hipMalloc((void**)&dev_lambda, n_links * 2 * sizeof(double));
hipMemcpy(dev_lambda, lambda, n_links * 2 * sizeof(double), hipMemcpyHostToDevice);
double* dev_g_k;
hipMalloc((void**)&dev_g_k, n_links * 2 * sizeof(double));
hipMemcpy(dev_g_k, g_k, n_links * 2 * sizeof(double), hipMemcpyHostToDevice);
// obstacles constraint evaluation
if(n_obstacles > 0){
for (uint32_t link_id = 0; link_id < n_links; link_id++) {
uint32_t buff_obstacle_length = RZ_length[link_id] + 3;
uint32_t constraint_length = ((buff_obstacle_length - 1) * (buff_obstacle_length - 2)) / 2;
double* dev_con_result; // results of evaluation of constraints
bool* dev_index_factor; // whether the constraints are positive or negative
hipMalloc((void**)&dev_con_result, n_obstacles * n_time_steps * constraint_length * sizeof(double));
hipMalloc((void**)&dev_index_factor, n_obstacles * n_time_steps * constraint_length * sizeof(bool));
dim3 grid1(n_obstacles, n_time_steps, 1);
dim3 block1(constraint_length, 1, 1);
evaluate_constraints_kernel << < grid1, block1 >> > (dev_lambda, link_id, RZ_length[link_id], dev_A_con[link_id], max_k_con_num[link_id], dev_d_con[link_id], dev_delta_con[link_id], dev_k_con[link_id], dev_k_con_num[link_id], dev_con_result, dev_index_factor);
dim3 grid2(n_obstacles, n_time_steps, 1);
dim3 block2((link_id + 1) * 2, (link_id + 1) * 2, 1);
evaluate_gradient_kernel << < grid2, block2 >> > (dev_con_result, dev_index_factor, link_id, link_id, RZ_length[link_id], constraint_length, dev_lambda, dev_g_k, dev_A_con[link_id], max_k_con_num[link_id], dev_k_con[link_id], dev_k_con_num[link_id], n_links, dev_con, dev_jaco_con, dev_hess_con);
hipFree(dev_con_result);
hipFree(dev_index_factor);
}
}
// self intersection constraint evaluation
for (uint32_t pair_id = 0; pair_id < n_pairs; pair_id++) {
uint32_t R1 = self_pairs[pair_id * 2];
uint32_t R2 = self_pairs[pair_id * 2 + 1];
uint32_t R1_length = RZ_length[R1];
uint32_t R2_length = RZ_length[R2];
uint32_t gen_zono_length = R2_length;
uint32_t constraint_length = ((gen_zono_length - 1) * (gen_zono_length - 2)) / 2;
double* dev_con_result; // results of evaluation of constraints
bool* dev_index_factor; // whether the constraints are positive or negative
hipMalloc((void**)&dev_con_result, n_time_steps * constraint_length * sizeof(double));
hipMalloc((void**)&dev_index_factor, n_time_steps * constraint_length * sizeof(bool));
dim3 grid1(1, n_time_steps, 1);
dim3 block1(constraint_length, 1, 1);
evaluate_constraints_kernel << < grid1, block1 >> > (dev_lambda, R2, R1_length, dev_A_con_self[pair_id], max_k_con_num_self[pair_id], dev_d_con_self[pair_id], dev_delta_con_self[pair_id], dev_k_con_self[pair_id], dev_k_con_num_self[pair_id], dev_con_result, dev_index_factor);
dim3 grid2(1, n_time_steps, 1);
dim3 block2((R2 + 1) * 2, (R2 + 1) * 2, 1);
evaluate_gradient_kernel << < grid2, block2 >> > (dev_con_result, dev_index_factor, R2, pair_id, R1_length, constraint_length, dev_lambda, dev_g_k, dev_A_con_self[pair_id], max_k_con_num_self[pair_id], dev_k_con_self[pair_id], dev_k_con_num_self[pair_id], n_links, dev_con_self, dev_jaco_con_self, dev_hess_con_self);
hipFree(dev_con_result);
hipFree(dev_index_factor);
}
if(n_obstacles > 0){
hipMemcpy(con, dev_con, n_links * n_obstacles * n_time_steps * sizeof(double), hipMemcpyDeviceToHost);
hipFree(dev_con);
hipMemcpy(jaco_con, dev_jaco_con, n_links * n_obstacles * n_time_steps * n_links * 2 * sizeof(double), hipMemcpyDeviceToHost);
hipFree(dev_jaco_con);
hipMemcpy(hess_con, dev_hess_con, n_links * n_obstacles * n_time_steps * n_links * (n_links * 2 - 1) * sizeof(double), hipMemcpyDeviceToHost);
hipFree(dev_hess_con);
}
hipMemcpy(con_self, dev_con_self, n_pairs * n_time_steps * sizeof(double), hipMemcpyDeviceToHost);
hipFree(dev_con_self);
hipMemcpy(jaco_con_self, dev_jaco_con_self, n_pairs * n_time_steps * n_links * 2 * sizeof(double), hipMemcpyDeviceToHost);
hipFree(dev_jaco_con_self);
hipMemcpy(hess_con_self, dev_hess_con_self, n_pairs * n_time_steps * n_links * (n_links * 2 - 1) * sizeof(double), hipMemcpyDeviceToHost);
hipFree(dev_hess_con_self);
delete[] lambda;
hipFree(dev_lambda);
hipFree(dev_g_k);
end_t = clock();
mexPrintf("CUDA: constraint evaluation time: %.6f ms\n", 1000.0 * (end_t - start_t) / (double)(CLOCKS_PER_SEC));
}
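// evaluate_constraints_kernel: one thread per facet; with Lambda_p = product of lambda over the joints flagged
// in k_con for generator p, the facet value is
// max( A_j.Lambda - d_j - delta_j , -(A_j.Lambda) + d_j - delta_j );
// index_factor records which side attains the max, and facets with delta == A_BIG_NUMBER are disabled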
__global__ void evaluate_constraints_kernel(double* lambda, uint32_t link_id, uint32_t RZ_length, double* A_con, uint32_t A_con_width, double* d_con, double* delta_con, bool* k_con, uint8_t* k_con_num, double* con_result, bool* index_factor) {
uint32_t obstacle_id = blockIdx.x;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
uint32_t c_id = threadIdx.x;
uint32_t constraint_length = blockDim.x;
uint32_t k_con_num_base = time_id;
uint32_t con_base = (obstacle_id * n_time_steps + time_id) * constraint_length + c_id;
uint32_t con_result_base = (obstacle_id * n_time_steps + time_id) * constraint_length + c_id;
__shared__ double shared_lambda[6];
__shared__ double lambdas_prod[MAX_K_DEP_SIZE];
if (c_id < 2 * (link_id + 1)) {
shared_lambda[c_id] = lambda[c_id];
}
__syncthreads();
if (c_id < k_con_num[k_con_num_base]) {
double prod = 1.0;
for (uint32_t j = 0; j < 2 * (link_id + 1); j++) {
if (k_con[(j * n_time_steps + time_id) * RZ_length + c_id]) {
prod *= shared_lambda[j];
}
}
lambdas_prod[c_id] = prod;
}
__syncthreads();
if (delta_con[con_base] == A_BIG_NUMBER){
con_result[con_result_base] = -A_BIG_NUMBER;
index_factor[con_result_base] = false;
}
else{
double result = 0;
for (uint32_t p = 0; p < k_con_num[k_con_num_base]; p++){
result += lambdas_prod[p] * A_con[con_base * A_con_width + p];
}
double pos_result = result - d_con[con_base] - delta_con[con_base];
double neg_result = -result + d_con[con_base] - delta_con[con_base];
if(pos_result > neg_result){
con_result[con_result_base] = pos_result;
index_factor[con_result_base] = true;
}
else{
con_result[con_result_base] = neg_result;
index_factor[con_result_base] = false;
}
}
}
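// evaluate_gradient_kernel: thread (0,0) picks the facet with the largest value and writes
// con = -max + CONSERVATIVE_BUFFER; diagonal threads (joint_id == joint_id_sec) accumulate the derivative of
// the lambda products with respect to that joint (jacobian, scaled by -1/g_k), and strictly lower-triangular
// threads accumulate the second derivative (hessian, packed row-wise over the lower triangle)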
__global__ void evaluate_gradient_kernel(double* con_result, bool* index_factor, uint32_t link_id, uint32_t pos_id, uint32_t RZ_length, uint32_t constraint_length, double* lambda, double* g_k, double* A_con, uint32_t A_con_width, bool* k_con, uint8_t* k_con_num, uint32_t n_links, double* con, double* jaco_con, double* hess_con) {
uint32_t obstacle_id = blockIdx.x;
uint32_t n_obstacles = gridDim.x;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
uint32_t joint_id = threadIdx.x;
uint32_t joint_id_sec = threadIdx.y;
uint32_t k_con_num_base = time_id;
uint32_t con_result_base = (obstacle_id * n_time_steps + time_id) * constraint_length;
__shared__ uint32_t max_idx;
uint32_t valu_con_base = (pos_id * n_obstacles + obstacle_id) * n_time_steps + time_id;
uint32_t jaco_con_base = ((pos_id * n_obstacles + obstacle_id) * n_time_steps + time_id) * n_links * 2;
uint32_t hess_con_base = ((pos_id * n_obstacles + obstacle_id) * n_time_steps + time_id) * n_links * (n_links * 2 - 1);
__shared__ double shared_lambda[6];
__shared__ double max_index_factor;
if (joint_id_sec == 0) {
if(joint_id == 0){
double maximum = -A_BIG_NUMBER - A_BIG_NUMBER;
max_idx = 0;
for (uint32_t i = 0; i < constraint_length; i++) {
double cur = con_result[con_result_base + i];
if (maximum < cur) {
max_idx = con_result_base + i;
maximum = cur;
}
}
con[valu_con_base] = -maximum + CONSERVATIVE_BUFFER;
if(index_factor[max_idx]){
max_index_factor = 1.0;
}
else{
max_index_factor = -1.0;
}
}
else if (joint_id <= 2 * (link_id + 1)) {
shared_lambda[joint_id - 1] = lambda[joint_id - 1];
}
}
__syncthreads();
if(joint_id == joint_id_sec){
double result = 0;
for (uint32_t p = 0; p < k_con_num[k_con_num_base]; p++) {
if(k_con[(joint_id * n_time_steps + time_id) * RZ_length + p]){
double prod = 1.0;
for (uint32_t j = 0; j < 2 * (link_id + 1); j++) {
if (j != joint_id && k_con[(j * n_time_steps + time_id) * RZ_length + p]) {
prod *= shared_lambda[j];
}
}
result += prod * max_index_factor * A_con[max_idx * A_con_width + p];
}
}
jaco_con[jaco_con_base + joint_id] = -result / g_k[joint_id];
}
else if(joint_id > joint_id_sec){
double result = 0;
for (uint32_t p = 0; p < k_con_num[k_con_num_base]; p++) {
if(k_con[(joint_id * n_time_steps + time_id) * RZ_length + p] && k_con[(joint_id_sec * n_time_steps + time_id) * RZ_length + p]){
double prod = 1.0;
for (uint32_t j = 0; j < 2 * (link_id + 1); j++) {
if (j != joint_id && j != joint_id_sec && k_con[(j * n_time_steps + time_id) * RZ_length + p]) {
prod *= shared_lambda[j];
}
}
result += prod * max_index_factor * A_con[max_idx * A_con_width + p];
}
}
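// note: hess_index below is computed but never used; the store indexes the packed lower triangle directly
// as joint_id * (joint_id - 1) / 2 + joint_id_sec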
uint32_t hess_index = 0;
for(uint32_t i = 0; i < joint_id_sec; i++){
hess_index += n_links * 2 - 1 - i;
}
hess_con[hess_con_base + joint_id * (joint_id - 1) / 2 + joint_id_sec] = -result / g_k[joint_id] / g_k[joint_id_sec];
}
}
rotatotopeArray::~rotatotopeArray() {
hipFree(dev_Z);
if (n_links > 0) {
hipFree(dev_RZ);
hipFree(dev_c_idx);
hipFree(dev_k_idx);
hipFree(dev_C_idx);
}
if (c_k != nullptr) {
delete[] c_k;
delete[] g_k;
}
if (dev_RZ_stack != nullptr) {
for (uint32_t i = 0; i < n_links; i++) {
delete[] RZ_stack[i];
}
delete[] RZ_stack;
for (uint32_t i = 0; i < n_links; i++) {
hipFree(dev_RZ_stack[i]);
}
delete[] dev_RZ_stack;
for (uint32_t i = 0; i < n_links; i++) {
delete[] c_idx_stack[i];
}
delete[] c_idx_stack;
for (uint32_t i = 0; i < n_links; i++) {
hipFree(dev_c_idx_stack[i]);
}
delete[] dev_c_idx_stack;
for (uint32_t i = 0; i < n_links; i++) {
delete[] k_idx_stack[i];
}
delete[] k_idx_stack;
for (uint32_t i = 0; i < n_links; i++) {
hipFree(dev_k_idx_stack[i]);
}
delete[] dev_k_idx_stack;
for (uint32_t i = 0; i < n_links; i++) {
delete[] C_idx_stack[i];
}
delete[] C_idx_stack;
for (uint32_t i = 0; i < n_links; i++) {
hipFree(dev_C_idx_stack[i]);
}
delete[] dev_C_idx_stack;
delete[] RZ_length;
}
if (n_obstacles > 0 && A_con != nullptr) {
for (uint32_t i = 0; i < n_links; i++) {
delete[] A_con[i];
}
delete[] A_con;
for (uint32_t i = 0; i < n_links; i++) {
hipFree(dev_A_con[i]);
}
delete[] dev_A_con;
for (uint32_t i = 0; i < n_links; i++) {
delete[] d_con[i];
}
delete[] d_con;
for (uint32_t i = 0; i < n_links; i++) {
hipFree(dev_d_con[i]);
}
delete[] dev_d_con;
for (uint32_t i = 0; i < n_links; i++) {
delete[] delta_con[i];
}
delete[] delta_con;
for (uint32_t i = 0; i < n_links; i++) {
hipFree(dev_delta_con[i]);
}
delete[] dev_delta_con;
for (uint32_t i = 0; i < n_links; i++) {
delete[] k_con[i];
}
delete[] k_con;
for (uint32_t i = 0; i < n_links; i++) {
hipFree(dev_k_con[i]);
}
delete[] dev_k_con;
for (uint32_t i = 0; i < n_links; i++) {
delete[] k_con_num[i];
}
delete[] k_con_num;
for (uint32_t i = 0; i < n_links; i++) {
hipFree(dev_k_con_num[i]);
}
delete[] dev_k_con_num;
delete[] max_k_con_num;
}
if(dev_A_con_self != nullptr){
for (uint32_t i = 0; i < n_pairs; i++) {
delete[] A_con_self[i];
}
delete[] A_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
hipFree(dev_A_con_self[i]);
}
delete[] dev_A_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
delete[] d_con_self[i];
}
delete[] d_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
hipFree(dev_d_con_self[i]);
}
delete[] dev_d_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
delete[] delta_con_self[i];
}
delete[] delta_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
hipFree(dev_delta_con_self[i]);
}
delete[] dev_delta_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
delete[] k_con_self[i];
}
delete[] k_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
hipFree(dev_k_con_self[i]);
}
delete[] dev_k_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
delete[] k_con_num_self[i];
}
delete[] k_con_num_self;
for (uint32_t i = 0; i < n_pairs; i++) {
hipFree(dev_k_con_num_self[i]);
}
delete[] dev_k_con_num_self;
delete[] max_k_con_num_self;
}
if (debug_RZ != nullptr) {
delete[] debug_RZ;
delete[] debug_c_idx;
delete[] debug_k_idx;
delete[] debug_C_idx;
}
if (con != nullptr) {
delete[] con;
delete[] jaco_con;
delete[] hess_con;
}
if (con_self != nullptr) {
delete[] con_self;
delete[] jaco_con_self;
delete[] hess_con_self;
}
delete[] current_k_opt;
}
#endif // !ROTATOTOPE_ARRAY_CPP
|
414e6cc0fbeee70d5db8cc1f064e309df6f60c31.cu
|
/*
Author: Bohao Zhang
Oct. 29 2019
arm_planning mex
a cuda array for a cluster of rotatotopes
*/
#ifndef ROTATOTOPE_ARRAY_CPP
#define ROTATOTOPE_ARRAY_CPP
#include "rotatotopeArray.h"
rotatotopeArray::rotatotopeArray(uint32_t n_links_input, uint32_t n_time_steps_input, uint32_t joint_per_link_input, double* R_input, double* dev_R_input, uint32_t R_unit_length_input, uint8_t* dev_rot_axes_input, double* Z_input, uint32_t Z_width_input, uint32_t Z_length_input, uint32_t reduce_order_input, double* g_k_input) {
debugMode = false;
n_links = n_links_input;
n_time_steps = n_time_steps_input;
joint_per_link = joint_per_link_input;
dev_R = dev_R_input;
R_unit_length = R_unit_length_input;
dev_rot_axes = dev_rot_axes_input;
reduce_order = reduce_order_input;
if (n_links > 0) {
Z = Z_input;
Z_length = Z_length_input;
Z_width = Z_width_input;
Z_unit_length = Z_length / n_links;
cudaMalloc((void**)&dev_Z, Z_width * Z_length * sizeof(double));
cudaMemcpy(dev_Z, Z, Z_width * Z_length * sizeof(double), cudaMemcpyHostToDevice);
c_k = new double[n_links * 2];
g_k = new double[n_links * 2];
for (uint32_t joint_id = 0; joint_id < n_links * 2; joint_id++) {
uint32_t R_id_start = ((joint_id + 1) * n_time_steps - 1) * R_unit_length;
c_k[joint_id] = R_input[R_id_start * 5 + k_dim];
g_k[joint_id] = g_k_input[joint_id];
}
double* dev_RZ_new;
cudaMalloc((void**)&dev_RZ, n_links * n_time_steps * reduce_order * Z_width * sizeof(double));
cudaMalloc((void**)&dev_RZ_new, n_links * n_time_steps * reduce_order * R_unit_length * Z_width * sizeof(double));
bool *dev_c_idx_new;
uint8_t *dev_k_idx_new, *dev_C_idx_new;
cudaMalloc((void**)&dev_c_idx, n_links * n_time_steps * reduce_order * sizeof(bool));
cudaMemset(dev_c_idx, 0, n_links * n_time_steps * reduce_order * sizeof(bool));
cudaMalloc((void**)&dev_c_idx_new, n_links * n_time_steps * reduce_order * R_unit_length * sizeof(bool));
cudaMemset(dev_c_idx_new, 0, n_links * n_time_steps * reduce_order * R_unit_length * sizeof(bool));
cudaMalloc((void**)&dev_k_idx, n_links * (n_links + 1) * n_time_steps * reduce_order * sizeof(uint8_t));
cudaMemset(dev_k_idx, 0, n_links * (n_links + 1) * n_time_steps * reduce_order * sizeof(uint8_t));
cudaMalloc((void**)&dev_k_idx_new, n_links * (n_links + 1) * n_time_steps * reduce_order * R_unit_length * sizeof(uint8_t));
cudaMemset(dev_k_idx_new, 0, n_links * (n_links + 1) * n_time_steps * reduce_order * R_unit_length * sizeof(uint8_t));
cudaMalloc((void**)&dev_C_idx, n_links * (n_links + 1) * n_time_steps * reduce_order * sizeof(uint8_t));
cudaMemset(dev_C_idx, 0, n_links * (n_links + 1) * n_time_steps * reduce_order * sizeof(uint8_t));
cudaMalloc((void**)&dev_C_idx_new, n_links * (n_links + 1) * n_time_steps * reduce_order * R_unit_length * sizeof(uint8_t));
cudaMemset(dev_C_idx_new, 0, n_links * (n_links + 1) * n_time_steps * reduce_order * R_unit_length * sizeof(uint8_t));
dim3 grid1(n_links, n_time_steps, 1);
dim3 block1(reduce_order, Z_width, 1);
initialize_RZ_kernel << < grid1, block1 >> > (dev_Z, Z_unit_length, reduce_order, dev_RZ, dev_c_idx);
for (int link = n_links; link > 0; link--) {
for (int joint_offset = joint_per_link - 1; joint_offset >= 0; joint_offset--) {
dim3 grid2(link, n_time_steps, 1);
dim3 block2(reduce_order, R_unit_length, 1);
multiply_kernel << < grid2, block2 >> > (dev_rot_axes, n_links - link, joint_offset, reduce_order, dev_RZ, dev_R, dev_c_idx, dev_k_idx, dev_C_idx, dev_RZ_new, dev_c_idx_new, dev_k_idx_new, dev_C_idx_new);
reduce_kernel << < grid2, (reduce_order * R_unit_length) >> > (dev_RZ_new, dev_c_idx_new, dev_k_idx_new, dev_C_idx_new, n_links - link, reduce_order, dev_RZ, dev_c_idx, dev_k_idx, dev_C_idx);
}
}
cudaFree(dev_RZ_new);
cudaFree(dev_c_idx_new);
cudaFree(dev_k_idx_new);
cudaFree(dev_C_idx_new);
}
else {
c_k = nullptr;
g_k = nullptr;
Z = nullptr;
dev_Z = nullptr;
dev_RZ = nullptr;
dev_c_idx = nullptr;
dev_k_idx = nullptr;
dev_C_idx = nullptr;
}
n_pairs = 0;
self_pairs = nullptr;
dev_RZ_stack = nullptr;
dev_c_idx_stack = nullptr;
dev_k_idx_stack = nullptr;
dev_C_idx_stack = nullptr;
RZ_length = nullptr;
n_obstacles = 0;
A_con = nullptr;
dev_A_con = nullptr;
d_con = nullptr;
dev_d_con = nullptr;
delta_con = nullptr;
dev_delta_con = nullptr;
k_con = nullptr;
dev_k_con = nullptr;
k_con_num = nullptr;
dev_k_con_num = nullptr;
max_k_con_num = nullptr;
A_con_self = nullptr;
dev_A_con_self = nullptr;
d_con_self = nullptr;
dev_d_con_self = nullptr;
delta_con_self = nullptr;
dev_delta_con_self = nullptr;
k_con_self = nullptr;
dev_k_con_self = nullptr;
k_con_num_self = nullptr;
dev_k_con_num_self = nullptr;
max_k_con_num_self = nullptr;
current_k_opt = new double[n_links * 2];
con = nullptr;
jaco_con = nullptr;
hess_con = nullptr;
con_self = nullptr;
jaco_con_self = nullptr;
hess_con_self = nullptr;
}
__global__ void initialize_RZ_kernel(double* link_Z, uint32_t link_Z_length, uint32_t reduce_order, double* RZ, bool* c_idx) {
uint32_t link_id = blockIdx.x;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
uint32_t z_id = threadIdx.x;
uint32_t w_id = threadIdx.y;
uint32_t Z_width = blockDim.y;
if (z_id < link_Z_length) {
RZ[((link_id * n_time_steps + time_id) * reduce_order + z_id) * Z_width + w_id] = link_Z[(link_id * link_Z_length + z_id) * Z_width + w_id];
}
else {
RZ[((link_id * n_time_steps + time_id) * reduce_order + z_id) * Z_width + w_id] = 0;
}
if (z_id == 0) c_idx[(link_id * n_time_steps + time_id) * reduce_order] = true;
}
__global__ void multiply_kernel(uint8_t* rot_axes, uint32_t link_offset, uint32_t joint_offset, uint32_t reduce_order, double* RZ, double* R, bool* c_idx, uint8_t* k_idx, uint8_t* C_idx, double* RZ_new, bool* c_idx_new, uint8_t* k_idx_new, uint8_t* C_idx_new) {
uint32_t link_id = blockIdx.x + link_offset;
uint32_t joint_id = blockIdx.x * 2 + joint_offset;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
uint32_t z_id = threadIdx.x;
uint32_t r_id = threadIdx.y;
uint32_t R_unit_length = blockDim.y;
uint32_t mul_Z = (link_id * n_time_steps + time_id) * reduce_order + z_id;
uint32_t mul_R = (joint_id * n_time_steps + time_id) * R_unit_length + r_id;
uint32_t mul_RZ = ((link_id * n_time_steps + time_id) * reduce_order + z_id) * R_unit_length + r_id;
uint8_t rot_axis = rot_axes[joint_id];
bool if_center = (r_id == 0); // true if center, false if not
if (rot_axis == 1) {
RZ_new[mul_RZ * 3] = if_center ? RZ[mul_Z * 3] : 0;
RZ_new[mul_RZ * 3 + 1] = R[mul_R * 5] * RZ[mul_Z * 3 + 1] - R[mul_R * 5 + 1] * RZ[mul_Z * 3 + 2];
RZ_new[mul_RZ * 3 + 2] = R[mul_R * 5 + 1] * RZ[mul_Z * 3 + 1] + R[mul_R * 5] * RZ[mul_Z * 3 + 2];
}
else if (rot_axis == 2) {
RZ_new[mul_RZ * 3] = R[mul_R * 5] * RZ[mul_Z * 3] + R[mul_R * 5 + 1] * RZ[mul_Z * 3 + 2];
RZ_new[mul_RZ * 3 + 1] = if_center ? RZ[mul_Z * 3 + 1] : 0;
RZ_new[mul_RZ * 3 + 2] = R[mul_R * 5] * RZ[mul_Z * 3 + 2] - R[mul_R * 5 + 1] * RZ[mul_Z * 3];
}
else {
RZ_new[mul_RZ * 3] = R[mul_R * 5] * RZ[mul_Z * 3] - R[mul_R * 5 + 1] * RZ[mul_Z * 3 + 1];
RZ_new[mul_RZ * 3 + 1] = R[mul_R * 5 + 1] * RZ[mul_Z * 3] + R[mul_R * 5] * RZ[mul_Z * 3 + 1];
RZ_new[mul_RZ * 3 + 2] = if_center ? RZ[mul_Z * 3 + 2] : 0;
}
c_idx_new[mul_RZ] = c_idx[mul_Z];
// update k_idx for this joint
uint32_t k_id = link_id * (link_id + 1) + joint_id;
uint32_t mul_k = (k_id * n_time_steps + time_id) * reduce_order * R_unit_length + (z_id * R_unit_length + r_id);
if (R[mul_R * 5 + k_dim] != 0) {
k_idx_new[mul_k] = 2;
}
else {
k_idx_new[mul_k] = 1;
}
// update k_idx for previous joints
for (uint32_t joint_k_id = joint_id + 1; joint_k_id < (link_id + 1) * 2; joint_k_id++) {
k_id = link_id * (link_id + 1) + joint_k_id;
uint32_t mul_z = (k_id * n_time_steps + time_id) * reduce_order + z_id;
mul_k = (k_id * n_time_steps + time_id) * reduce_order * R_unit_length + (z_id * R_unit_length + r_id);
k_idx_new[mul_k] = k_idx[mul_z];
}
// update C_idx for this joint
uint32_t C_id = link_id * (link_id + 1) + joint_id;
uint32_t mul_C = (C_id * n_time_steps + time_id) * reduce_order * R_unit_length + (z_id * R_unit_length + r_id);
if (r_id == 0) {
C_idx_new[mul_C] = 2;
}
else {
C_idx_new[mul_C] = 1;
}
// update C_idx for previous joints
for (uint32_t joint_k_id = joint_id + 1; joint_k_id < (link_id + 1) * 2; joint_k_id++) {
C_id = link_id * (link_id + 1) + joint_k_id;
uint32_t mul_z = (C_id * n_time_steps + time_id) * reduce_order + z_id;
mul_C = (C_id * n_time_steps + time_id) * reduce_order * R_unit_length + (z_id * R_unit_length + r_id);
C_idx_new[mul_C] = C_idx[mul_z];
}
}
__global__ void reduce_kernel(double* RZ_new, bool* c_idx_new, uint8_t* k_idx_new, uint8_t* C_idx_new, uint32_t link_offset, uint32_t reduce_order, double* RZ, bool* c_idx, uint8_t* k_idx, uint8_t* C_idx) {
uint32_t link_id = blockIdx.x + link_offset;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
uint32_t z_id = threadIdx.x;
uint32_t norm_length = blockDim.x;
uint32_t mul_Z = (link_id * n_time_steps + time_id) * norm_length + z_id; // we never reduce the center
__shared__ double RZ_norm[MAX_NORM_SIZE];
__shared__ uint32_t RZ_id[MAX_NORM_SIZE];
RZ_norm[z_id] = 0;
double norm;
for (uint32_t i = 0; i < 3; i++) {
norm = RZ_new[mul_Z * 3 + i];
RZ_norm[z_id] += norm * norm;
}
RZ_id[z_id] = z_id;
__syncthreads();
uint32_t base = (link_id * n_time_steps + time_id) * norm_length; // index offset for RZ_new
uint32_t k_start = ((link_id * (link_id + 1)) * n_time_steps + time_id) * norm_length;
uint32_t k_end = (((link_id + 1) * (link_id + 2)) * n_time_steps + time_id) * norm_length;
uint32_t k_step = n_time_steps * norm_length;
if (z_id == 0) {
// choose the vectors whose norm is among (reduce_order - 3) largest
uint32_t high = norm_length;
uint32_t low = 1;
uint32_t k = reduce_order - 3;
uint32_t i, j;
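// iterative quickselect: partition RZ_norm (and RZ_id in lockstep) in descending order and
// keep narrowing to the side that contains the k-th largest element, so the first
// (reduce_order - 3) slots end up holding the generators with the largest norms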
while (low < high) {
i = low;
j = high - 1;
double pivot = RZ_norm[low];
while (i <= j) {
while (i <= j && RZ_norm[i] >= pivot)
i++;
while (i <= j && RZ_norm[j] < pivot)
j--;
if (i < j) {
double temp_double = RZ_norm[i];
RZ_norm[i] = RZ_norm[j];
RZ_norm[j] = temp_double;
uint32_t temp = RZ_id[i];
RZ_id[i] = RZ_id[j];
RZ_id[j] = temp;
i++;
j--;
}
}
double temp_double = RZ_norm[low];
RZ_norm[low] = RZ_norm[j];
RZ_norm[j] = temp_double;
uint32_t temp = RZ_id[low];
RZ_id[low] = RZ_id[j];
RZ_id[j] = temp;
if (j == k - 1)
break;
else if (j < k - 1)
low = j + 1;
else
high = j;
}
}
__syncthreads();
// at this point, the first (reduce_order - 3) entries in RZ_new are the (reduce_order - 3) largest ones
// we choose them as entries for RZ after reduction.
// we compress the rest of the entries to a box with 3 generators
uint32_t base_ori = (link_id * n_time_steps + time_id) * reduce_order; // indices offset for RZ
uint32_t k_start_ori = ((link_id * (link_id + 1)) * n_time_steps + time_id) * reduce_order;
uint32_t k_end_ori = (((link_id + 1) * (link_id + 2)) * n_time_steps + time_id) * reduce_order;
uint32_t k_step_ori = n_time_steps * reduce_order;
if (z_id < reduce_order - 3) { // copy these generators to RZ
uint32_t sorted_id = RZ_id[z_id];
c_idx[base_ori + z_id] = c_idx_new[base + sorted_id];
for (uint32_t h = 0; h < 3; h++) {
RZ[(base_ori + z_id) * 3 + h] = RZ_new[(base + sorted_id) * 3 + h];
}
uint32_t k_pivot = k_start, k_pivot_ori = k_start_ori;
while (k_pivot != k_end && k_pivot_ori != k_end_ori) {
k_idx[k_pivot_ori + z_id] = k_idx_new[k_pivot + sorted_id];
k_pivot += k_step;
k_pivot_ori += k_step_ori;
}
uint32_t C_pivot = k_start, C_pivot_ori = k_start_ori;
while (C_pivot != k_end && C_pivot_ori != k_end_ori) {
C_idx[C_pivot_ori + z_id] = C_idx_new[C_pivot + sorted_id];
C_pivot += k_step;
C_pivot_ori += k_step_ori;
}
}
else if (reduce_order - 3 <= z_id && z_id < reduce_order) { // construct a 3-d box for the rest of the generators
uint32_t box_id = (z_id + 3) - reduce_order;
double entry_sum = 0;
for (uint32_t h = reduce_order - 3; h < norm_length; h++) {
uint32_t sorted_id = RZ_id[h];
entry_sum += abs(RZ_new[(base + sorted_id) * 3 + box_id]);
}
for (uint32_t h = 0; h < 3; h++) {
if (h == box_id) {
RZ[(base_ori + z_id) * 3 + h] = entry_sum;
}
else {
RZ[(base_ori + z_id) * 3 + h] = 0;
}
}
c_idx[base_ori + z_id] = false;
for (uint32_t h = k_start_ori; h < k_end_ori; h += k_step_ori) {
k_idx[h + z_id] = 1;
}
for (uint32_t h = k_start_ori; h < k_end_ori; h += k_step_ori) {
C_idx[h + z_id] = 1;
}
}
}
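// stack(): for every link, concatenate its own rotatotope with the rotatotopes of the preceding
// end effectors (EEs) and the base at each time step, producing dev_RZ_stack with
// RZ_length[link_id] generators per time step, then shift the center by the fixed origin offset.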
void rotatotopeArray::stack(rotatotopeArray &EEs, rotatotopeArray &base) {
RZ_stack = new double*[n_links];
dev_RZ_stack = new double*[n_links];
c_idx_stack = new bool*[n_links];
dev_c_idx_stack = new bool*[n_links];
k_idx_stack = new uint8_t*[n_links];
dev_k_idx_stack = new uint8_t*[n_links];
C_idx_stack = new uint8_t*[n_links];
dev_C_idx_stack = new uint8_t*[n_links];
RZ_length = new uint32_t[n_links];
for (uint32_t link_id = 0; link_id < n_links; link_id++) {
RZ_length[link_id] = reduce_order + link_id * (EEs.reduce_order - 1) + base.reduce_order - 1;
RZ_stack[link_id] = nullptr;
cudaMalloc((void**)&(dev_RZ_stack[link_id]), n_time_steps * RZ_length[link_id] * Z_width * sizeof(double));
c_idx_stack[link_id] = nullptr;
cudaMalloc((void**)&(dev_c_idx_stack[link_id]), n_time_steps * RZ_length[link_id] * sizeof(bool));
k_idx_stack[link_id] = nullptr;
cudaMalloc((void**)&(dev_k_idx_stack[link_id]), 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(uint8_t));
cudaMemset(dev_k_idx_stack[link_id], 0, 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(uint8_t));
C_idx_stack[link_id] = nullptr;
cudaMalloc((void**)&(dev_C_idx_stack[link_id]), 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(uint8_t));
cudaMemset(dev_C_idx_stack[link_id], 0, 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(uint8_t));
// copy dev_RZ to dev_RZ_stack
dim3 grid1(n_time_steps, 1, 1);
dim3 block1(reduce_order, Z_width, 1);
copy_kernel << < grid1, block1 >> > (link_id, dev_RZ, dev_c_idx, dev_k_idx, dev_C_idx, reduce_order, EEs.reduce_order, dev_RZ_stack[link_id], dev_c_idx_stack[link_id], dev_k_idx_stack[link_id], dev_C_idx_stack[link_id]);
// stack with EE
for (int EE_id = link_id - 1; EE_id >= 0; EE_id--) {
dim3 grid2(n_time_steps, 1, 1);
dim3 block2(EEs.reduce_order, Z_width, 1);
stack_kernel << < grid2, block2 >> > (link_id, EE_id, EE_id, reduce_order, EEs.reduce_order, dev_RZ_stack[link_id], EEs.dev_RZ, dev_c_idx_stack[link_id], EEs.dev_c_idx, dev_k_idx_stack[link_id], EEs.dev_k_idx, dev_C_idx_stack[link_id], EEs.dev_C_idx);
}
// stack with base
dim3 grid3(n_time_steps, 1, 1);
dim3 block3(base.reduce_order, Z_width, 1);
stack_kernel << < grid3, block3 >> > (link_id, 0, link_id, reduce_order, base.reduce_order, dev_RZ_stack[link_id], base.dev_RZ, dev_c_idx_stack[link_id], base.dev_c_idx, dev_k_idx_stack[link_id], base.dev_k_idx, dev_C_idx_stack[link_id], base.dev_C_idx);
// origin shift
origin_shift_kernel <<< n_time_steps, 1 >>> (RZ_length[link_id], dev_RZ_stack[link_id]);
}
uint32_t link_id = 0;
if(debugMode){
debug_RZ = new double[n_time_steps * RZ_length[link_id] * Z_width];
cudaMemcpy(debug_RZ, dev_RZ_stack[link_id], n_time_steps * RZ_length[link_id] * Z_width * sizeof(double), cudaMemcpyDeviceToHost);
debug_c_idx = new bool[n_time_steps * RZ_length[link_id]];
cudaMemcpy(debug_c_idx, dev_c_idx_stack[link_id], n_time_steps * RZ_length[link_id] * sizeof(bool), cudaMemcpyDeviceToHost);
debug_k_idx = new uint8_t[2 * (link_id + 1) * n_time_steps * RZ_length[link_id]];
cudaMemcpy(debug_k_idx, dev_k_idx_stack[link_id], 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(uint8_t), cudaMemcpyDeviceToHost);
debug_C_idx = new uint8_t[2 * (link_id + 1) * n_time_steps * RZ_length[link_id]];
cudaMemcpy(debug_C_idx, dev_C_idx_stack[link_id], 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(uint8_t), cudaMemcpyDeviceToHost);
}
else{
debug_RZ = nullptr;
debug_c_idx = nullptr;
debug_k_idx = nullptr;
debug_C_idx = nullptr;
}
}
__global__ void copy_kernel(uint32_t link_id, double* RZ, bool* c_idx, uint8_t* k_idx, uint8_t* C_idx, uint32_t link_reduce_order, uint32_t point_reduce_order, double* RZ_stack, bool* c_idx_stack, uint8_t* k_idx_stack, uint8_t* C_idx_stack) {
uint32_t time_id = blockIdx.x;
uint32_t n_time_steps = gridDim.x;
uint32_t Z_id = threadIdx.x;
uint32_t z_id = threadIdx.y;
uint32_t RZ_length = link_reduce_order + (link_id + 1) * (point_reduce_order - 1);
uint32_t copy_Z = time_id * RZ_length + Z_id;
uint32_t copy_k_start = time_id * RZ_length + Z_id;
uint32_t copy_k_step = n_time_steps * RZ_length;
uint32_t link_Z = (link_id * n_time_steps + time_id) * link_reduce_order + Z_id;
uint32_t link_k_start = ((link_id * (link_id + 1)) * n_time_steps + time_id) * link_reduce_order + Z_id;
uint32_t link_k_end = (((link_id + 1) * (link_id + 2)) * n_time_steps + time_id) * link_reduce_order + Z_id;
uint32_t link_k_step = n_time_steps * link_reduce_order;
RZ_stack[copy_Z * 3 + z_id] = RZ[link_Z * 3 + z_id];
if (z_id == 0) {
c_idx_stack[copy_Z] = c_idx[link_Z];
uint32_t copy_k = copy_k_start;
for (uint32_t link_k = link_k_start; link_k < link_k_end; link_k += link_k_step) {
k_idx_stack[copy_k] = k_idx[link_k];
copy_k += copy_k_step;
}
uint32_t copy_C = copy_k_start;
for (uint32_t link_C = link_k_start; link_C < link_k_end; link_C += link_k_step) {
C_idx_stack[copy_C] = C_idx[link_C];
copy_C += copy_k_step;
}
}
}
__global__ void stack_kernel(uint32_t link_id, uint32_t EE_id, uint32_t stack_offset, uint32_t link_reduce_order, uint32_t point_reduce_order, double* RZ_stack, double* EE_RZ, bool* c_idx_stack, bool* EE_c_idx, uint8_t* k_idx_stack, uint8_t* EE_k_idx, uint8_t* C_idx_stack, uint8_t* EE_C_idx) {
uint32_t time_id = blockIdx.x;
uint32_t n_time_steps = gridDim.x;
uint32_t Z_id = threadIdx.x;
uint32_t z_id = threadIdx.y;
uint32_t RZ_length = link_reduce_order + (link_id + 1) * (point_reduce_order - 1);
uint32_t stack_Z = time_id * RZ_length + Z_id;
uint32_t stack_k_start = time_id * RZ_length + Z_id;
uint32_t stack_k_end = (2 * (link_id + 1) * n_time_steps + time_id) * RZ_length + Z_id;
uint32_t stack_k_step = n_time_steps * RZ_length;
uint32_t EE_Z = (EE_id * n_time_steps + time_id) * point_reduce_order + Z_id;
uint32_t EE_k_start = ((EE_id * (EE_id + 1)) * n_time_steps + time_id) * point_reduce_order + Z_id;
uint32_t EE_k_end = (((EE_id + 1) * (EE_id + 2)) * n_time_steps + time_id) * point_reduce_order + Z_id;
uint32_t EE_k_step = n_time_steps * point_reduce_order;
if (Z_id == 0) { // add the center
RZ_stack[stack_Z * 3 + z_id] += EE_RZ[EE_Z * 3 + z_id];
if (z_id == 0) {
c_idx_stack[stack_Z] = true;
}
}
else { // stack the generators
uint32_t stack_offset_length = link_reduce_order - 1 + stack_offset * (point_reduce_order - 1);
RZ_stack[(stack_Z + stack_offset_length) * 3 + z_id] = EE_RZ[EE_Z * 3 + z_id];
if (z_id == 0) {
c_idx_stack[(stack_Z + stack_offset_length)] = EE_c_idx[EE_Z];
uint32_t EE_k = EE_k_start;
for (uint32_t stack_k = stack_k_start + stack_offset_length; stack_k < stack_k_end + stack_offset_length; stack_k += stack_k_step) {
if (EE_k < EE_k_end) {
k_idx_stack[stack_k] = EE_k_idx[EE_k];
}
else {
k_idx_stack[stack_k] = 0;
}
EE_k += EE_k_step;
}
uint32_t EE_C = EE_k_start;
for (uint32_t stack_C = stack_k_start + stack_offset_length; stack_C < stack_k_end + stack_offset_length; stack_C += stack_k_step) {
if (EE_C < EE_k_end) {
C_idx_stack[stack_C] = EE_C_idx[EE_C];
}
else {
C_idx_stack[stack_C] = 0;
}
EE_C += EE_k_step;
}
}
}
}
__global__ void origin_shift_kernel(uint32_t RZ_length, double* RZ_stack){
uint32_t time_id = blockIdx.x;
uint32_t stack_Z = time_id * RZ_length;
RZ_stack[stack_Z * 3 ] += ORIGIN_SHIFT_X;
RZ_stack[stack_Z * 3 + 1] += ORIGIN_SHIFT_Y;
RZ_stack[stack_Z * 3 + 2] += ORIGIN_SHIFT_Z;
}
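// generate_constraints(): for every link and obstacle, buffer the obstacle zonotope with the
// k-independent generators of the stacked rotatotope, then build a halfspace representation
// (A_con, d_con, delta_con) of the buffered obstacle whose faces are later evaluated at the
// optimization variable lambda by evaluate_constraints().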
void rotatotopeArray::generate_constraints(uint32_t n_obstacles_in, double* OZ, uint32_t OZ_width, uint32_t OZ_length) {
// obstacle constraints
n_obstacles = n_obstacles_in;
if(n_obstacles == 0) return;
uint32_t OZ_unit_length = OZ_length / n_obstacles;
double* dev_OZ;
cudaMalloc((void**)&dev_OZ, OZ_length * OZ_width * sizeof(double));
cudaMemcpy(dev_OZ, OZ, OZ_length * OZ_width * sizeof(double), cudaMemcpyHostToDevice);
A_con = new double*[n_links];
dev_A_con = new double*[n_links];
d_con = new double*[n_links];
dev_d_con = new double*[n_links];
delta_con = new double*[n_links];
dev_delta_con = new double*[n_links];
k_con = new bool*[n_links];
dev_k_con = new bool*[n_links];
k_con_num = new uint8_t*[n_links];
dev_k_con_num = new uint8_t*[n_links];
max_k_con_num = new uint32_t[n_links];
for (uint32_t link_id = 0; link_id < n_links; link_id++) {
uint32_t buff_obstacle_length = RZ_length[link_id] + (OZ_unit_length - 1);
uint32_t constraint_length = ((buff_obstacle_length - 1) * (buff_obstacle_length - 2)) / 2;
// buffer the obstacle by k-independent generators
k_con[link_id] = new bool[2 * (link_id + 1) * n_time_steps * RZ_length[link_id]];
cudaMalloc((void**)&(dev_k_con[link_id]), 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(bool));
k_con_num[link_id] = new uint8_t[n_time_steps];
cudaMalloc((void**)&(dev_k_con_num[link_id]), n_time_steps * sizeof(uint8_t));
double* dev_buff_obstacles;
cudaMalloc((void**)&dev_buff_obstacles, n_obstacles * n_time_steps * buff_obstacle_length * 3 * sizeof(double));
cudaMemset(dev_buff_obstacles, 0, n_obstacles * n_time_steps * buff_obstacle_length * 3 * sizeof(double));
double* dev_frs_k_dep_G;
cudaMalloc((void**)&dev_frs_k_dep_G, n_time_steps * RZ_length[link_id] * 3 * sizeof(double));
cudaMemset(dev_frs_k_dep_G, 0, n_time_steps * RZ_length[link_id] * 3 * sizeof(double));
dim3 grid2(n_obstacles, n_time_steps, 1);
buff_obstacles_kernel << < grid2, RZ_length[link_id] >> > (link_id, RZ_length[link_id], dev_RZ_stack[link_id], dev_c_idx_stack[link_id], dev_k_idx_stack[link_id], dev_C_idx_stack[link_id], dev_OZ, OZ_unit_length, dev_buff_obstacles, dev_frs_k_dep_G, dev_k_con[link_id], dev_k_con_num[link_id]);
if(debugMode){
cudaMemcpy(k_con[link_id], dev_k_con[link_id], 2 * (link_id + 1) * n_time_steps * RZ_length[link_id] * sizeof(bool), cudaMemcpyDeviceToHost);
}
cudaMemcpy(k_con_num[link_id], dev_k_con_num[link_id], n_time_steps * sizeof(uint8_t), cudaMemcpyDeviceToHost);
// find the maximum width of A_con for memory allocation
max_k_con_num[link_id] = 0;
for (uint32_t i = 0; i < n_time_steps; i++) {
if (k_con_num[link_id][i] > max_k_con_num[link_id]) {
max_k_con_num[link_id] = k_con_num[link_id][i];
}
}
// generate obstacles polynomials
cudaMalloc((void**)&(dev_A_con[link_id]), n_obstacles * n_time_steps * constraint_length * max_k_con_num[link_id] * sizeof(double));
cudaMalloc((void**)&(dev_d_con[link_id]), n_obstacles * n_time_steps * constraint_length * sizeof(double));
cudaMalloc((void**)&(dev_delta_con[link_id]), n_obstacles * n_time_steps * constraint_length * sizeof(double));
dim3 grid3(n_obstacles, n_time_steps, 1);
polytope << < grid3, constraint_length >> > (buff_obstacle_length, RZ_length[link_id], dev_buff_obstacles, dev_frs_k_dep_G, dev_k_con_num[link_id], max_k_con_num[link_id], dev_A_con[link_id], dev_d_con[link_id], dev_delta_con[link_id]);
if(debugMode){
A_con[link_id] = new double[n_obstacles * n_time_steps * constraint_length * max_k_con_num[link_id]];
cudaMemcpy(A_con[link_id], dev_A_con[link_id], n_obstacles * n_time_steps * constraint_length * max_k_con_num[link_id] * sizeof(double), cudaMemcpyDeviceToHost);
d_con[link_id] = new double[n_obstacles * n_time_steps * constraint_length];
cudaMemcpy(d_con[link_id], dev_d_con[link_id], n_obstacles * n_time_steps * constraint_length * sizeof(double), cudaMemcpyDeviceToHost);
delta_con[link_id] = new double[n_obstacles * n_time_steps * constraint_length];
cudaMemcpy(delta_con[link_id], dev_delta_con[link_id], n_obstacles * n_time_steps * constraint_length * sizeof(double), cudaMemcpyDeviceToHost);
}
else{
A_con[link_id] = nullptr;
d_con[link_id] = nullptr;
delta_con[link_id] = nullptr;
}
cudaFree(dev_buff_obstacles);
cudaFree(dev_frs_k_dep_G);
}
cudaFree(dev_OZ);
}
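// buff_obstacles_kernel: one block per (obstacle, time step). All threads first mark which
// generators are k-dependent (kc_info); thread 0 then writes the buffered obstacle center and
// generators, thread 1 collects the k-dependent generators and fills k_con / k_con_num, and
// thread 2 appends the k-independent generators, collapsing small ones into a box.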
__global__ void buff_obstacles_kernel(uint32_t link_id, uint32_t RZ_length, double* RZ, bool* c_idx, uint8_t* k_idx, uint8_t* C_idx, double* OZ, uint32_t OZ_unit_length, double* buff_obstacles, double* frs_k_dep_G, bool* k_con, uint8_t* k_con_num) {
uint32_t obstacle_id = blockIdx.x;
uint32_t obstacle_base = obstacle_id * OZ_unit_length;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
uint32_t z_id = threadIdx.x;
uint32_t buff_obstacle_length = RZ_length + 3;
uint32_t RZ_base = time_id * RZ_length;
uint32_t k_start = time_id * RZ_length;
uint32_t k_end = (2 * (link_id + 1) * n_time_steps + time_id) * RZ_length;
uint32_t k_step = n_time_steps * RZ_length;
uint32_t k_con_num_base = time_id;
uint32_t buff_base = (obstacle_id * n_time_steps + time_id) * buff_obstacle_length;
// first, find kc_col
__shared__ bool kc_info[MAX_RZ_LENGTH];
kc_info[z_id] = true;
for (uint32_t i = k_start; i < k_end; i += k_step) {
kc_info[z_id] &= (k_idx[i + z_id] != 1) || (C_idx[i + z_id] != 1);
}
kc_info[z_id] &= c_idx[RZ_base + z_id];
__syncthreads();
if (z_id == 0) { // process the original obstacle zonotope
for (uint32_t i = 0; i < 3; i++) {
buff_obstacles[buff_base * 3 + i] = OZ[obstacle_base * 3 + i] - RZ[RZ_base * 3 + i];
}
for (uint32_t obs_g = 1; obs_g < OZ_unit_length; obs_g++) {
for (uint32_t i = 0; i < 3; i++) {
buff_obstacles[(buff_base + obs_g) * 3 + i] = OZ[(obstacle_base + obs_g) * 3 + i];
// buffer the obstacle; the obstacle generators are assumed to form an identity (axis-aligned) matrix
if(i == obs_g - 1) buff_obstacles[(buff_base + obs_g) * 3 + i] += BUFFER_DIST / 2.0;
}
}
}
else if (z_id == 1) { // find k-dependent generators and complete k_con
if (obstacle_id == 0) {
uint8_t k_dep_num = 0;
for (uint32_t z = 1; z < RZ_length; z++) {
if (kc_info[z]) {
for (uint32_t j = k_start; j < k_end; j += k_step) {
k_con[j + k_dep_num] = (k_idx[j + z] == 2);
}
for (uint32_t i = 0; i < 3; i++) {
frs_k_dep_G[(RZ_base + k_dep_num) * 3 + i] = RZ[(RZ_base + z) * 3 + i];
}
k_dep_num++;
}
}
k_con_num[k_con_num_base] = k_dep_num;
}
}
else if (z_id == 2) { // find k-independent generators and complete buff_obstacles
uint8_t k_indep_num = OZ_unit_length;
// add a test here, reduce small generators to be a box
double reduced_generators[3];
reduced_generators[0] = 0;
reduced_generators[1] = 0;
reduced_generators[2] = 0;
for (uint32_t z = 1; z < RZ_length; z++) {
if (!kc_info[z]) {
double norm = 0;
for (uint32_t i = 0; i < 3; i++) {
norm += RZ[(RZ_base + z) * 3 + i] * RZ[(RZ_base + z) * 3 + i];
}
if(norm >= TOO_SMALL_POLYTOPE_JUDGE){
for (uint32_t i = 0; i < 3; i++) {
buff_obstacles[(buff_base + k_indep_num) * 3 + i] = RZ[(RZ_base + z) * 3 + i];
}
k_indep_num++;
}
else{
for (uint32_t i = 0; i < 3; i++) {
reduced_generators[i] += RZ[(RZ_base + z) * 3 + i];
}
}
}
}
for (uint32_t i = 0; i < 3; i++) {
for (uint32_t j = 0; j < 3; j++){
if(i == j){
buff_obstacles[(buff_base + k_indep_num) * 3 + j] = reduced_generators[i];
}
else{
buff_obstacles[(buff_base + k_indep_num) * 3 + j] = 0;
}
}
k_indep_num++;
}
}
}
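// polytope: one block per (obstacle, time step), one thread per unordered pair of buffered-obstacle
// generators. The cross product of the pair gives a candidate facet normal; A_con stores the
// projections of the k-dependent generators onto that normal, d_con the projected center, and
// delta_con the support of the remaining generators in that direction.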
__global__ void polytope(uint32_t buff_obstacle_length, uint32_t k_dep_G_length, double* buff_obstacles, double* frs_k_dep_G, uint8_t* k_con_num, uint32_t A_con_width, double* A_con, double* d_con, double* delta_con) {
uint32_t obstacle_id = blockIdx.x;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
double buff_obstacle_size = (double)buff_obstacle_length - 1.0;
uint32_t constraint_length = (buff_obstacle_length - 1) * (buff_obstacle_length - 2) / 2;
uint32_t k_con_base = time_id;
uint32_t k_dep_G_base = k_con_base * k_dep_G_length;
uint32_t obs_base = (obstacle_id * n_time_steps + time_id) * buff_obstacle_length;
uint32_t c_id = threadIdx.x;
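// decode the pair index: c_id enumerates all unordered pairs (first, second) with first < second
// over the buffered generators, by inverting the triangular-number layout of the pair list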
uint32_t first = (uint32_t)floor(-0.5*sqrt(4 * buff_obstacle_size * buff_obstacle_size - 4 * buff_obstacle_size - 8.0 * ((double)c_id) + 1.0) + buff_obstacle_size - 0.5);
uint32_t first_base = (obs_base + first + 1) * 3;
uint32_t second = c_id + 1 - ((2 * (buff_obstacle_length - 1) - 3 - first) * first) / 2;
uint32_t second_base = (obs_base + second + 1) * 3;
uint32_t con_base = (obstacle_id * n_time_steps + time_id) * constraint_length + c_id;
double A_1 = buff_obstacles[first_base + 1] * buff_obstacles[second_base + 2] - buff_obstacles[first_base + 2] * buff_obstacles[second_base + 1];
double A_2 = buff_obstacles[first_base + 2] * buff_obstacles[second_base ] - buff_obstacles[first_base ] * buff_obstacles[second_base + 2];
double A_3 = buff_obstacles[first_base ] * buff_obstacles[second_base + 1] - buff_obstacles[first_base + 1] * buff_obstacles[second_base ];
double A_s_q = sqrt(A_1 * A_1 + A_2 * A_2 + A_3 * A_3);
if(A_s_q > 0){
A_1 /= A_s_q;
A_2 /= A_s_q;
A_3 /= A_s_q;
}
else{
A_1 = 0;
A_2 = 0;
A_3 = 0;
}
for (uint32_t i = 0; i < k_con_num[k_con_base]; i++) {
A_con[con_base * A_con_width + i] = A_1 * frs_k_dep_G[(k_dep_G_base + i) * 3] + A_2 * frs_k_dep_G[(k_dep_G_base + i) * 3 + 1] + A_3 * frs_k_dep_G[(k_dep_G_base + i) * 3 + 2];
}
double d = A_1 * buff_obstacles[obs_base * 3] + A_2 * buff_obstacles[obs_base * 3 + 1] + A_3 * buff_obstacles[obs_base * 3 + 2];
double deltaD = 0;
for (uint32_t i = 1; i < buff_obstacle_length - k_con_num[k_con_base]; i++) {
deltaD += abs(A_1 * buff_obstacles[(obs_base + i) * 3] + A_2 * buff_obstacles[(obs_base + i) * 3 + 1] + A_3 * buff_obstacles[(obs_base + i) * 3 + 2]);
}
if (A_s_q > 0) {
d_con[con_base] = d;
delta_con[con_base] = deltaD;
}
else {
d_con[con_base] = 0;
delta_con[con_base] = A_BIG_NUMBER;
}
}
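// generate_self_constraints(): for each pair of links that can self-intersect, build a generator
// zonotope from the two stacked rotatotopes and turn it into the same kind of halfspace
// representation (A_con_self, d_con_self, delta_con_self) used for obstacle avoidance.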
void rotatotopeArray::generate_self_constraints(uint32_t n_pairs_input, uint32_t* self_pairs_input){
n_pairs = n_pairs_input;
self_pairs = self_pairs_input;
A_con_self = new double*[n_pairs];
dev_A_con_self = new double*[n_pairs];
d_con_self = new double*[n_pairs];
dev_d_con_self = new double*[n_pairs];
delta_con_self = new double*[n_pairs];
dev_delta_con_self = new double*[n_pairs];
k_con_self = new bool*[n_pairs];
dev_k_con_self = new bool*[n_pairs];
k_con_num_self = new uint8_t*[n_pairs];
dev_k_con_num_self = new uint8_t*[n_pairs];
max_k_con_num_self = new uint32_t[n_pairs];
for(uint32_t pair_id = 0; pair_id < n_pairs; pair_id++){
uint32_t R1 = self_pairs[pair_id * 2];
uint32_t R2 = self_pairs[pair_id * 2 + 1];
uint32_t R1_length = RZ_length[R1];
uint32_t R2_length = RZ_length[R2];
uint32_t gen_zono_length = R2_length;
uint32_t constraint_length = ((gen_zono_length - 1) * (gen_zono_length - 2)) / 2;
uint32_t k_dep_length = R2_length;
// buffer the obstacle by k-independent generators
k_con_self[pair_id] = new bool[2 * (R2 + 1) * n_time_steps * k_dep_length];
cudaMalloc((void**)&(dev_k_con_self[pair_id]), 2 * (R2 + 1) * n_time_steps * k_dep_length * sizeof(bool));
cudaMemset(dev_k_con_self[pair_id], 0, 2 * (R2 + 1) * n_time_steps * k_dep_length * sizeof(bool));
k_con_num_self[pair_id] = new uint8_t[n_time_steps];
cudaMalloc((void**)&(dev_k_con_num_self[pair_id]), n_time_steps * sizeof(uint8_t));
double* dev_gen_zono;
cudaMalloc((void**)&dev_gen_zono, n_time_steps * gen_zono_length * Z_width * sizeof(double));
cudaMemset(dev_gen_zono, 0, n_time_steps * gen_zono_length * Z_width * sizeof(double));
double* dev_k_dep_pt;
cudaMalloc((void**)&dev_k_dep_pt, n_time_steps * k_dep_length * Z_width * sizeof(double));
cudaMemset(dev_k_dep_pt, 0, n_time_steps * k_dep_length * Z_width * sizeof(double));
gen_zono_kernel << < n_time_steps, R2_length >> > (R1, R2, R1_length, R2_length, dev_RZ_stack[R1], dev_c_idx_stack[R1], dev_k_idx_stack[R1], dev_C_idx_stack[R1], dev_RZ_stack[R2], dev_c_idx_stack[R2], dev_k_idx_stack[R2], dev_C_idx_stack[R2], dev_gen_zono, dev_k_dep_pt, dev_k_con_self[pair_id], dev_k_con_num_self[pair_id]);
if(debugMode){
cudaMemcpy(k_con_self[pair_id], dev_k_con_self[pair_id], 2 * (R2 + 1) * n_time_steps * k_dep_length * sizeof(bool), cudaMemcpyDeviceToHost);
}
cudaMemcpy(k_con_num_self[pair_id], dev_k_con_num_self[pair_id], n_time_steps * sizeof(uint8_t), cudaMemcpyDeviceToHost);
// find the maximum width of A_con for memory allocation
max_k_con_num_self[pair_id] = 0;
for (uint32_t i = 0; i < n_time_steps; i++) {
if (k_con_num_self[pair_id][i] > max_k_con_num_self[pair_id]) {
max_k_con_num_self[pair_id] = k_con_num_self[pair_id][i];
}
}
// generate obstacles polynomials
cudaMalloc((void**)&(dev_A_con_self[pair_id]), n_time_steps * constraint_length * max_k_con_num_self[pair_id] * sizeof(double));
cudaMalloc((void**)&(dev_d_con_self[pair_id]), n_time_steps * constraint_length * sizeof(double));
cudaMalloc((void**)&(dev_delta_con_self[pair_id]), n_time_steps * constraint_length * sizeof(double));
dim3 grid2(1, n_time_steps, 1);
polytope << < grid2, constraint_length >> > (gen_zono_length, k_dep_length, dev_gen_zono, dev_k_dep_pt, dev_k_con_num_self[pair_id], max_k_con_num_self[pair_id], dev_A_con_self[pair_id], dev_d_con_self[pair_id], dev_delta_con_self[pair_id]);
if(debugMode){
A_con_self[pair_id] = new double[n_time_steps * constraint_length * max_k_con_num_self[pair_id]];
cudaMemcpy(A_con_self[pair_id], dev_A_con_self[pair_id], n_time_steps * constraint_length * max_k_con_num_self[pair_id] * sizeof(double), cudaMemcpyDeviceToHost);
d_con_self[pair_id] = new double[n_time_steps * constraint_length];
cudaMemcpy(d_con_self[pair_id], dev_d_con_self[pair_id], n_time_steps * constraint_length * sizeof(double), cudaMemcpyDeviceToHost);
delta_con_self[pair_id] = new double[n_time_steps * constraint_length];
cudaMemcpy(delta_con_self[pair_id], dev_delta_con_self[pair_id], n_time_steps * constraint_length * sizeof(double), cudaMemcpyDeviceToHost);
}
else{
A_con_self[pair_id] = nullptr;
d_con_self[pair_id] = nullptr;
delta_con_self[pair_id] = nullptr;
}
cudaFree(dev_gen_zono);
cudaFree(dev_k_dep_pt);
}
}
__global__ void gen_zono_kernel(uint32_t link_id_1, uint32_t link_id_2, uint32_t RZ_length_1, uint32_t RZ_length_2, double* RZ_1, bool* c_idx_1, uint8_t* k_idx_1, uint8_t* C_idx_1, double* RZ_2, bool* c_idx_2, uint8_t* k_idx_2, uint8_t* C_idx_2, double* gen_zono, double* k_dep_pt, bool* k_con_self, uint8_t* k_con_num_self) {
uint32_t time_id = blockIdx.x;
uint32_t n_time_steps = gridDim.x;
uint32_t z_id = threadIdx.x;
uint32_t gen_zono_length = RZ_length_2;
uint32_t gen_zono_base = time_id * gen_zono_length;
uint32_t k_con_num_base = time_id;
uint32_t RZ_base_1 = time_id * RZ_length_1;
uint32_t k_start_1 = time_id * RZ_length_1;
uint32_t k_end_1 = (2 * (link_id_1 + 1) * n_time_steps + time_id) * RZ_length_1;
uint32_t k_step_1 = n_time_steps * RZ_length_1;
uint32_t RZ_base_2 = time_id * RZ_length_2;
uint32_t k_start_2 = time_id * RZ_length_2;
uint32_t k_end_2 = (2 * (link_id_2 + 1) * n_time_steps + time_id) * RZ_length_2;
uint32_t k_step_2 = n_time_steps * RZ_length_2;
// first, find kc_col for both links in a pair
__shared__ bool kc_info_1[MAX_RZ_LENGTH];
__shared__ bool kc_info_2[MAX_RZ_LENGTH];
if(z_id < RZ_length_1){
kc_info_1[z_id] = true; // start from true so the &= below accumulates over all joints
for (uint32_t i = k_start_1; i < k_end_1; i += k_step_1) {
kc_info_1[z_id] &= (k_idx_1[i + z_id] != 1) || (C_idx_1[i + z_id] != 1);
}
kc_info_1[z_id] &= c_idx_1[RZ_base_1 + z_id];
}
kc_info_2[z_id] = true; // start from true so the &= below accumulates over all joints
for (uint32_t i = k_start_2; i < k_end_2; i += k_step_2) {
kc_info_2[z_id] &= (k_idx_2[i + z_id] != 1) || (C_idx_2[i + z_id] != 1);
}
kc_info_2[z_id] &= c_idx_2[RZ_base_2 + z_id];
__syncthreads();
if (z_id == 0) { // process the center
for (uint32_t i = 0; i < 3; i++) {
gen_zono[gen_zono_base * 3 + i] = RZ_1[RZ_base_1 * 3 + i] - RZ_2[RZ_base_2 * 3 + i];
}
}
else if (z_id == 1) { // find k-dependent generators and complete k_con
uint8_t k_dep_num = 0;
for (uint32_t z = 1; z < RZ_length_1; z++) {
if(k_dep_num >= RZ_length_2){
break;
}
if (kc_info_1[z]) {
for (uint32_t j = k_start_1; j < k_end_1; j += k_step_1) {
k_con_self[j + k_dep_num] = (k_idx_1[j + z] == 2);
}
for (uint32_t i = 0; i < 3; i++) {
k_dep_pt[(RZ_base_2 + k_dep_num) * 3 + i] = -RZ_1[(RZ_base_1 + z) * 3 + i];
}
k_dep_num++;
}
}
for (uint32_t z = 1; z < RZ_length_2; z++) {
if(k_dep_num >= RZ_length_2){
break;
}
if (kc_info_2[z]) {
uint32_t kj = k_start_1;
for (uint32_t j = k_start_2; j < k_end_2; j += k_step_2) {
k_con_self[kj + k_dep_num] = (k_idx_2[j + z] == 2);
kj += k_step_1;
}
for (uint32_t i = 0; i < 3; i++) {
k_dep_pt[(RZ_base_2 + k_dep_num) * 3 + i] = RZ_2[(RZ_base_2 + z) * 3 + i];
}
k_dep_num++;
}
}
k_con_num_self[k_con_num_base] = k_dep_num;
}
else if (z_id == 2) { // find k-independent generators and complete gen_zono
uint8_t k_indep_num = 1;
// add a test here, reduce small generators to be a box
double reduced_generators[3];
reduced_generators[0] = 0;
reduced_generators[1] = 0;
reduced_generators[2] = 0;
for (uint32_t z = 1; z < RZ_length_1; z++) {
if (!kc_info_1[z]) {
double norm = 0;
for (uint32_t i = 0; i < 3; i++) {
norm += RZ_1[(RZ_base_1 + z) * 3 + i] * RZ_1[(RZ_base_1 + z) * 3 + i];
}
if(norm >= TOO_SMALL_POLYTOPE_JUDGE){
for (uint32_t i = 0; i < 3; i++) {
gen_zono[(gen_zono_base + k_indep_num) * 3 + i] = RZ_1[(RZ_base_1 + z) * 3 + i];
}
k_indep_num++;
}
else{
for (uint32_t i = 0; i < 3; i++) {
reduced_generators[i] += RZ_1[(RZ_base_1 + z) * 3 + i];
}
}
}
}
for (uint32_t z = 1; z < RZ_length_2; z++) {
if (!kc_info_2[z]) {
double norm = 0;
for (uint32_t i = 0; i < 3; i++) {
norm += RZ_2[(RZ_base_2 + z) * 3 + i] * RZ_2[(RZ_base_2 + z) * 3 + i];
}
if(norm >= TOO_SMALL_POLYTOPE_JUDGE){
for (uint32_t i = 0; i < 3; i++) {
gen_zono[(gen_zono_base + k_indep_num) * 3 + i] = RZ_2[(RZ_base_2 + z) * 3 + i];
}
k_indep_num++;
}
else{
for (uint32_t i = 0; i < 3; i++) {
reduced_generators[i] += RZ_2[(RZ_base_2 + z) * 3 + i];
}
}
if(k_indep_num >= gen_zono_length - 3){
break;
}
}
}
for (uint32_t i = 0; i < 3; i++) {
for (uint32_t j = 0; j < 3; j++){
if(i == j){
gen_zono[(gen_zono_base + k_indep_num) * 3 + j] = reduced_generators[i] + BUFFER_DIST;
}
else{
gen_zono[(gen_zono_base + k_indep_num) * 3 + j] = 0;
}
}
k_indep_num++;
}
}
}
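// evaluate_constraints(): given a candidate trajectory parameter k_opt, map it to the scaled
// coefficient lambda, evaluate every obstacle and self-intersection polytope constraint on the
// GPU, and copy the constraint values, Jacobians and (lower-triangular) Hessians back to the host.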
void rotatotopeArray::evaluate_constraints(double* k_opt) {
start_t = clock();
if(n_obstacles > 0 && con != nullptr){
delete[] con;
delete[] jaco_con;
delete[] hess_con;
}
if(con_self != nullptr){
delete[] con_self;
delete[] jaco_con_self;
delete[] hess_con_self;
}
memcpy(current_k_opt, k_opt, n_links * 2 * sizeof(double));
double* dev_con = nullptr;
double* dev_jaco_con = nullptr;
double* dev_hess_con = nullptr;
if(n_obstacles > 0){
con = new double[n_links * n_obstacles * n_time_steps];
cudaMalloc((void**)&dev_con, n_links * n_obstacles * n_time_steps * sizeof(double));
jaco_con = new double[n_links * n_obstacles * n_time_steps * n_links * 2];
cudaMalloc((void**)&dev_jaco_con, n_links * n_obstacles * n_time_steps * n_links * 2 * sizeof(double));
cudaMemset(dev_jaco_con, 0, n_links * n_obstacles * n_time_steps * n_links * 2 * sizeof(double));
hess_con = new double[n_links * n_obstacles * n_time_steps * n_links * (n_links * 2 - 1)];
cudaMalloc((void**)&dev_hess_con, n_links * n_obstacles * n_time_steps * n_links * (n_links * 2 - 1) * sizeof(double));
cudaMemset(dev_hess_con, 0, n_links * n_obstacles * n_time_steps * n_links * (n_links * 2 - 1) * sizeof(double));
}
con_self = new double[n_pairs * n_time_steps];
double* dev_con_self;
cudaMalloc((void**)&dev_con_self, n_pairs * n_time_steps * sizeof(double));
jaco_con_self = new double[n_pairs * n_time_steps * n_links * 2];
double* dev_jaco_con_self;
cudaMalloc((void**)&dev_jaco_con_self, n_pairs * n_time_steps * n_links * 2 * sizeof(double));
cudaMemset(dev_jaco_con_self, 0, n_pairs * n_time_steps * n_links * 2 * sizeof(double));
hess_con_self = new double[n_pairs * n_time_steps * n_links * (n_links * 2 - 1)];
double* dev_hess_con_self;
cudaMalloc((void**)&dev_hess_con_self, n_pairs * n_time_steps * n_links * (n_links * 2 - 1) * sizeof(double));
cudaMemset(dev_hess_con_self, 0, n_pairs * n_time_steps * n_links * (n_links * 2 - 1) * sizeof(double));
double* lambda = new double[n_links * 2];
for (uint32_t joint_id = 0; joint_id < n_links * 2; joint_id++) {
lambda[joint_id] = c_k[joint_id] + k_opt[joint_id] / g_k[joint_id];
}
double* dev_lambda;
cudaMalloc((void**)&dev_lambda, n_links * 2 * sizeof(double));
cudaMemcpy(dev_lambda, lambda, n_links * 2 * sizeof(double), cudaMemcpyHostToDevice);
double* dev_g_k;
cudaMalloc((void**)&dev_g_k, n_links * 2 * sizeof(double));
cudaMemcpy(dev_g_k, g_k, n_links * 2 * sizeof(double), cudaMemcpyHostToDevice);
// obstacles constraint evaluation
if(n_obstacles > 0){
for (uint32_t link_id = 0; link_id < n_links; link_id++) {
uint32_t buff_obstacle_length = RZ_length[link_id] + 3;
uint32_t constraint_length = ((buff_obstacle_length - 1) * (buff_obstacle_length - 2)) / 2;
double* dev_con_result; // results of constraint evaluation
bool* dev_index_factor; // whether the constraints are positive or negative
cudaMalloc((void**)&dev_con_result, n_obstacles * n_time_steps * constraint_length * sizeof(double));
cudaMalloc((void**)&dev_index_factor, n_obstacles * n_time_steps * constraint_length * sizeof(bool));
dim3 grid1(n_obstacles, n_time_steps, 1);
dim3 block1(constraint_length, 1, 1);
evaluate_constraints_kernel << < grid1, block1 >> > (dev_lambda, link_id, RZ_length[link_id], dev_A_con[link_id], max_k_con_num[link_id], dev_d_con[link_id], dev_delta_con[link_id], dev_k_con[link_id], dev_k_con_num[link_id], dev_con_result, dev_index_factor);
dim3 grid2(n_obstacles, n_time_steps, 1);
dim3 block2((link_id + 1) * 2, (link_id + 1) * 2, 1);
evaluate_gradient_kernel << < grid2, block2 >> > (dev_con_result, dev_index_factor, link_id, link_id, RZ_length[link_id], constraint_length, dev_lambda, dev_g_k, dev_A_con[link_id], max_k_con_num[link_id], dev_k_con[link_id], dev_k_con_num[link_id], n_links, dev_con, dev_jaco_con, dev_hess_con);
cudaFree(dev_con_result);
cudaFree(dev_index_factor);
}
}
// self intersection constraint evaluation
for (uint32_t pair_id = 0; pair_id < n_pairs; pair_id++) {
uint32_t R1 = self_pairs[pair_id * 2];
uint32_t R2 = self_pairs[pair_id * 2 + 1];
uint32_t R1_length = RZ_length[R1];
uint32_t R2_length = RZ_length[R2];
uint32_t gen_zono_length = R2_length;
uint32_t constraint_length = ((gen_zono_length - 1) * (gen_zono_length - 2)) / 2;
double* dev_con_result; // results of constraint evaluation
bool* dev_index_factor; // whether the constraints are positive or negative
cudaMalloc((void**)&dev_con_result, n_time_steps * constraint_length * sizeof(double));
cudaMalloc((void**)&dev_index_factor, n_time_steps * constraint_length * sizeof(bool));
dim3 grid1(1, n_time_steps, 1);
dim3 block1(constraint_length, 1, 1);
evaluate_constraints_kernel << < grid1, block1 >> > (dev_lambda, R2, R1_length, dev_A_con_self[pair_id], max_k_con_num_self[pair_id], dev_d_con_self[pair_id], dev_delta_con_self[pair_id], dev_k_con_self[pair_id], dev_k_con_num_self[pair_id], dev_con_result, dev_index_factor);
dim3 grid2(1, n_time_steps, 1);
dim3 block2((R2 + 1) * 2, (R2 + 1) * 2, 1);
evaluate_gradient_kernel << < grid2, block2 >> > (dev_con_result, dev_index_factor, R2, pair_id, R1_length, constraint_length, dev_lambda, dev_g_k, dev_A_con_self[pair_id], max_k_con_num_self[pair_id], dev_k_con_self[pair_id], dev_k_con_num_self[pair_id], n_links, dev_con_self, dev_jaco_con_self, dev_hess_con_self);
cudaFree(dev_con_result);
cudaFree(dev_index_factor);
}
if(n_obstacles > 0){
cudaMemcpy(con, dev_con, n_links * n_obstacles * n_time_steps * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(dev_con);
cudaMemcpy(jaco_con, dev_jaco_con, n_links * n_obstacles * n_time_steps * n_links * 2 * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(dev_jaco_con);
cudaMemcpy(hess_con, dev_hess_con, n_links * n_obstacles * n_time_steps * n_links * (n_links * 2 - 1) * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(dev_hess_con);
}
cudaMemcpy(con_self, dev_con_self, n_pairs * n_time_steps * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(dev_con_self);
cudaMemcpy(jaco_con_self, dev_jaco_con_self, n_pairs * n_time_steps * n_links * 2 * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(dev_jaco_con_self);
cudaMemcpy(hess_con_self, dev_hess_con_self, n_pairs * n_time_steps * n_links * (n_links * 2 - 1) * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(dev_hess_con_self);
delete[] lambda;
cudaFree(dev_lambda);
cudaFree(dev_g_k);
end_t = clock();
mexPrintf("CUDA: constraint evaluation time: %.6f ms\n", 1000.0 * (end_t - start_t) / (double)(CLOCKS_PER_SEC));
}
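// evaluate_constraints_kernel: one thread per polytope face. Each face is a polynomial in the
// lambda variables (one monomial per k-dependent generator, selected by k_con); the kernel
// evaluates both signed versions of the face constraint, keeps the larger one, and records its
// sign in index_factor so evaluate_gradient_kernel can differentiate the active face.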
__global__ void evaluate_constraints_kernel(double* lambda, uint32_t link_id, uint32_t RZ_length, double* A_con, uint32_t A_con_width, double* d_con, double* delta_con, bool* k_con, uint8_t* k_con_num, double* con_result, bool* index_factor) {
uint32_t obstacle_id = blockIdx.x;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
uint32_t c_id = threadIdx.x;
uint32_t constraint_length = blockDim.x;
uint32_t k_con_num_base = time_id;
uint32_t con_base = (obstacle_id * n_time_steps + time_id) * constraint_length + c_id;
uint32_t con_result_base = (obstacle_id * n_time_steps + time_id) * constraint_length + c_id;
__shared__ double shared_lambda[6];
__shared__ double lambdas_prod[MAX_K_DEP_SIZE];
if (c_id < 2 * (link_id + 1)) {
shared_lambda[c_id] = lambda[c_id];
}
__syncthreads();
if (c_id < k_con_num[k_con_num_base]) {
double prod = 1.0;
for (uint32_t j = 0; j < 2 * (link_id + 1); j++) {
if (k_con[(j * n_time_steps + time_id) * RZ_length + c_id]) {
prod *= shared_lambda[j];
}
}
lambdas_prod[c_id] = prod;
}
__syncthreads();
if (delta_con[con_base] == A_BIG_NUMBER){
con_result[con_result_base] = -A_BIG_NUMBER;
index_factor[con_result_base] = false;
}
else{
double result = 0;
for (uint32_t p = 0; p < k_con_num[k_con_num_base]; p++){
result += lambdas_prod[p] * A_con[con_base * A_con_width + p];
}
double pos_result = result - d_con[con_base] - delta_con[con_base];
double neg_result = -result + d_con[con_base] - delta_con[con_base];
if(pos_result > neg_result){
con_result[con_result_base] = pos_result;
index_factor[con_result_base] = true;
}
else{
con_result[con_result_base] = neg_result;
index_factor[con_result_base] = false;
}
}
}
__global__ void evaluate_gradient_kernel(double* con_result, bool* index_factor, uint32_t link_id, uint32_t pos_id, uint32_t RZ_length, uint32_t constraint_length, double* lambda, double* g_k, double* A_con, uint32_t A_con_width, bool* k_con, uint8_t* k_con_num, uint32_t n_links, double* con, double* jaco_con, double* hess_con) {
uint32_t obstacle_id = blockIdx.x;
uint32_t n_obstacles = gridDim.x;
uint32_t time_id = blockIdx.y;
uint32_t n_time_steps = gridDim.y;
uint32_t joint_id = threadIdx.x;
uint32_t joint_id_sec = threadIdx.y;
uint32_t k_con_num_base = time_id;
uint32_t con_result_base = (obstacle_id * n_time_steps + time_id) * constraint_length;
__shared__ uint32_t max_idx;
uint32_t valu_con_base = (pos_id * n_obstacles + obstacle_id) * n_time_steps + time_id;
uint32_t jaco_con_base = ((pos_id * n_obstacles + obstacle_id) * n_time_steps + time_id) * n_links * 2;
uint32_t hess_con_base = ((pos_id * n_obstacles + obstacle_id) * n_time_steps + time_id) * n_links * (n_links * 2 - 1);
__shared__ double shared_lambda[6];
__shared__ double max_index_factor;
if (joint_id_sec == 0) {
if(joint_id == 0){
double maximum = -A_BIG_NUMBER - A_BIG_NUMBER;
max_idx = 0;
for (uint32_t i = 0; i < constraint_length; i++) {
double cur = con_result[con_result_base + i];
if (maximum < cur) {
max_idx = con_result_base + i;
maximum = cur;
}
}
con[valu_con_base] = -maximum + CONSERVATIVE_BUFFER;
if(index_factor[max_idx]){
max_index_factor = 1.0;
}
else{
max_index_factor = -1.0;
}
}
else if (joint_id <= 2 * (link_id + 1)) {
shared_lambda[joint_id - 1] = lambda[joint_id - 1];
}
}
__syncthreads();
if(joint_id == joint_id_sec){
double result = 0;
for (uint32_t p = 0; p < k_con_num[k_con_num_base]; p++) {
if(k_con[(joint_id * n_time_steps + time_id) * RZ_length + p]){
double prod = 1.0;
for (uint32_t j = 0; j < 2 * (link_id + 1); j++) {
if (j != joint_id && k_con[(j * n_time_steps + time_id) * RZ_length + p]) {
prod *= shared_lambda[j];
}
}
result += prod * max_index_factor * A_con[max_idx * A_con_width + p];
}
}
jaco_con[jaco_con_base + joint_id] = -result / g_k[joint_id];
}
else if(joint_id > joint_id_sec){
double result = 0;
for (uint32_t p = 0; p < k_con_num[k_con_num_base]; p++) {
if(k_con[(joint_id * n_time_steps + time_id) * RZ_length + p] && k_con[(joint_id_sec * n_time_steps + time_id) * RZ_length + p]){
double prod = 1.0;
for (uint32_t j = 0; j < 2 * (link_id + 1); j++) {
if (j != joint_id && j != joint_id_sec && k_con[(j * n_time_steps + time_id) * RZ_length + p]) {
prod *= shared_lambda[j];
}
}
result += prod * max_index_factor * A_con[max_idx * A_con_width + p];
}
}
// Hessian entries are packed in strict lower-triangular order: the pair (joint_id, joint_id_sec)
// with joint_id > joint_id_sec maps to joint_id * (joint_id - 1) / 2 + joint_id_sec
hess_con[hess_con_base + joint_id * (joint_id - 1) / 2 + joint_id_sec] = -result / g_k[joint_id] / g_k[joint_id_sec];
}
}
rotatotopeArray::~rotatotopeArray() {
cudaFree(dev_Z);
if (n_links > 0) {
cudaFree(dev_RZ);
cudaFree(dev_c_idx);
cudaFree(dev_k_idx);
cudaFree(dev_C_idx);
}
if (c_k != nullptr) {
delete[] c_k;
delete[] g_k;
}
if (dev_RZ_stack != nullptr) {
for (uint32_t i = 0; i < n_links; i++) {
delete[] RZ_stack[i];
}
delete[] RZ_stack;
for (uint32_t i = 0; i < n_links; i++) {
cudaFree(dev_RZ_stack[i]);
}
delete[] dev_RZ_stack;
for (uint32_t i = 0; i < n_links; i++) {
delete[] c_idx_stack[i];
}
delete[] c_idx_stack;
for (uint32_t i = 0; i < n_links; i++) {
cudaFree(dev_c_idx_stack[i]);
}
delete[] dev_c_idx_stack;
for (uint32_t i = 0; i < n_links; i++) {
delete[] k_idx_stack[i];
}
delete[] k_idx_stack;
for (uint32_t i = 0; i < n_links; i++) {
cudaFree(dev_k_idx_stack[i]);
}
delete[] dev_k_idx_stack;
for (uint32_t i = 0; i < n_links; i++) {
delete[] C_idx_stack[i];
}
delete[] C_idx_stack;
for (uint32_t i = 0; i < n_links; i++) {
cudaFree(dev_C_idx_stack[i]);
}
delete[] dev_C_idx_stack;
delete[] RZ_length;
}
if (n_obstacles > 0 && A_con != nullptr) {
for (uint32_t i = 0; i < n_links; i++) {
delete[] A_con[i];
}
delete[] A_con;
for (uint32_t i = 0; i < n_links; i++) {
cudaFree(dev_A_con[i]);
}
delete[] dev_A_con;
for (uint32_t i = 0; i < n_links; i++) {
delete[] d_con[i];
}
delete[] d_con;
for (uint32_t i = 0; i < n_links; i++) {
cudaFree(dev_d_con[i]);
}
delete[] dev_d_con;
for (uint32_t i = 0; i < n_links; i++) {
delete[] delta_con[i];
}
delete[] delta_con;
for (uint32_t i = 0; i < n_links; i++) {
cudaFree(dev_delta_con[i]);
}
delete[] dev_delta_con;
for (uint32_t i = 0; i < n_links; i++) {
delete[] k_con[i];
}
delete[] k_con;
for (uint32_t i = 0; i < n_links; i++) {
cudaFree(dev_k_con[i]);
}
delete[] dev_k_con;
for (uint32_t i = 0; i < n_links; i++) {
delete[] k_con_num[i];
}
delete[] k_con_num;
for (uint32_t i = 0; i < n_links; i++) {
cudaFree(dev_k_con_num[i]);
}
delete[] dev_k_con_num;
delete[] max_k_con_num;
}
if(dev_A_con_self != nullptr){
for (uint32_t i = 0; i < n_pairs; i++) {
delete[] A_con_self[i];
}
delete[] A_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
cudaFree(dev_A_con_self[i]);
}
delete[] dev_A_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
delete[] d_con_self[i];
}
delete[] d_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
cudaFree(dev_d_con_self[i]);
}
delete[] dev_d_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
delete[] delta_con_self[i];
}
delete[] delta_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
cudaFree(dev_delta_con_self[i]);
}
delete[] dev_delta_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
delete[] k_con_self[i];
}
delete[] k_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
cudaFree(dev_k_con_self[i]);
}
delete[] dev_k_con_self;
for (uint32_t i = 0; i < n_pairs; i++) {
delete[] k_con_num_self[i];
}
delete[] k_con_num_self;
for (uint32_t i = 0; i < n_pairs; i++) {
cudaFree(dev_k_con_num_self[i]);
}
delete[] dev_k_con_num_self;
delete[] max_k_con_num_self;
}
if (debug_RZ != nullptr) {
delete[] debug_RZ;
delete[] debug_c_idx;
delete[] debug_k_idx;
delete[] debug_C_idx;
}
if (con != nullptr) {
delete[] con;
delete[] jaco_con;
delete[] hess_con;
}
if (con_self != nullptr) {
delete[] con_self;
delete[] jaco_con_self;
delete[] hess_con_self;
}
delete[] current_k_opt;
}
#endif // !ROTATOTOPE_ARRAY_CPPs
|
d8fd3edb78bf2fb8c01864240a76bb24b33be1c7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/gather_scatter_kernel.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
class TensorAssign {
public:
template <typename tensor_t>
constexpr void operator()(tensor_t* self_data, tensor_t* src_data) const {
*self_data = *src_data;
}
};
static TensorAssign tensor_assign;
class ReduceAdd {
public:
template <
typename tensor_t,
std::enable_if_t<!std::is_same<tensor_t, uint8_t>::value>* = nullptr>
__device__ void operator()(tensor_t* self_data, tensor_t* src_data) const {
platform::CudaAtomicAdd(self_data, *src_data);
}
template <typename tensor_t,
std::enable_if_t<std::is_same<tensor_t, uint8_t>::value>* = nullptr>
__device__ void operator()(tensor_t* self_data, tensor_t* src_data) const {
*self_data += *src_data;
}
};
static ReduceAdd reduce_add;
class ReduceMul {
public:
template <typename tensor_t>
__device__ void operator()(tensor_t* self_data, tensor_t* src_data) const {
*self_data *= *src_data;
// TODO(huangxu96) platform::CudaAtomicMul(*self_data, *src_data);
}
};
static ReduceMul reduce_mul;
template <typename tensor_t, typename index_t, typename func_t,
bool is_scatter_like = true>
__global__ void GatherScatterGPUKernel(
tensor_t* self_data, int dim, const index_t* index_data, tensor_t* src_data,
int64_t inner_dim_size, int select_dim_size, int replaced_select_dim_size,
int64_t outer_dim_size, int64_t numel, const func_t& reduce_op) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= numel) return;
int64_t i, j, k; // i, j, k index the 3 nested loops
// collapsed from the original N-dimensional loop.
/* tid = i * select_dim_size * outer_dim_size + j * outer_dim_size + k */
i = tid / (select_dim_size * outer_dim_size);
int64_t remind = tid % (select_dim_size * outer_dim_size);
j = remind / outer_dim_size;
k = remind % outer_dim_size;
index_t index = index_data[tid];
/*
gather computation formula:
self[i][j][k] = src[index[i][j][k]][j][k] # if dim == 0
self[i][j][k] = src[i][index[i][j][k]][k] # if dim == 1
self[i][j][k] = src[i][j][index[i][j][k]] # if dim == 2
scatter computation formula:
self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
*/
// index matrix has different shape with self matrix or src matrix.
int64_t replace_index = k + index * outer_dim_size +
i * outer_dim_size * replaced_select_dim_size;
int64_t self_idx = is_scatter_like ? replace_index : tid;
int64_t src_idx = is_scatter_like ? tid : replace_index;
reduce_op((tensor_t*)(self_data + self_idx), (tensor_t*)(src_data + src_idx));
}
template <typename tensor_t, typename index_t = int64_t,
bool is_scatter_like = true>
struct gpu_gather_scatter_functor {
template <typename func_t>
void operator()(Tensor self, int dim, const Tensor& index, Tensor src,
const std::string& method_name, const func_t& reduce_op,
const platform::DeviceContext& ctx) {
if (index.numel() == 0) {
return;
}
auto* self_data = self.data<tensor_t>();
auto* index_data = index.data<index_t>();
auto* src_data = src.data<tensor_t>();
int64_t self_size = self.numel();
int64_t index_size = index.numel();
int64_t src_size = src.numel();
auto self_dims = self.dims();
auto index_dims = index.dims();
auto src_dims = src.dims();
if (self_size == 0 || src_size == 0 || index_size == 0) return;
int select_dim_size = index_dims[dim];
// index matrix has different shape with self matrix or src matrix.
int replaced_select_dim_size =
is_scatter_like ? self_dims[dim] : src_dims[dim];
int64_t inner_dim_size = 1;
int64_t outer_dim_size = 1;
for (int64_t i = 0; i < dim; ++i) {
inner_dim_size *= index_dims[i];
}
for (int i = dim + 1; i < index_dims.size(); i++) {
outer_dim_size *= index_dims[i];
}
int block = 512;
int64_t n = inner_dim_size * select_dim_size * outer_dim_size;
int64_t grid = (n + block - 1) / block;
auto stream =
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
hipLaunchKernelGGL(( GatherScatterGPUKernel<tensor_t, index_t, func_t,
is_scatter_like>), dim3(grid), dim3(block), 0, stream,
self_data, dim, index_data, src_data, inner_dim_size, select_dim_size,
replaced_select_dim_size, outer_dim_size, index_size, reduce_op);
}
}; // struct gpu_gather_scatter_functor
template <typename tensor_t, typename index_t>
void gpu_gather_kernel(Tensor self, int dim, const Tensor& index, Tensor result,
const platform::DeviceContext& ctx) {
gpu_gather_scatter_functor<tensor_t, index_t,
/*is_scatter_like=*/false>()(
result, dim, index, self, "gather_out_gpu", tensor_assign, ctx);
return;
}
template <typename tensor_t, typename index_t>
void gpu_scatter_assign_kernel(Tensor self, int dim, const Tensor& index,
Tensor src, const platform::DeviceContext& ctx) {
gpu_gather_scatter_functor<tensor_t, index_t,
/*is_scatter_like=*/true>()(
self, dim, index, src, "scatter_assign_gpu", tensor_assign, ctx);
}
template <typename tensor_t, typename index_t>
void gpu_scatter_add_kernel(Tensor self, int dim, const Tensor& index,
Tensor src, const platform::DeviceContext& ctx) {
gpu_gather_scatter_functor<tensor_t, index_t,
/*is_scatter_like=*/true>()(
self, dim, index, src, "scatter_add_gpu", reduce_add, ctx);
}
template <typename tensor_t, typename index_t>
void gpu_scatter_mul_kernel(Tensor self, int dim, const Tensor& index,
Tensor src, const platform::DeviceContext& ctx) {
gpu_gather_scatter_functor<tensor_t, index_t,
/*is_scatter_like=*/true>()(
self, dim, index, src, "scatter_mul_gpu", reduce_mul, ctx);
}
template <typename tensor_t, typename index_t>
__global__ void ScatterInputGradGPUKernel(
tensor_t* grad_data, int dim, const index_t* index_data,
int64_t inner_dim_size, int select_dim_size, int grad_select_dim_size,
int64_t outer_dim_size, int64_t numel) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= numel) return;
int64_t i, j, k;
i = tid / (select_dim_size * outer_dim_size);
int64_t remind = tid % (select_dim_size * outer_dim_size);
j = remind / outer_dim_size;
k = remind % outer_dim_size;
index_t index = index_data[tid];
int64_t replace_index =
k + index * outer_dim_size + i * outer_dim_size * grad_select_dim_size;
grad_data[replace_index] = 0;
}
template <typename tensor_t, typename index_t>
void gpu_scatter_input_grad_kernel(Tensor self, int dim, const Tensor& index,
Tensor grad,
const platform::DeviceContext& ctx) {
auto* index_data = index.data<index_t>();
auto* grad_data = grad.data<tensor_t>();
auto index_dims = index.dims();
auto grad_dims = grad.dims();
int64_t index_size = index.numel();
int64_t inner_dim_size = 1;
int64_t outer_dim_size = 1;
int select_dim_size = index_dims[dim];
int grad_select_dim_size = grad_dims[dim];
for (int64_t i = 0; i < dim; ++i) {
inner_dim_size *= index_dims[i];
}
for (int i = dim + 1; i < index_dims.size(); i++) {
outer_dim_size *= index_dims[i];
}
int block = 512;
int64_t n = inner_dim_size * select_dim_size * outer_dim_size;
int64_t grid = (n + block - 1) / block;
auto stream =
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
hipLaunchKernelGGL(( ScatterInputGradGPUKernel<tensor_t, index_t>), dim3(grid), dim3(block), 0, stream,
grad_data, dim, index_data, inner_dim_size, select_dim_size,
grad_select_dim_size, outer_dim_size, index_size);
}
Instantiate_Template_Function(gpu_gather_kernel)
Instantiate_Template_Function(gpu_scatter_assign_kernel)
Instantiate_Template_Function(gpu_scatter_add_kernel)
Instantiate_Template_Function(gpu_scatter_mul_kernel)
Instantiate_Template_Function(gpu_scatter_input_grad_kernel)
} // namespace operators
} // namespace paddle
|
d8fd3edb78bf2fb8c01864240a76bb24b33be1c7.cu
|
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/gather_scatter_kernel.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
class TensorAssign {
public:
template <typename tensor_t>
constexpr void operator()(tensor_t* self_data, tensor_t* src_data) const {
*self_data = *src_data;
}
};
static TensorAssign tensor_assign;
class ReduceAdd {
public:
template <
typename tensor_t,
std::enable_if_t<!std::is_same<tensor_t, uint8_t>::value>* = nullptr>
__device__ void operator()(tensor_t* self_data, tensor_t* src_data) const {
platform::CudaAtomicAdd(self_data, *src_data);
}
template <typename tensor_t,
std::enable_if_t<std::is_same<tensor_t, uint8_t>::value>* = nullptr>
__device__ void operator()(tensor_t* self_data, tensor_t* src_data) const {
*self_data += *src_data;
}
};
static ReduceAdd reduce_add;
class ReduceMul {
public:
template <typename tensor_t>
__device__ void operator()(tensor_t* self_data, tensor_t* src_data) const {
*self_data *= *src_data;
// TODO(huangxu96) platform::CudaAtomicMul(*self_data, *src_data);
}
};
static ReduceMul reduce_mul;
template <typename tensor_t, typename index_t, typename func_t,
bool is_scatter_like = true>
__global__ void GatherScatterGPUKernel(
tensor_t* self_data, int dim, const index_t* index_data, tensor_t* src_data,
int64_t inner_dim_size, int select_dim_size, int replaced_select_dim_size,
int64_t outer_dim_size, int64_t numel, const func_t& reduce_op) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= numel) return;
int64_t i, j, k; // i, j, k index the 3 nested loops
// collapsed from the original N-dimensional loop.
/* tid = i * select_dim_size * outer_dim_size + j * outer_dim_size + k */
i = tid / (select_dim_size * outer_dim_size);
int64_t remind = tid % (select_dim_size * outer_dim_size);
j = remind / outer_dim_size;
k = remind % outer_dim_size;
index_t index = index_data[tid];
/*
gather computation formula:
self[i][j][k] = src[index[i][j][k]][j][k] # if dim == 0
self[i][j][k] = src[i][index[i][j][k]][k] # if dim == 1
self[i][j][k] = src[i][j][index[i][j][k]] # if dim == 2
scatter computation formula:
self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
*/
// index matrix has different shape with self matrix or src matrix.
int64_t replace_index = k + index * outer_dim_size +
i * outer_dim_size * replaced_select_dim_size;
int64_t self_idx = is_scatter_like ? replace_index : tid;
int64_t src_idx = is_scatter_like ? tid : replace_index;
reduce_op((tensor_t*)(self_data + self_idx), (tensor_t*)(src_data + src_idx));
}
template <typename tensor_t, typename index_t = int64_t,
bool is_scatter_like = true>
struct gpu_gather_scatter_functor {
template <typename func_t>
void operator()(Tensor self, int dim, const Tensor& index, Tensor src,
const std::string& method_name, const func_t& reduce_op,
const platform::DeviceContext& ctx) {
if (index.numel() == 0) {
return;
}
auto* self_data = self.data<tensor_t>();
auto* index_data = index.data<index_t>();
auto* src_data = src.data<tensor_t>();
int64_t self_size = self.numel();
int64_t index_size = index.numel();
int64_t src_size = src.numel();
auto self_dims = self.dims();
auto index_dims = index.dims();
auto src_dims = src.dims();
if (self_size == 0 || src_size == 0 || index_size == 0) return;
int select_dim_size = index_dims[dim];
// index matrix has different shape with self matrix or src matrix.
int replaced_select_dim_size =
is_scatter_like ? self_dims[dim] : src_dims[dim];
int64_t inner_dim_size = 1;
int64_t outer_dim_size = 1;
for (int64_t i = 0; i < dim; ++i) {
inner_dim_size *= index_dims[i];
}
for (int i = dim + 1; i < index_dims.size(); i++) {
outer_dim_size *= index_dims[i];
}
int block = 512;
int64_t n = inner_dim_size * select_dim_size * outer_dim_size;
int64_t grid = (n + block - 1) / block;
auto stream =
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
GatherScatterGPUKernel<tensor_t, index_t, func_t,
is_scatter_like><<<grid, block, 0, stream>>>(
self_data, dim, index_data, src_data, inner_dim_size, select_dim_size,
replaced_select_dim_size, outer_dim_size, index_size, reduce_op);
}
}; // struct gpu_gather_scatter_functor
template <typename tensor_t, typename index_t>
void gpu_gather_kernel(Tensor self, int dim, const Tensor& index, Tensor result,
const platform::DeviceContext& ctx) {
gpu_gather_scatter_functor<tensor_t, index_t,
/*is_scatter_like=*/false>()(
result, dim, index, self, "gather_out_gpu", tensor_assign, ctx);
return;
}
template <typename tensor_t, typename index_t>
void gpu_scatter_assign_kernel(Tensor self, int dim, const Tensor& index,
Tensor src, const platform::DeviceContext& ctx) {
gpu_gather_scatter_functor<tensor_t, index_t,
/*is_scatter_like=*/true>()(
self, dim, index, src, "scatter_assign_gpu", tensor_assign, ctx);
}
template <typename tensor_t, typename index_t>
void gpu_scatter_add_kernel(Tensor self, int dim, const Tensor& index,
Tensor src, const platform::DeviceContext& ctx) {
gpu_gather_scatter_functor<tensor_t, index_t,
/*is_scatter_like=*/true>()(
self, dim, index, src, "scatter_add_gpu", reduce_add, ctx);
}
template <typename tensor_t, typename index_t>
void gpu_scatter_mul_kernel(Tensor self, int dim, const Tensor& index,
Tensor src, const platform::DeviceContext& ctx) {
gpu_gather_scatter_functor<tensor_t, index_t,
/*is_scatter_like=*/true>()(
self, dim, index, src, "scatter_mul_gpu", reduce_mul, ctx);
}
template <typename tensor_t, typename index_t>
__global__ void ScatterInputGradGPUKernel(
tensor_t* grad_data, int dim, const index_t* index_data,
int64_t inner_dim_size, int select_dim_size, int grad_select_dim_size,
int64_t outer_dim_size, int64_t numel) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= numel) return;
int64_t i, j, k;
i = tid / (select_dim_size * outer_dim_size);
int64_t remind = tid % (select_dim_size * outer_dim_size);
j = remind / outer_dim_size;
k = remind % outer_dim_size;
index_t index = index_data[tid];
int64_t replace_index =
k + index * outer_dim_size + i * outer_dim_size * grad_select_dim_size;
grad_data[replace_index] = 0;
}
template <typename tensor_t, typename index_t>
void gpu_scatter_input_grad_kernel(Tensor self, int dim, const Tensor& index,
Tensor grad,
const platform::DeviceContext& ctx) {
auto* index_data = index.data<index_t>();
auto* grad_data = grad.data<tensor_t>();
auto index_dims = index.dims();
auto grad_dims = grad.dims();
int64_t index_size = index.numel();
int64_t inner_dim_size = 1;
int64_t outer_dim_size = 1;
int select_dim_size = index_dims[dim];
int grad_select_dim_size = grad_dims[dim];
for (int64_t i = 0; i < dim; ++i) {
inner_dim_size *= index_dims[i];
}
for (int i = dim + 1; i < index_dims.size(); i++) {
outer_dim_size *= index_dims[i];
}
int block = 512;
int64_t n = inner_dim_size * select_dim_size * outer_dim_size;
int64_t grid = (n + block - 1) / block;
auto stream =
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
ScatterInputGradGPUKernel<tensor_t, index_t><<<grid, block, 0, stream>>>(
grad_data, dim, index_data, inner_dim_size, select_dim_size,
grad_select_dim_size, outer_dim_size, index_size);
}
Instantiate_Template_Function(gpu_gather_kernel)
Instantiate_Template_Function(gpu_scatter_assign_kernel)
Instantiate_Template_Function(gpu_scatter_add_kernel)
Instantiate_Template_Function(gpu_scatter_mul_kernel)
Instantiate_Template_Function(gpu_scatter_input_grad_kernel)
} // namespace operators
} // namespace paddle
|
edfe3adcaefe133424935cb6fcc926339bb4fca7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//CUDA by Example Page 63
#include<iostream>
using namespace std;
__global__ void addition(int *a, int *b, int *c, int n) {
int large_id = blockIdx.x * blockDim.x + threadIdx.x;
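    // Grid-stride loop: each thread starts at its global index and advances
    // by the total number of launched threads, so any n is covered even when
    // n exceeds gridDim.x * blockDim.x.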
while (large_id < n) {
//if(large_id < n) {
c[large_id] = a[large_id] + b[large_id];
large_id += blockDim.x*gridDim.x;
}
}
int main(void) {
int n;
cin>>n;
//int a[n],b[n],c[n];
int *a, *b, *c;
a = (int *)malloc(n * sizeof(int));
b = (int *)malloc(n * sizeof(int));
c = (int *)malloc(n * sizeof(int));
for(int i = 0; i < n; i++) {
a[i] = i;
b[i] = i;
c[i] = 0;
}
int *dev_a, *dev_b, *dev_c;
hipMalloc(&dev_a, n * sizeof(int));
hipMalloc(&dev_b, n * sizeof(int));
hipMalloc(&dev_c, n * sizeof(int));
hipMemcpy(dev_a, a, n * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, n * sizeof(int), hipMemcpyHostToDevice);
//hipMemcpy(dev_c, c, n * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( addition), dim3(128),dim3(128), 0, 0, dev_a, dev_b, dev_c, n);
hipMemcpy(c, dev_c, n * sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < n; i++) {
cout<<a[i]<<"+"<<b[i]<<"="<<c[i]<<endl;
}
    //verify that the GPU produced the correct results
int count = 0;
bool success = true;
for(int i = 0; i < n; i++) {
if((a[i] + b[i]) != c[i]) {
cout<<"Error in "<<a[i]<<"+"<<b[i]<<"="<<c[i]<<endl;
success = false;
count++;
}
}
if (success) cout<<"We did it"<<endl;
cout<<"Number of errors: "<<count<<endl;
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
//nvcc large_vectors.cu
//./a.exe
//nvprof ./a.exe
|
edfe3adcaefe133424935cb6fcc926339bb4fca7.cu
|
//CUDA by Example Page 63
#include<iostream>
using namespace std;
__global__ void addition(int *a, int *b, int *c, int n) {
int large_id = blockIdx.x * blockDim.x + threadIdx.x;
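    // Grid-stride loop: each thread starts at its global index and advances
    // by the total number of launched threads, so any n is covered even when
    // n exceeds gridDim.x * blockDim.x.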
while (large_id < n) {
//if(large_id < n) {
c[large_id] = a[large_id] + b[large_id];
large_id += blockDim.x*gridDim.x;
}
}
int main(void) {
int n;
cin>>n;
//int a[n],b[n],c[n];
int *a, *b, *c;
a = (int *)malloc(n * sizeof(int));
b = (int *)malloc(n * sizeof(int));
c = (int *)malloc(n * sizeof(int));
for(int i = 0; i < n; i++) {
a[i] = i;
b[i] = i;
c[i] = 0;
}
int *dev_a, *dev_b, *dev_c;
cudaMalloc(&dev_a, n * sizeof(int));
cudaMalloc(&dev_b, n * sizeof(int));
cudaMalloc(&dev_c, n * sizeof(int));
cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, n * sizeof(int), cudaMemcpyHostToDevice);
//cudaMemcpy(dev_c, c, n * sizeof(int), cudaMemcpyHostToDevice);
addition<<<128,128>>>(dev_a, dev_b, dev_c, n);
cudaMemcpy(c, dev_c, n * sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < n; i++) {
cout<<a[i]<<"+"<<b[i]<<"="<<c[i]<<endl;
}
    //verify that the GPU produced the correct results
int count = 0;
bool success = true;
for(int i = 0; i < n; i++) {
if((a[i] + b[i]) != c[i]) {
cout<<"Error in "<<a[i]<<"+"<<b[i]<<"="<<c[i]<<endl;
success = false;
count++;
}
}
if (success) cout<<"We did it"<<endl;
cout<<"Number of errors: "<<count<<endl;
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
//nvcc large_vectors.cu
//./a.exe
//nvprof ./a.exe
|
9fa80b0e2adbf53414110d6b817ab35c88af2149.hip
|
// !!! This is a file automatically generated by hipify!!!
/* -----------------------------------------------------------------------------------------------
Name: Anand Jhunjhunwala
Roll No: 17EC30041
CUDA
Assignment 2: Matrix transpose using rectangular tile
------------------------------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define W 32 //tile dimension W*2W; please keep it a multiple of 32
__host__ void RUN(hipError_t call)
{
hipError_t err = call;
if(err != hipSuccess)
{
fprintf(stderr, " Failed with error code %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
__global__ void mat_transpose(float *d_A, float *d_B, int height, int width)
{
unsigned int x = blockIdx.x*2*W + threadIdx.x;
unsigned int y = blockIdx.y*W + threadIdx.y;
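    // The tile holds W rows x 2W columns; the extra +1 column of padding
    // staggers rows across shared-memory banks so the column-wise reads in
    // the write-back phase do not cause bank conflicts.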
__shared__ float tile[W][2*W + 1];
for(int j=0; j<2*blockDim.x*(W/blockDim.x); j+= blockDim.x)
{
for(int i=0; i<blockDim.y*(W/blockDim.y); i+= blockDim.y)
{
if(x+j < width && y+i < height)
tile[threadIdx.y + i][threadIdx.x + j] = d_A[(y+i)*width + x + j];
}
}
__syncthreads();
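    // Swap the block coordinates so this block writes its tile to the
    // transposed block position in d_B; indexing the tile as
    // tile[threadIdx.x + i][threadIdx.y + j] transposes within the tile
    // while keeping the global writes coalesced along threadIdx.x.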
x = blockIdx.y*W + threadIdx.x;
y = blockIdx.x*2*W + threadIdx.y;
for(int j=0; j<2*blockDim.x*(W/blockDim.x); j+= blockDim.x)
{
for(int i=0; i<blockDim.y*(W/blockDim.y); i+= blockDim.y)
{
if(y+j < width && x+i<height)
d_B[(y+j)*height + x + i]= tile[threadIdx.x+i][threadIdx.y + j];
}
}
}
int main()
{
int max_x = 32; //maximum_thread_per_block 1024
int test_case, N, k=1;
long int i,j;
float *d_A, *h_A, *d_B, *h_B, ms;
printf("\n Enter the number of test cases:");
scanf("%d", &test_case);
printf(" %d\n", test_case);
hipDeviceProp_t devp;
hipEvent_t startEvent, stopEvent;
RUN(hipGetDeviceProperties(&devp, 0));
int shared_mem_size = devp.sharedMemPerBlock;
RUN(hipSetDevice(0));
shared_mem_size = shared_mem_size/(2*sizeof(float));
shared_mem_size = sqrt(shared_mem_size);
if(shared_mem_size < W)
{
printf("\n Not enough shared memory space available \n");
printf("Please reduce W and try again\n");
exit(EXIT_FAILURE);
}
while(test_case)
{
RUN(hipEventCreate(&startEvent));
RUN(hipEventCreate(&stopEvent));
printf("\nRunning test case: %d",k);
printf("\n Enter dimention of Matrix:");
scanf("%d", &N);
printf(" %d\n", N);
h_A = (float *)malloc(N*N*sizeof(float));
h_B = (float *)malloc(N*N*sizeof(float));
printf("\n Enter entries of input matrix:\n");
for(i=0; i<N*N; i++)
{
scanf("%f", &h_A[i]);
}
RUN(hipMalloc((void **)&d_A, N*N*sizeof(float)));
RUN(hipMalloc((void **)&d_B, N*N*sizeof(float)));
RUN(hipMemcpy(d_A, h_A, N*N*sizeof(float), hipMemcpyHostToDevice));
if(N <= max_x)
{
dim3 grid(1,1,1);
dim3 block(N, N, 1);
printf("\nLaunching kernel ");
RUN(hipEventRecord(startEvent,0));
hipLaunchKernelGGL(( mat_transpose), dim3(grid),dim3(block), 0, 0, d_A, d_B, N, N);
RUN(hipEventRecord(stopEvent,0));
RUN(hipEventSynchronize(stopEvent));
RUN(hipEventElapsedTime(&ms, startEvent, stopEvent));
}
else
{
if(N%(2*W) == 0)
{
dim3 grid(N/(2*W), N/(W), 1);
dim3 block(max_x,max_x,1);
printf("\nLaunching kernel ");
RUN(hipEventRecord(startEvent,0));
hipLaunchKernelGGL(( mat_transpose), dim3(grid),dim3(block), 0, 0, d_A, d_B, N, N);
RUN(hipEventRecord(stopEvent,0));
RUN(hipEventSynchronize(stopEvent));
RUN(hipEventElapsedTime(&ms, startEvent, stopEvent));
}
else
{
dim3 grid(N/(2*W) +1, N/W, 1);
dim3 block(max_x,max_x,1);
printf("\nLaunching kernel ");
RUN(hipEventRecord(startEvent,0));
hipLaunchKernelGGL(( mat_transpose), dim3(grid),dim3(block), 0, 0, d_A, d_B, N, N);
RUN(hipEventRecord(stopEvent,0));
RUN(hipEventSynchronize(stopEvent));
RUN(hipEventElapsedTime(&ms, startEvent, stopEvent));
}
}
RUN(hipGetLastError());
RUN(hipMemcpy(h_B, d_B, N*N*sizeof(float), hipMemcpyDeviceToHost));
printf("\n Kernel launch complete \n time taken: %.6f ms\n", ms);
printf("\nPrinting Output:\n");
for(i=0; i<N; i++)
{
for(j=0; j<N; j++) {
printf("%.2f ", h_B[i*N + j]);
}
printf("\n");
}
printf("\n End of test case: %d\n", k);
ms =0;
free(h_A);
free(h_B);
hipFree(d_A);
hipFree(d_B);
test_case = test_case -1;
k = k+1;
RUN(hipEventDestroy(startEvent));
RUN(hipEventDestroy(stopEvent));
}
printf("\n All test cases complete\n");
return 0;
}
|
9fa80b0e2adbf53414110d6b817ab35c88af2149.cu
|
/* -----------------------------------------------------------------------------------------------
Name: Anand Jhunjhunwala
Roll No: 17EC30041
CUDA
Assignment 2: Matrix transpose using rectangular tile
------------------------------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define W 32 //tile dimension W*2W; please keep it a multiple of 32
__host__ void RUN(cudaError_t call)
{
cudaError_t err = call;
if(err != cudaSuccess)
{
fprintf(stderr, " Failed with error code %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
__global__ void mat_transpose(float *d_A, float *d_B, int height, int width)
{
unsigned int x = blockIdx.x*2*W + threadIdx.x;
unsigned int y = blockIdx.y*W + threadIdx.y;
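    // The tile holds W rows x 2W columns; the extra +1 column of padding
    // staggers rows across shared-memory banks so the column-wise reads in
    // the write-back phase do not cause bank conflicts.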
__shared__ float tile[W][2*W + 1];
for(int j=0; j<2*blockDim.x*(W/blockDim.x); j+= blockDim.x)
{
for(int i=0; i<blockDim.y*(W/blockDim.y); i+= blockDim.y)
{
if(x+j < width && y+i < height)
tile[threadIdx.y + i][threadIdx.x + j] = d_A[(y+i)*width + x + j];
}
}
__syncthreads();
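    // Swap the block coordinates so this block writes its tile to the
    // transposed block position in d_B; indexing the tile as
    // tile[threadIdx.x + i][threadIdx.y + j] transposes within the tile
    // while keeping the global writes coalesced along threadIdx.x.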
x = blockIdx.y*W + threadIdx.x;
y = blockIdx.x*2*W + threadIdx.y;
for(int j=0; j<2*blockDim.x*(W/blockDim.x); j+= blockDim.x)
{
for(int i=0; i<blockDim.y*(W/blockDim.y); i+= blockDim.y)
{
if(y+j < width && x+i<height)
d_B[(y+j)*height + x + i]= tile[threadIdx.x+i][threadIdx.y + j];
}
}
}
int main()
{
int max_x = 32; //maximum_thread_per_block 1024
int test_case, N, k=1;
long int i,j;
float *d_A, *h_A, *d_B, *h_B, ms;
printf("\n Enter the number of test cases:");
scanf("%d", &test_case);
printf(" %d\n", test_case);
cudaDeviceProp devp;
cudaEvent_t startEvent, stopEvent;
RUN(cudaGetDeviceProperties(&devp, 0));
int shared_mem_size = devp.sharedMemPerBlock;
RUN(cudaSetDevice(0));
shared_mem_size = shared_mem_size/(2*sizeof(float));
shared_mem_size = sqrt(shared_mem_size);
if(shared_mem_size < W)
{
printf("\n Not enough shared memory space available \n");
printf("Please reduce W and try again\n");
exit(EXIT_FAILURE);
}
while(test_case)
{
RUN(cudaEventCreate(&startEvent));
RUN(cudaEventCreate(&stopEvent));
printf("\nRunning test case: %d",k);
printf("\n Enter dimention of Matrix:");
scanf("%d", &N);
printf(" %d\n", N);
h_A = (float *)malloc(N*N*sizeof(float));
h_B = (float *)malloc(N*N*sizeof(float));
printf("\n Enter entries of input matrix:\n");
for(i=0; i<N*N; i++)
{
scanf("%f", &h_A[i]);
}
RUN(cudaMalloc((void **)&d_A, N*N*sizeof(float)));
RUN(cudaMalloc((void **)&d_B, N*N*sizeof(float)));
RUN(cudaMemcpy(d_A, h_A, N*N*sizeof(float), cudaMemcpyHostToDevice));
if(N <= max_x)
{
dim3 grid(1,1,1);
dim3 block(N, N, 1);
printf("\nLaunching kernel ");
RUN(cudaEventRecord(startEvent,0));
mat_transpose<<<grid,block>>>(d_A, d_B, N, N);
RUN(cudaEventRecord(stopEvent,0));
RUN(cudaEventSynchronize(stopEvent));
RUN(cudaEventElapsedTime(&ms, startEvent, stopEvent));
}
else
{
if(N%(2*W) == 0)
{
dim3 grid(N/(2*W), N/(W), 1);
dim3 block(max_x,max_x,1);
printf("\nLaunching kernel ");
RUN(cudaEventRecord(startEvent,0));
mat_transpose<<<grid,block>>>(d_A, d_B, N, N);
RUN(cudaEventRecord(stopEvent,0));
RUN(cudaEventSynchronize(stopEvent));
RUN(cudaEventElapsedTime(&ms, startEvent, stopEvent));
}
else
{
dim3 grid(N/(2*W) +1, N/W, 1);
dim3 block(max_x,max_x,1);
printf("\nLaunching kernel ");
RUN(cudaEventRecord(startEvent,0));
mat_transpose<<<grid,block>>>(d_A, d_B, N, N);
RUN(cudaEventRecord(stopEvent,0));
RUN(cudaEventSynchronize(stopEvent));
RUN(cudaEventElapsedTime(&ms, startEvent, stopEvent));
}
}
RUN(cudaGetLastError());
RUN(cudaMemcpy(h_B, d_B, N*N*sizeof(float), cudaMemcpyDeviceToHost));
printf("\n Kernel launch complete \n time taken: %.6f ms\n", ms);
printf("\nPrinting Output:\n");
for(i=0; i<N; i++)
{
for(j=0; j<N; j++) {
printf("%.2f ", h_B[i*N + j]);
}
printf("\n");
}
printf("\n End of test case: %d\n", k);
ms =0;
free(h_A);
free(h_B);
cudaFree(d_A);
cudaFree(d_B);
test_case = test_case -1;
k = k+1;
RUN(cudaEventDestroy(startEvent));
RUN(cudaEventDestroy(stopEvent));
}
printf("\n All test cases complete\n");
return 0;
}
|
678acfb5529bd6fd181f4901c92208485abe3f84.hip
|
// !!! This is a file automatically generated by hipify!!!
//Program that applies a set of filters to an image using CUDA
#include <stdio.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
//Namespaces.
using namespace std;
using namespace cv;
//Filter to apply.
__device__ bool aplicar_filtro(int rojo, int verde, int azul, int filtro_a_aplicar)
{
switch (filtro_a_aplicar)
{
    //YELLOW FILTER
case 1:
if (rojo > 200 && verde > 100 && azul < 85)
{
return true;
}
break;
    //BLUE FILTER
case 2:
if (rojo < 80 && verde > 130 && azul > 170)
{
return true;
}
break;
    //GREEN FILTER
case 3:
if (rojo < 91 && verde > 159 && azul < 91)
{
return true;
}
break;
default:
break;
}
return false;
}
__device__ int filtro(const int *imagen, int x, int cantidad, int id, int num_filtro)
{
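    // Each pixel is packed into a single int as r*1000000 + g*1000 + b
    // (three decimal digits per channel). Unpack the channels and, if the
    // pixel does not match the selected filter color, replace all three
    // channels with their average (grayscale).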
int n = imagen[(id * cantidad) + x];
int rojo = 0, verde = 0, azul = 0, promedio = 0;
azul += (n % 1000);
verde += (n / 1000) % 1000;
rojo += (n / 1000000) % 1000;
promedio = ((azul + verde + rojo) / 3);
if (!aplicar_filtro(rojo, verde, azul, num_filtro))
{
azul = promedio;
verde = promedio;
rojo = promedio;
}
return (rojo * 1000000) + (verde * 1000) + azul;
}
//Function executed by each thread.
__global__ void hilo_filtro(const int *d_imagen_rgb, const int ancho, const int alto, const int total_hilos, int *d_imagen_filtrada, int num_filtro)
{
    //Compute how many positions each thread will process
int cantidad = (alto * ancho) / (gridDim.x * blockDim.x);
int id = blockDim.x * blockIdx.x + threadIdx.x;
//printf("Hilo %u desde %u hasta %u \n", id, id*cantidad, (cantidad*id) + cantidad);
for (int i = 0; i < cantidad; i++)
{ //printf("Hilo %u - en la posicion %u\n", id,((id * cantidad) + i));
d_imagen_filtrada[(id * cantidad) + i] = filtro(d_imagen_rgb, i, cantidad, id, num_filtro);
}
}
//Function prototypes
Mat lectura_imagen(String nombre_imagen);
int main(int argc, char **argv)
{
//Variables.
char *nombre_imagen;
Mat imagen, imagen_filtrada;
int num_hilos, num_bloques, num_filtro, iteracion;
    //Read arguments.
nombre_imagen = argv[1];
num_hilos = atoi(argv[2]);
num_filtro = atoi(argv[3]);
num_bloques = atoi(argv[4]);
iteracion = atoi(argv[5]);
if (argc != 6)
{
cout << "Numero incorrecto de argumentos.\n";
return -1;
}
    //Read the image
imagen = lectura_imagen(nombre_imagen);
    //Initialize variables
int ancho = imagen.cols;
int alto = imagen.rows;
imagen_filtrada = imagen.clone();
hipError_t err = hipSuccess;
//Malloc host
int num_elementos = ancho * alto;
size_t size = num_elementos * sizeof(int);
int *h_imagen_rgb = (int *)malloc(size);
int *h_imagen_filtrada = (int *)malloc(size);
    //Pack the image into a flat 1D vector (one packed-RGB int per pixel)
int aux = 0;
for (int i = 0; i < ancho; i++)
{
for (int j = 0; j < alto; j++)
{
h_imagen_rgb[aux] = imagen.at<Vec3b>(j, i)[0];
h_imagen_rgb[aux] += imagen.at<Vec3b>(j, i)[1] * 1000;
h_imagen_rgb[aux] += imagen.at<Vec3b>(j, i)[2] * 1000000;
aux++;
}
}
    //Allocate memory on the device
    //Image
int *d_imagen_rgb = NULL;
err = hipMalloc((void **)&d_imagen_rgb, size);
if (err != hipSuccess)
{
cout << "Error separando espacio imagen normal en GPU " << hipGetErrorString(err) << endl;
return -1;
}
    //Result
int *d_imagen_filtrada = NULL;
err = hipMalloc((void **)&d_imagen_filtrada, size);
if (err != hipSuccess)
{
cout << "Error separando espacio imagen filtrada en GPU " << hipGetErrorString(err) << endl;
return -1;
}
    //Copy the image from host to device
err = hipMemcpy(d_imagen_rgb, h_imagen_rgb, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
cout << "Error copiando datos a GPU " << hipGetErrorString(err) << endl;
return -1;
}
    //Launch the GPU kernel
hipLaunchKernelGGL(( hilo_filtro), dim3(num_bloques), dim3(num_hilos), 0, 0, d_imagen_rgb, ancho, alto, alto, d_imagen_filtrada, num_filtro);
err = hipGetLastError();
if (err != hipSuccess)
{
cout << "Fallo al lanzar Kernel de GPU " << hipGetErrorString(err) << endl;
return -1;
}
    //Copy from GPU back to CPU
err = hipMemcpy(h_imagen_filtrada, d_imagen_filtrada, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
cout << "Error copiando desde GPU a CPU " << hipGetErrorString(err) << endl;
return -1;
}
    //Write out the filtered image.
aux = 0;
for (int i = 0; i < ancho; i++)
{
for (int j = 0; j < alto; j++)
{
imagen_filtrada.at<Vec3b>(j, i)[0] = (unsigned char)((h_imagen_filtrada[aux]) % 1000);
imagen_filtrada.at<Vec3b>(j, i)[1] = (unsigned char)((h_imagen_filtrada[aux] / 1000) % 1000);
imagen_filtrada.at<Vec3b>(j, i)[2] = (unsigned char)((h_imagen_filtrada[aux] / 1000000) % 1000);
aux++;
}
}
    //To save the image and verify that the algorithm does what it should
/*String nombre_archivo = "./Resultados/filtro_" + to_string(num_filtro) + "_"+ to_string(num_bloques) + "_bloques_";
imwrite(nombre_archivo += nombre_imagen, imagen_filtrada);*/
    //Free device memory
err = hipFree(d_imagen_rgb);
if (err != hipSuccess)
{
cout << "Error liberando memoria de imagen normal " << hipGetErrorString(err) << endl;
return -1;
}
err = hipFree(d_imagen_filtrada);
if (err != hipSuccess)
{
cout << "Error liberando memoria de imagen difuminada " << hipGetErrorString(err) << endl;
return -1;
}
free(h_imagen_rgb);
free(h_imagen_filtrada);
return 0;
}
/*****Procedure that reads the image******/
Mat lectura_imagen(String nombre_imagen)
{
    // Read the image
Mat imagen = imread("./Assets/" + nombre_imagen, 1);
    // Error handling in case the image is not found
if (imagen.empty())
{
cout << "Archivo de imagen "
<< "No encontrado" << endl;
cin.get();
return imagen;
}
return imagen;
}
|
678acfb5529bd6fd181f4901c92208485abe3f84.cu
|
//Program that applies a set of filters to an image using CUDA
#include <stdio.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <cuda.h>
#include <cuda_runtime.h>
//Namespaces.
using namespace std;
using namespace cv;
//Filter to apply.
__device__ bool aplicar_filtro(int rojo, int verde, int azul, int filtro_a_aplicar)
{
switch (filtro_a_aplicar)
{
    //YELLOW FILTER
case 1:
if (rojo > 200 && verde > 100 && azul < 85)
{
return true;
}
break;
    //BLUE FILTER
case 2:
if (rojo < 80 && verde > 130 && azul > 170)
{
return true;
}
break;
    //GREEN FILTER
case 3:
if (rojo < 91 && verde > 159 && azul < 91)
{
return true;
}
break;
default:
break;
}
return false;
}
__device__ int filtro(const int *imagen, int x, int cantidad, int id, int num_filtro)
{
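    // Each pixel is packed into a single int as r*1000000 + g*1000 + b
    // (three decimal digits per channel). Unpack the channels and, if the
    // pixel does not match the selected filter color, replace all three
    // channels with their average (grayscale).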
int n = imagen[(id * cantidad) + x];
int rojo = 0, verde = 0, azul = 0, promedio = 0;
azul += (n % 1000);
verde += (n / 1000) % 1000;
rojo += (n / 1000000) % 1000;
promedio = ((azul + verde + rojo) / 3);
if (!aplicar_filtro(rojo, verde, azul, num_filtro))
{
azul = promedio;
verde = promedio;
rojo = promedio;
}
return (rojo * 1000000) + (verde * 1000) + azul;
}
//Function executed by each thread.
__global__ void hilo_filtro(const int *d_imagen_rgb, const int ancho, const int alto, const int total_hilos, int *d_imagen_filtrada, int num_filtro)
{
    //Compute how many positions each thread will process
int cantidad = (alto * ancho) / (gridDim.x * blockDim.x);
int id = blockDim.x * blockIdx.x + threadIdx.x;
//printf("Hilo %u desde %u hasta %u \n", id, id*cantidad, (cantidad*id) + cantidad);
for (int i = 0; i < cantidad; i++)
{ //printf("Hilo %u - en la posicion %u\n", id,((id * cantidad) + i));
d_imagen_filtrada[(id * cantidad) + i] = filtro(d_imagen_rgb, i, cantidad, id, num_filtro);
}
}
//Function prototypes
Mat lectura_imagen(String nombre_imagen);
int main(int argc, char **argv)
{
//Variables.
char *nombre_imagen;
Mat imagen, imagen_filtrada;
int num_hilos, num_bloques, num_filtro, iteracion;
    //Read arguments.
nombre_imagen = argv[1];
num_hilos = atoi(argv[2]);
num_filtro = atoi(argv[3]);
num_bloques = atoi(argv[4]);
iteracion = atoi(argv[5]);
if (argc != 6)
{
cout << "Numero incorrecto de argumentos.\n";
return -1;
}
    //Read the image
imagen = lectura_imagen(nombre_imagen);
    //Initialize variables
int ancho = imagen.cols;
int alto = imagen.rows;
imagen_filtrada = imagen.clone();
cudaError_t err = cudaSuccess;
//Malloc host
int num_elementos = ancho * alto;
size_t size = num_elementos * sizeof(int);
int *h_imagen_rgb = (int *)malloc(size);
int *h_imagen_filtrada = (int *)malloc(size);
    //Pack the image into a flat 1D vector (one packed-RGB int per pixel)
int aux = 0;
for (int i = 0; i < ancho; i++)
{
for (int j = 0; j < alto; j++)
{
h_imagen_rgb[aux] = imagen.at<Vec3b>(j, i)[0];
h_imagen_rgb[aux] += imagen.at<Vec3b>(j, i)[1] * 1000;
h_imagen_rgb[aux] += imagen.at<Vec3b>(j, i)[2] * 1000000;
aux++;
}
}
    //Allocate memory on the device
    //Image
int *d_imagen_rgb = NULL;
err = cudaMalloc((void **)&d_imagen_rgb, size);
if (err != cudaSuccess)
{
cout << "Error separando espacio imagen normal en GPU " << cudaGetErrorString(err) << endl;
return -1;
}
    //Result
int *d_imagen_filtrada = NULL;
err = cudaMalloc((void **)&d_imagen_filtrada, size);
if (err != cudaSuccess)
{
cout << "Error separando espacio imagen filtrada en GPU " << cudaGetErrorString(err) << endl;
return -1;
}
    //Copy the image from host to device
err = cudaMemcpy(d_imagen_rgb, h_imagen_rgb, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
cout << "Error copiando datos a GPU " << cudaGetErrorString(err) << endl;
return -1;
}
    //Launch the GPU kernel
hilo_filtro<<<num_bloques, num_hilos>>>(d_imagen_rgb, ancho, alto, alto, d_imagen_filtrada, num_filtro);
err = cudaGetLastError();
if (err != cudaSuccess)
{
cout << "Fallo al lanzar Kernel de GPU " << cudaGetErrorString(err) << endl;
return -1;
}
    //Copy from GPU back to CPU
err = cudaMemcpy(h_imagen_filtrada, d_imagen_filtrada, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
cout << "Error copiando desde GPU a CPU " << cudaGetErrorString(err) << endl;
return -1;
}
    //Write out the filtered image.
aux = 0;
for (int i = 0; i < ancho; i++)
{
for (int j = 0; j < alto; j++)
{
imagen_filtrada.at<Vec3b>(j, i)[0] = (unsigned char)((h_imagen_filtrada[aux]) % 1000);
imagen_filtrada.at<Vec3b>(j, i)[1] = (unsigned char)((h_imagen_filtrada[aux] / 1000) % 1000);
imagen_filtrada.at<Vec3b>(j, i)[2] = (unsigned char)((h_imagen_filtrada[aux] / 1000000) % 1000);
aux++;
}
}
    //To save the image and verify that the algorithm does what it should
/*String nombre_archivo = "./Resultados/filtro_" + to_string(num_filtro) + "_"+ to_string(num_bloques) + "_bloques_";
imwrite(nombre_archivo += nombre_imagen, imagen_filtrada);*/
    //Free device memory
err = cudaFree(d_imagen_rgb);
if (err != cudaSuccess)
{
cout << "Error liberando memoria de imagen normal " << cudaGetErrorString(err) << endl;
return -1;
}
err = cudaFree(d_imagen_filtrada);
if (err != cudaSuccess)
{
cout << "Error liberando memoria de imagen difuminada " << cudaGetErrorString(err) << endl;
return -1;
}
free(h_imagen_rgb);
free(h_imagen_filtrada);
return 0;
}
/*****Procedure that reads the image******/
Mat lectura_imagen(String nombre_imagen)
{
    // Read the image
Mat imagen = imread("./Assets/" + nombre_imagen, 1);
    // Error handling in case the image is not found
if (imagen.empty())
{
cout << "Archivo de imagen "
<< "No encontrado" << endl;
cin.get();
return imagen;
}
return imagen;
}
|
4d376d81cc429302b276815253bd54389aaaa2e3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*Matrix addition, column by column*/
#include <iostream>
using namespace std;
__global__
void sumaMatrizKernel(float* A, float* B, float* C, int n)
{
int i = threadIdx.x + (blockDim.x * blockIdx.x);
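    // Each thread handles one column i of the row-major matrices: element
    // (row j, column i) sits at offset i + j*n, so consecutive threads touch
    // consecutive addresses on every iteration (coalesced accesses).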
if(i<n)
{
for(int j=0;j<n;j++)
C[i+j*n] = A[i+j*n] + B[i+j*n];
}
}
void sumaMatrix(float* A, float* B, float* C, int tam)
{
int size = (tam*tam) * sizeof(float);
float *d_A,*d_B,*d_C;
hipMalloc((void**)&d_A,size);
hipMalloc((void**)&d_B,size);
hipMalloc((void**)&d_C,size);
hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
hipMemcpy(d_B,B,size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sumaMatrizKernel), dim3(ceil(tam/256.0)),dim3(256), 0, 0, d_A,d_B,d_C,tam);
hipMemcpy(C,d_C,size,hipMemcpyDeviceToHost);
hipFree(d_A);hipFree(d_B);hipFree(d_C);
}
int main()
{
int n = 10;
float *h_A,*h_B,*h_C;
h_A = new float[n*n];
h_B = new float[n*n];
h_C = new float[n*n];
for(int i = 0; i < n; i++)
{
for(int j = 0; j < n; j++)
h_A[i*n+j] = rand() % 100;
}
for(int i = 0; i < n; i++)
{
for(int j = 0; j < n; j++)
h_B[i*n+j] = rand() % 100;
}
cout<<"Los vectores generados son: "<<endl;
for(int i = 0; i < n; i++){
cout<<h_A[i]<<" ; ";
}
cout<<endl;
for(int i = 0; i < n; i++){
cout<<h_B[i]<<" ; ";
}
cout<<endl;
sumaMatrix(h_A,h_B,h_C,n);
cout<<" Suma "<<endl;
for(int i = 0; i < n; i++){
cout<<h_C[i]<<" ; ";
}
cout<<endl;
return 0;
}
|
4d376d81cc429302b276815253bd54389aaaa2e3.cu
|
/*Matrix addition, column by column*/
#include <iostream>
using namespace std;
__global__
void sumaMatrizKernel(float* A, float* B, float* C, int n)
{
int i = threadIdx.x + (blockDim.x * blockIdx.x);
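    // Each thread handles one column i of the row-major matrices: element
    // (row j, column i) sits at offset i + j*n, so consecutive threads touch
    // consecutive addresses on every iteration (coalesced accesses).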
if(i<n)
{
for(int j=0;j<n;j++)
C[i+j*n] = A[i+j*n] + B[i+j*n];
}
}
void sumaMatrix(float* A, float* B, float* C, int tam)
{
int size = (tam*tam) * sizeof(float);
float *d_A,*d_B,*d_C;
cudaMalloc((void**)&d_A,size);
cudaMalloc((void**)&d_B,size);
cudaMalloc((void**)&d_C,size);
cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);
sumaMatrizKernel<<<ceil(tam/256.0),256>>>(d_A,d_B,d_C,tam);
cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);
cudaFree(d_A);cudaFree(d_B);cudaFree(d_C);
}
int main()
{
int n = 10;
float *h_A,*h_B,*h_C;
h_A = new float[n*n];
h_B = new float[n*n];
h_C = new float[n*n];
for(int i = 0; i < n; i++)
{
for(int j = 0; j < n; j++)
h_A[i*n+j] = rand() % 100;
}
for(int i = 0; i < n; i++)
{
for(int j = 0; j < n; j++)
h_B[i*n+j] = rand() % 100;
}
cout<<"Los vectores generados son: "<<endl;
for(int i = 0; i < n; i++){
cout<<h_A[i]<<" ; ";
}
cout<<endl;
for(int i = 0; i < n; i++){
cout<<h_B[i]<<" ; ";
}
cout<<endl;
sumaMatrix(h_A,h_B,h_C,n);
cout<<" Suma "<<endl;
for(int i = 0; i < n; i++){
cout<<h_C[i]<<" ; ";
}
cout<<endl;
return 0;
}
|
3488844932b0a9ec83572665afa22f8878a5db7f.hip
|
// !!! This is a file automatically generated by hipify!!!
// clang-format off
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <new>
#include "../../mem_alloc/mem_alloc_tp.h"
#include "../graph_parser/parse.h"
#include "../graph_parser/util.h"
#include "kernel.hip"
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
obj_alloc my_obj_alloc(&shared_mem, atoll(argv[3]));
hipError_t err = hipSuccess;
if (argc == 4) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
int *rank_array = (int *)malloc(num_nodes * sizeof(int));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
int *cc_d;
// Create device-side buffers for the graph
err = hipMalloc(&row_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&col_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc col_d (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMalloc(&incol_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc incol_d (size:%d) => %s\n",
num_edges, hipGetErrorString(err));
return -1;
}
// Create buffers for cc
err = hipMalloc(&cc_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc cc_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = hipMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy row_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy incol_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
ChiVertex<int, int> **vertex;
GraphChiContext *context;
// err = hipMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int>*));
// if (err != hipSuccess) {
// fprintf(stderr, "ERROR: hipMalloc vertex (size:%d) => %s\n",
// num_edges, hipGetErrorString(err)); return -1;
// }
// err = hipMalloc(&context, sizeof(GraphChiContext));
// if (err != hipSuccess) {
// fprintf(stderr, "ERROR: hipMalloc context (size:%d) => %s\n",
// num_edges, hipGetErrorString(err)); return -1;
// }
vertex = (ChiVertex<int, int> **)my_obj_alloc.calloc<ChiVertex<int, int> *>(
num_nodes);
context = (GraphChiContext *)my_obj_alloc.calloc<GraphChiContext>(1);
printf("Start initCtx\n");
initContext(context, num_nodes, num_edges);
// hipDeviceSynchronize();
// err = hipGetLastError();
// if (err != hipSuccess) {
// fprintf(stderr, "ERROR: initCtx failed (%s)\n",
// hipGetErrorString(err)); return -1;
// }
printf("Start initObj\n");
part0_initObject(vertex, context, row_d, col_d, inrow_d, incol_d,
&my_obj_alloc);
hipLaunchKernelGGL(( part_kern0_initObject), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d,
inrow_d, incol_d);
hipDeviceSynchronize();
part1_initObject(vertex, context, row_d, col_d, inrow_d, incol_d,
&my_obj_alloc);
hipLaunchKernelGGL(( part_kern1_initObject), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d,
inrow_d, incol_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n",
hipGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
hipLaunchKernelGGL(( kern_initOutEdge), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
hipGetErrorString(err));
return -1;
}
double timer5 = gettime();
printf("init time = %lf ms\n", (timer5 - timer3) * 1000);
// printf("%d %d \n",vertex[126]->numOutEdges(),vertex[25152]->numOutEdges()
// );
my_obj_alloc.create_table();
vfun_table = my_obj_alloc.get_vfun_table();
  // Run CC for a fixed number of iterations (ITER). TODO: determine convergence instead.
double timer6 = gettime();
for (int i = 0; i < ITER; i++) {
printf("Start ConnectedComponent\n");
hipLaunchKernelGGL(( ConnectedComponent), dim3(grid), dim3(threads), 0, 0, vertex, context, i);
// ConnectedComponent_vptr<<<grid, threads>>>(vertex, context,i);
printf("Finish ConnectedComponent\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
}
hipDeviceSynchronize();
double timer4 = gettime();
printf("kernel time = %lf ms\n", (timer4 - timer6) * 1000);
printf("Start Copyback\n");
hipLaunchKernelGGL(( copyBack), dim3(grid), dim3(threads), 0, 0, vertex, context, cc_d);
printf("End Copyback\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = hipMemcpy(rank_array, cc_d, num_nodes * sizeof(int),
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy() failed (%s)\n",
hipGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
hipFree(row_d);
hipFree(col_d);
hipFree(inrow_d);
hipFree(incol_d);
hipFree(cc_d);
return 0;
}
void print_vectorf(int *vector, int num) {
FILE *fp = fopen("result_CC.out", "w");
if (!fp) {
printf("ERROR: unable to open result.txt\n");
}
for (int i = 0; i < num; i++) {
fprintf(fp, "%d\n", vector[i]);
}
fclose(fp);
}
|
3488844932b0a9ec83572665afa22f8878a5db7f.cu
|
// clang-format off
/************************************************************************************\
* *
 * Copyright © 2014 Advanced Micro Devices, Inc.                                    *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
 * the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774),  *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <new>
#include "../../mem_alloc/mem_alloc_tp.h"
#include "../graph_parser/parse.h"
#include "../graph_parser/util.h"
#include "kernel.cu"
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
obj_alloc my_obj_alloc(&shared_mem, atoll(argv[3]));
cudaError_t err = cudaSuccess;
if (argc == 4) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
int *rank_array = (int *)malloc(num_nodes * sizeof(int));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
int *cc_d;
// Create device-side buffers for the graph
err = cudaMalloc(&row_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&col_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&incol_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc incol_d (size:%d) => %s\n",
num_edges, cudaGetErrorString(err));
return -1;
}
// Create buffers for cc
err = cudaMalloc(&cc_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc cc_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy row_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy incol_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
ChiVertex<int, int> **vertex;
GraphChiContext *context;
// err = cudaMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int>*));
// if (err != cudaSuccess) {
// fprintf(stderr, "ERROR: cudaMalloc vertex (size:%d) => %s\n",
// num_edges, cudaGetErrorString(err)); return -1;
// }
// err = cudaMalloc(&context, sizeof(GraphChiContext));
// if (err != cudaSuccess) {
// fprintf(stderr, "ERROR: cudaMalloc context (size:%d) => %s\n",
// num_edges, cudaGetErrorString(err)); return -1;
// }
vertex = (ChiVertex<int, int> **)my_obj_alloc.calloc<ChiVertex<int, int> *>(
num_nodes);
context = (GraphChiContext *)my_obj_alloc.calloc<GraphChiContext>(1);
printf("Start initCtx\n");
initContext(context, num_nodes, num_edges);
// cudaDeviceSynchronize();
// err = cudaGetLastError();
// if (err != cudaSuccess) {
// fprintf(stderr, "ERROR: initCtx failed (%s)\n",
// cudaGetErrorString(err)); return -1;
// }
printf("Start initObj\n");
part0_initObject(vertex, context, row_d, col_d, inrow_d, incol_d,
&my_obj_alloc);
part_kern0_initObject<<<grid, threads>>>(vertex, context, row_d, col_d,
inrow_d, incol_d);
cudaDeviceSynchronize();
part1_initObject(vertex, context, row_d, col_d, inrow_d, incol_d,
&my_obj_alloc);
part_kern1_initObject<<<grid, threads>>>(vertex, context, row_d, col_d,
inrow_d, incol_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
kern_initOutEdge<<<grid, threads>>>(vertex, context, row_d, col_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
double timer5 = gettime();
printf("init time = %lf ms\n", (timer5 - timer3) * 1000);
// printf("%d %d \n",vertex[126]->numOutEdges(),vertex[25152]->numOutEdges()
// );
my_obj_alloc.create_table();
vfun_table = my_obj_alloc.get_vfun_table();
  // Run CC for a fixed number of iterations (ITER). TODO: determine convergence instead.
double timer6 = gettime();
for (int i = 0; i < ITER; i++) {
printf("Start ConnectedComponent\n");
ConnectedComponent<<<grid, threads>>>(vertex, context, i);
// ConnectedComponent_vptr<<<grid, threads>>>(vertex, context,i);
printf("Finish ConnectedComponent\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
}
cudaDeviceSynchronize();
double timer4 = gettime();
printf("kernel time = %lf ms\n", (timer4 - timer6) * 1000);
printf("Start Copyback\n");
copyBack<<<grid, threads>>>(vertex, context, cc_d);
printf("End Copyback\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = cudaMemcpy(rank_array, cc_d, num_nodes * sizeof(int),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy() failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
cudaFree(row_d);
cudaFree(col_d);
cudaFree(inrow_d);
cudaFree(incol_d);
cudaFree(cc_d);
return 0;
}
void print_vectorf(int *vector, int num) {
FILE *fp = fopen("result_CC.out", "w");
if (!fp) {
printf("ERROR: unable to open result.txt\n");
}
for (int i = 0; i < num; i++) {
fprintf(fp, "%d\n", vector[i]);
}
fclose(fp);
}
|
5752dbd6141a05725787bbd487d62ef5995ebd04.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N 300
#define BLOCK_SIZE 15
struct timeval start, end;
// get global offset of a given block and given index in block
__device__ int global_offset(int block_row, int block_col, int row, int col) {
return block_row*BLOCK_SIZE*N + N*row + block_col*BLOCK_SIZE + col;
}
__global__ void matmul(int* a, int* b, int* c) {
// which block we're in
int block_row = blockIdx.y;
int block_col = blockIdx.x;
// row and col within block
int row = threadIdx.y;
int col = threadIdx.x;
int c_val = 0;
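  // Classic tiled multiply: walk across the row-block of A and the
  // column-block of B one BLOCK_SIZE x BLOCK_SIZE tile at a time, staging
  // each tile in shared memory and accumulating the partial dot product.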
// for all blocks
for (int i = 0; i < gridDim.x; i++) {
// shared memory buffers
__shared__ int as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int bs[BLOCK_SIZE][BLOCK_SIZE];
// copy data to shared memory buffer
as[row][col] = a[global_offset(block_row, i, row, col)];
bs[row][col] = b[global_offset(i, block_col, row, col)];
__syncthreads();
// matrix multiplication for block
for (int j = 0; j < BLOCK_SIZE; j++) {
c_val += as[row][j] * bs[j][col];
}
__syncthreads();
}
c[global_offset(block_row, block_col, row, col)] = c_val;
}
int main() {
int a[N*N], b[N*N], c[N*N];
int *dev_a, *dev_b, *dev_c;
// allocate memory on the device
hipMalloc((void**)&dev_a, N*N*sizeof(int));
hipMalloc((void**)&dev_b, N*N*sizeof(int));
hipMalloc((void**)&dev_c, N*N*sizeof(int));
// fill arbitrary data into arrays
srand(5);
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
a[i*N+j] = rand();
b[i*N+j] = rand();
c[i*N+j] = 0.0;
}
}
// copy data from host to device
hipMemcpy(dev_a, a, N*N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N*N*sizeof(int), hipMemcpyHostToDevice);
// thread and block sizes
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocks((N+BLOCK_SIZE-1)/BLOCK_SIZE, (N+BLOCK_SIZE-1)/BLOCK_SIZE);
gettimeofday(&start, NULL);
// matrix multiplication kernel
hipLaunchKernelGGL(( matmul), dim3(blocks), dim3(threads), 0, 0, dev_a, dev_b, dev_c);
hipDeviceSynchronize();
gettimeofday(&end, NULL);
// copy data from device to host
hipMemcpy(c, dev_c, N*N*sizeof(int), hipMemcpyDeviceToHost);
// find sum
int sum = 0;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
sum += c[i*N+j];
}
}
std::cout << "sum is " << sum << std::endl;
printf("Seconds elapsed: %f\n",
(end.tv_sec*1000000.0 + end.tv_usec - start.tv_sec*1000000.0 -
start.tv_usec) / 1000000.0);
// free the memory on device
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
}
|
5752dbd6141a05725787bbd487d62ef5995ebd04.cu
|
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N 300
#define BLOCK_SIZE 15
struct timeval start, end;
// get global offset of a given block and given index in block
__device__ int global_offset(int block_row, int block_col, int row, int col) {
return block_row*BLOCK_SIZE*N + N*row + block_col*BLOCK_SIZE + col;
}
__global__ void matmul(int* a, int* b, int* c) {
// which block we're in
int block_row = blockIdx.y;
int block_col = blockIdx.x;
// row and col within block
int row = threadIdx.y;
int col = threadIdx.x;
int c_val = 0;
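  // Classic tiled multiply: walk across the row-block of A and the
  // column-block of B one BLOCK_SIZE x BLOCK_SIZE tile at a time, staging
  // each tile in shared memory and accumulating the partial dot product.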
// for all blocks
for (int i = 0; i < gridDim.x; i++) {
// shared memory buffers
__shared__ int as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int bs[BLOCK_SIZE][BLOCK_SIZE];
// copy data to shared memory buffer
as[row][col] = a[global_offset(block_row, i, row, col)];
bs[row][col] = b[global_offset(i, block_col, row, col)];
__syncthreads();
// matrix multiplication for block
for (int j = 0; j < BLOCK_SIZE; j++) {
c_val += as[row][j] * bs[j][col];
}
__syncthreads();
}
c[global_offset(block_row, block_col, row, col)] = c_val;
}
int main() {
int a[N*N], b[N*N], c[N*N];
int *dev_a, *dev_b, *dev_c;
// allocate memory on the device
cudaMalloc((void**)&dev_a, N*N*sizeof(int));
cudaMalloc((void**)&dev_b, N*N*sizeof(int));
cudaMalloc((void**)&dev_c, N*N*sizeof(int));
// fill arbitrary data into arrays
srand(5);
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
a[i*N+j] = rand();
b[i*N+j] = rand();
c[i*N+j] = 0.0;
}
}
// copy data from host to device
cudaMemcpy(dev_a, a, N*N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N*N*sizeof(int), cudaMemcpyHostToDevice);
// thread and block sizes
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocks((N+BLOCK_SIZE-1)/BLOCK_SIZE, (N+BLOCK_SIZE-1)/BLOCK_SIZE);
gettimeofday(&start, NULL);
// matrix multiplication kernel
matmul<<<blocks, threads>>>(dev_a, dev_b, dev_c);
  cudaDeviceSynchronize();
gettimeofday(&end, NULL);
// copy data from device to host
cudaMemcpy(c, dev_c, N*N*sizeof(int), cudaMemcpyDeviceToHost);
// find sum
int sum = 0;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
sum += c[i*N+j];
}
}
std::cout << "sum is " << sum << std::endl;
printf("Seconds elapsed: %f\n",
(end.tv_sec*1000000.0 + end.tv_usec - start.tv_sec*1000000.0 -
start.tv_usec) / 1000000.0);
// free the memory on device
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
}
|
887e5df28763054e97785bcf7a9a5c36787a9d22.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cassert>
#include<cmath>
#include"header.h"
using namespace std;
//__device__ double jacobian[9];
//__device__ double matrix_b[144]; /// 6x24 B matrix: derivatives of the shape functions in physical coordinates
///Element-level computation uses isoparametric shape functions
__device__ void element_stiffness(int elementno,float *d_material_data,float *elemental_data,int *node_connect,int node_index,int mxelement)
{
float jacobian[9];
float matrix_b[144];
double *gausspoints=new double [nint];
double *weights=new double [nint];
getGausspoints(gausspoints,weights,nint);
//printf("gausspoints : %f %f\n",gausspoints[0],gausspoints[1]);
float d_shapefn[24];
// printf("elementno :%d\n",elementno);
for(int m=0;m<nint;m++)
{
for(int n=0;n<nint;n++)
{
for(int p=0;p<nint;p++)
{
// cout<<"LOOP"<<"\n";
//cout<<"Points "<<gausspoints[m]<<"\t"<<gausspoints[n]<<"\t"<<gausspoints[p]<<"\n";
for(int i=0;i<144;i++) {matrix_b[i]=0;}// initialising
float determinant=0;
///////////////////////////keeping values of derivative of shape function in an array
for(int i=0;i<24;i++)
{ if(i<8)
d_shapefn[i]=d_phi(i+1,gausspoints[m],gausspoints[n],gausspoints[p],'z');
else if(i>=8 && i<16)
d_shapefn[i]=d_phi(i-7,gausspoints[m],gausspoints[n],gausspoints[p],'e');
else
d_shapefn[i]=d_phi(i-15,gausspoints[m],gausspoints[n],gausspoints[p],'j');
}
//__syncthreads();
/*
if(elementno==1)
{ for(int i=0;i<3;i++)
{
for(int j=0;j<8;j++)
{printf("%f ",d_shapefn[i*8+j]);}
printf("hello\n");
}}*/
//__syncthreads();
////////////////calculating the jacobian value
jacobian[0]=jacobian[1]=jacobian[2]=jacobian[3]=0; //initialising
jacobian[4]=jacobian[5]=jacobian[6]=jacobian[7]=jacobian[8]=0;
//__syncthreads();
for(int j=0;j<8;j++)
{
//cout<<"node :"<<nodal_data[3*(connect_matrix[(elementno-1)*8+j])]<<nodal_data[3*(connect_matrix[(elementno-1)*8+j])+2];
float x,y,z;
x=dg_nodal_data[node_index*24+j*mxelement*3+elementno];
y=dg_nodal_data[node_index*24+j*mxelement*3+elementno+mxelement];
z=dg_nodal_data[node_index*24+j*mxelement*3+elementno+2*mxelement];
//x=dg_nodal_data[3*node_connect[(j)]];
//y=dg_nodal_data[3*(node_connect[j])+1];
//z=dg_nodal_data[3*(node_connect[j])+2];
jacobian[0]=jacobian[0]+d_shapefn[j]*x;
jacobian[3]=jacobian[3]+d_shapefn[j+8]*x;
jacobian[6]=jacobian[6]+d_shapefn[j+16]*x;
jacobian[1]=jacobian[1]+d_shapefn[j]*y;
jacobian[4]=jacobian[4]+d_shapefn[j+8]*y;
jacobian[7]=jacobian[7]+d_shapefn[j+16]*y;
jacobian[2]=jacobian[2]+d_shapefn[j]*z;
jacobian[5]=jacobian[5]+d_shapefn[j+8]*z;
jacobian[8]=jacobian[8]+d_shapefn[j+16]*z;
}
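// jacobian[] now holds J (row-major): row 0 = d(x,y,z)/d(zeta), row 1 = d(x,y,z)/d(eta), row 2 = d(x,y,z)/d(zi).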
//__syncthreads();
////////////////////////////***********Determinant
determinant=jacobian[0]*(jacobian[4]*jacobian[8]-jacobian[5]*jacobian[7])-jacobian[1]*(jacobian[3]*jacobian[8]-jacobian[5]*jacobian[6])+jacobian[2]*(jacobian[3]*jacobian[7]-jacobian[4]*jacobian[6]);
// printf("determinant :%f\n",determinant);
/* cout<<"Jacobian "<<endl<<endl;
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
cout<<jacobian[i*3+j]<<" ";
cout<<endl;
}
*/
////////**************************************inverse of jacobian
{
float k[9];
k[0]=(jacobian[4]*jacobian[8]-jacobian[5]*jacobian[7]);
k[1]=-1*(jacobian[3]*jacobian[8]-jacobian[5]*jacobian[6]);
k[2]=(jacobian[3]*jacobian[7]-jacobian[4]*jacobian[6]);
k[3]=-1*(jacobian[1]*jacobian[8]-jacobian[2]*jacobian[7]);
k[4]=(jacobian[0]*jacobian[8]-jacobian[2]*jacobian[6]);
k[5]=-1*(jacobian[0]*jacobian[7]-jacobian[1]*jacobian[6]);
k[6]=(jacobian[1]*jacobian[5]-jacobian[2]*jacobian[4]);
k[7]=-1*(jacobian[0]*jacobian[5]-jacobian[2]*jacobian[3]);
k[8]=(jacobian[0]*jacobian[4]-jacobian[1]*jacobian[3]);
jacobian[0]=k[0]/determinant;
jacobian[3]=k[1]/determinant;
jacobian[6]=k[2]/determinant;
jacobian[1]=k[3]/determinant;
jacobian[4]=k[4]/determinant;
jacobian[7]=k[5]/determinant;
jacobian[2]=k[6]/determinant;
jacobian[5]=k[7]/determinant;
jacobian[8]=k[8]/determinant;
}
/* cout<<"Inverse of jacobian "<<endl<<endl;
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
cout<<jacobian[i*3+j]<<" ";
cout<<endl;
}
*/
////////////////////////////////////////////////calculating matrix_b
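// matrix_b is the 6x24 strain-displacement matrix B stored row-major
// (rows: e_xx, e_yy, e_zz, gamma_yz, gamma_zx, gamma_xy); node i fills a 3-column block starting at column i*3.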
for(int i=0;i<8;i++)
{
matrix_b[i*3]=jacobian[0]*d_shapefn[i]+jacobian[1]*d_shapefn[i+8]+jacobian[2]*d_shapefn[i+16];
//cout<<jacobian[3]<<" "<<jacobian[4]<<" "<<jacobian[5]<<""<<endl;
//cout<<d_shapefn[i]<<" "<<d_shapefn[i+8]<<" "<<d_shapefn[i+16]<<endl;
matrix_b[i*3+25]=jacobian[3]*d_shapefn[i]+jacobian[4]*d_shapefn[i+8]+jacobian[5]*d_shapefn[i+16];
matrix_b[i*3+50]=jacobian[6]*d_shapefn[i]+jacobian[7]*d_shapefn[i+8]+jacobian[8]*d_shapefn[i+16];
matrix_b[i*3+73]=jacobian[6]*d_shapefn[i]+jacobian[7]*d_shapefn[i+8]+jacobian[8]*d_shapefn[i+16];
matrix_b[i*3+74]=jacobian[3]*d_shapefn[i]+jacobian[4]*d_shapefn[i+8]+jacobian[5]*d_shapefn[i+16];
matrix_b[i*3+96]=jacobian[6]*d_shapefn[i]+jacobian[7]*d_shapefn[i+8]+jacobian[8]*d_shapefn[i+16];
matrix_b[i*3+98]=jacobian[0]*d_shapefn[i]+jacobian[1]*d_shapefn[i+8]+jacobian[2]*d_shapefn[i+16];
matrix_b[i*3+120]=jacobian[3]*d_shapefn[i]+jacobian[4]*d_shapefn[i+8]+jacobian[5]*d_shapefn[i+16];
matrix_b[i*3+121]=jacobian[0]*d_shapefn[i]+jacobian[1]*d_shapefn[i+8]+jacobian[2]*d_shapefn[i+16];
}
/* cout<<"\n\n";
for(int i=0;i<6;i++)
{for(int j=0;j<24;j++)
cout<<matrix_b[i*24+j]<<" ";
cout<<"\n";}
*/
/////////////////////////////////multiplication B'CB (note: an alternative, more GPU-friendly approach computes each non-zero entry of B'CB directly without materialising matrix_b)
for(int i=0;i<24;i++)
{
float a=0,b=0,c=0,d=0,e=0,f=0;
a=(matrix_b[i]*d_material_data[0]+matrix_b[i+24]*d_material_data[6]+matrix_b[i+48]*d_material_data[12]+matrix_b[i+72]*d_material_data[18]+matrix_b[i+96]*d_material_data[24]+matrix_b[i+120]*d_material_data[30]);
b=(matrix_b[i]*d_material_data[1]+matrix_b[i+24]*d_material_data[7]+matrix_b[i+48]*d_material_data[13]+matrix_b[i+72]*d_material_data[19]+matrix_b[i+96]*d_material_data[25]+matrix_b[i+120]*d_material_data[31]);
c=(matrix_b[i]*d_material_data[2]+matrix_b[i+24]*d_material_data[8]+matrix_b[i+48]*d_material_data[14]+matrix_b[i+72]*d_material_data[20]+matrix_b[i+96]*d_material_data[26]+matrix_b[i+120]*d_material_data[32]);
d=(matrix_b[i]*d_material_data[3]+matrix_b[i+24]*d_material_data[9]+matrix_b[i+48]*d_material_data[15]+matrix_b[i+72]*d_material_data[21]+matrix_b[i+96]*d_material_data[27]+matrix_b[i+120]*d_material_data[33]);
e=(matrix_b[i]*d_material_data[4]+matrix_b[i+24]*d_material_data[10]+matrix_b[i+48]*d_material_data[16]+matrix_b[i+72]*d_material_data[22]+matrix_b[i+96]*d_material_data[28]+matrix_b[i+120]*d_material_data[34]);
f=(matrix_b[i]*d_material_data[5]+matrix_b[i+24]*d_material_data[11]+matrix_b[i+48]*d_material_data[17]+matrix_b[i+72]*d_material_data[23]+matrix_b[i+96]*d_material_data[29]+matrix_b[i+120]*d_material_data[35]);
// cout<<"a "<<a<<" b "<<b<<" c "<<c<<" d "<<d<<" e "<<e<<" f "<<f<<"\n";
for(int j=0;j<24;j++)
{
elemental_data[i*24+j]=elemental_data[i*24+j]+(a*matrix_b[j]+b*matrix_b[j+24]+c*matrix_b[j+48]+d*matrix_b[j+72]+e*matrix_b[j+96]+f*matrix_b[j+120])*determinant*weights[m]*weights[n]*weights[p];
}
}
//__syncthreads();
/* cout<<"\n\n";
for(int i=0;i<24;i++)
{for(int j=0;j<24;j++)
cout<<elemental_data[i*24+j]<<" ";
cout<<"\n";}
*/
//******************calculating source vector************
/* for(int i=0;i<4;i++)
{
elemental_vector[2*i]+=phi(i+1,gausspoints[m],gausspoints[n],gausspoints[p])*determinant*weights[m]*weights[n]*0;
elemental_vector[2*i+1]+=phi(i+1,gausspoints[m],gausspoints[n],gausspoints[p])*determinant*weights[m]*weights[n]*0;
}*/
}
//__syncthreads();
}
//__syncthreads();
}
delete []gausspoints;
delete []weights;
}
/////////////////////////////////////////gives derivative of linear interpolation
/*double d_phi(int i,double point,char type)
{
assert(i>=1 && i<=4);
if(type=='z')
{
if(i==1 ||i==4)
return (-1+pow(-1,i+1)*point)/4;
return (1+point*pow(-1,i+1))/4;
}
else
{
if(i==1 ||i==2)
return (-1+point*pow(-1,i+1))/4;
return (1+point*pow(-1,i+1))/4;
}
}*/
__device__ double d_phi(int i,double zeta,double eta,double zi,char type)
{
double k=0;
double ret;
assert(i>=1 && i<=8);
switch(type)
{
case 'z':
k=phi(i,zeta,eta,zi);
if(i==1 || i==4 || i==5 || i==8)
ret=-1*k/(1-zeta);
else
ret=k/(1+zeta);
break;
case 'e':
k=phi(i,zeta,eta,zi);
if(i==1 || i==2 || i==5 || i==6)
ret=-1*k/(1-eta);
else
ret=k/(1+eta);
break;
case 'j':
k=phi(i,zeta,eta,zi);
if(i==1 || i==2 || i==3 || i==4)
ret=-1*k/(1-zi);
else
ret=k/(1+zi);
break;
default :
ret=0;//MA
break;
}
return ret;
}
///////////////////////////////////gives linear interpolation function
__device__ double phi(int i,double zeta,double eta,double zi)
{
assert(i>=1 && i<=8);
double ret=0;
switch(i)
{
case 1:
ret=0.125*(1-zeta)*(1-eta)*(1-zi);
break;
case 2:
ret=0.125*(1+zeta)*(1-eta)*(1-zi);
break;
case 3:
ret= 0.125*(1+zeta)*(1+eta)*(1-zi);
break;
case 4:
ret= 0.125*(1-zeta)*(1+eta)*(1-zi);
break;
case 5:
ret= 0.125*(1-zeta)*(1-eta)*(1+zi);
break;
case 6:
ret= 0.125*(1+zeta)*(1-eta)*(1+zi);
break;
case 7:
ret= 0.125*(1+zeta)*(1+eta)*(1+zi);
break;
case 8:
ret= 0.125*(1-zeta)*(1+eta)*(1+zi);
break;
default:
break;
}
return ret;
}
|
887e5df28763054e97785bcf7a9a5c36787a9d22.cu
|
#include<stdio.h>
#include<cassert>
#include<cmath>
#include"header.h"
using namespace std;
//__device__ double jacobian[9];
//__device__ double matrix_b[144]; ///(6x24 matrix contains derivative of shape function in physical coordinates)
///elemental computation uses isoparametric shape function
__device__ void element_stiffness(int elementno,float *d_material_data,float *elemental_data,int *node_connect,int node_index,int mxelement)
{
float jacobian[9];
float matrix_b[144];
double *gausspoints=new double [nint];
double *weights=new double [nint];
getGausspoints(gausspoints,weights,nint);
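// nint quadrature points/weights per direction; the triple (m,n,p) loop below integrates over the
// reference hexahedron, scaling each contribution by det(J)*weights[m]*weights[n]*weights[p].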
//printf("gausspoints : %f %f\n",gausspoints[0],gausspoints[1]);
float d_shapefn[24];
// printf("elementno :%d\n",elementno);
for(int m=0;m<nint;m++)
{
for(int n=0;n<nint;n++)
{
for(int p=0;p<nint;p++)
{
// cout<<"LOOP"<<"\n";
//cout<<"Points "<<gausspoints[m]<<"\t"<<gausspoints[n]<<"\t"<<gausspoints[p]<<"\n";
for(int i=0;i<144;i++) {matrix_b[i]=0;}// initialising
float determinant=0;
///////////////////////////keeping values of derivative of shape function in an array
for(int i=0;i<24;i++)
{ if(i<8)
d_shapefn[i]=d_phi(i+1,gausspoints[m],gausspoints[n],gausspoints[p],'z');
else if(i>=8 && i<16)
d_shapefn[i]=d_phi(i-7,gausspoints[m],gausspoints[n],gausspoints[p],'e');
else
d_shapefn[i]=d_phi(i-15,gausspoints[m],gausspoints[n],gausspoints[p],'j');
}
//__syncthreads();
/*
if(elementno==1)
{ for(int i=0;i<3;i++)
{
for(int j=0;j<8;j++)
{printf("%f ",d_shapefn[i*8+j]);}
printf("hello\n");
}}*/
//__syncthreads();
////////////////calculating the jacobian value
jacobian[0]=jacobian[1]=jacobian[2]=jacobian[3]=0; //initialising
jacobian[4]=jacobian[5]=jacobian[6]=jacobian[7]=jacobian[8]=0;
//__syncthreads();
for(int j=0;j<8;j++)
{
//cout<<"node :"<<nodal_data[3*(connect_matrix[(elementno-1)*8+j])]<<nodal_data[3*(connect_matrix[(elementno-1)*8+j])+2];
float x,y,z;
x=dg_nodal_data[node_index*24+j*mxelement*3+elementno];
y=dg_nodal_data[node_index*24+j*mxelement*3+elementno+mxelement];
z=dg_nodal_data[node_index*24+j*mxelement*3+elementno+2*mxelement];
//x=dg_nodal_data[3*node_connect[(j)]];
//y=dg_nodal_data[3*(node_connect[j])+1];
//z=dg_nodal_data[3*(node_connect[j])+2];
jacobian[0]=jacobian[0]+d_shapefn[j]*x;
jacobian[3]=jacobian[3]+d_shapefn[j+8]*x;
jacobian[6]=jacobian[6]+d_shapefn[j+16]*x;
jacobian[1]=jacobian[1]+d_shapefn[j]*y;
jacobian[4]=jacobian[4]+d_shapefn[j+8]*y;
jacobian[7]=jacobian[7]+d_shapefn[j+16]*y;
jacobian[2]=jacobian[2]+d_shapefn[j]*z;
jacobian[5]=jacobian[5]+d_shapefn[j+8]*z;
jacobian[8]=jacobian[8]+d_shapefn[j+16]*z;
}
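// jacobian[] now holds J (row-major): row 0 = d(x,y,z)/d(zeta), row 1 = d(x,y,z)/d(eta), row 2 = d(x,y,z)/d(zi).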
//__syncthreads();
////////////////////////////***********Determinant
determinant=jacobian[0]*(jacobian[4]*jacobian[8]-jacobian[5]*jacobian[7])-jacobian[1]*(jacobian[3]*jacobian[8]-jacobian[5]*jacobian[6])+jacobian[2]*(jacobian[3]*jacobian[7]-jacobian[4]*jacobian[6]);
// printf("determinant :%f\n",determinant);
/* cout<<"Jacobian "<<endl<<endl;
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
cout<<jacobian[i*3+j]<<" ";
cout<<endl;
}
*/
////////**************************************inverse of jacobian
{
float k[9];
k[0]=(jacobian[4]*jacobian[8]-jacobian[5]*jacobian[7]);
k[1]=-1*(jacobian[3]*jacobian[8]-jacobian[5]*jacobian[6]);
k[2]=(jacobian[3]*jacobian[7]-jacobian[4]*jacobian[6]);
k[3]=-1*(jacobian[1]*jacobian[8]-jacobian[2]*jacobian[7]);
k[4]=(jacobian[0]*jacobian[8]-jacobian[2]*jacobian[6]);
k[5]=-1*(jacobian[0]*jacobian[7]-jacobian[1]*jacobian[6]);
k[6]=(jacobian[1]*jacobian[5]-jacobian[2]*jacobian[4]);
k[7]=-1*(jacobian[0]*jacobian[5]-jacobian[2]*jacobian[3]);
k[8]=(jacobian[0]*jacobian[4]-jacobian[1]*jacobian[3]);
jacobian[0]=k[0]/determinant;
jacobian[3]=k[1]/determinant;
jacobian[6]=k[2]/determinant;
jacobian[1]=k[3]/determinant;
jacobian[4]=k[4]/determinant;
jacobian[7]=k[5]/determinant;
jacobian[2]=k[6]/determinant;
jacobian[5]=k[7]/determinant;
jacobian[8]=k[8]/determinant;
}
/* cout<<"Inverse of jacobian "<<endl<<endl;
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
cout<<jacobian[i*3+j]<<" ";
cout<<endl;
}
*/
////////////////////////////////////////////////calculating matrix_b
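// matrix_b is the 6x24 strain-displacement matrix B stored row-major
// (rows: e_xx, e_yy, e_zz, gamma_yz, gamma_zx, gamma_xy); node i fills a 3-column block starting at column i*3.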
for(int i=0;i<8;i++)
{
matrix_b[i*3]=jacobian[0]*d_shapefn[i]+jacobian[1]*d_shapefn[i+8]+jacobian[2]*d_shapefn[i+16];
//cout<<jacobian[3]<<" "<<jacobian[4]<<" "<<jacobian[5]<<""<<endl;
//cout<<d_shapefn[i]<<" "<<d_shapefn[i+8]<<" "<<d_shapefn[i+16]<<endl;
matrix_b[i*3+25]=jacobian[3]*d_shapefn[i]+jacobian[4]*d_shapefn[i+8]+jacobian[5]*d_shapefn[i+16];
matrix_b[i*3+50]=jacobian[6]*d_shapefn[i]+jacobian[7]*d_shapefn[i+8]+jacobian[8]*d_shapefn[i+16];
matrix_b[i*3+73]=jacobian[6]*d_shapefn[i]+jacobian[7]*d_shapefn[i+8]+jacobian[8]*d_shapefn[i+16];
matrix_b[i*3+74]=jacobian[3]*d_shapefn[i]+jacobian[4]*d_shapefn[i+8]+jacobian[5]*d_shapefn[i+16];
matrix_b[i*3+96]=jacobian[6]*d_shapefn[i]+jacobian[7]*d_shapefn[i+8]+jacobian[8]*d_shapefn[i+16];
matrix_b[i*3+98]=jacobian[0]*d_shapefn[i]+jacobian[1]*d_shapefn[i+8]+jacobian[2]*d_shapefn[i+16];
matrix_b[i*3+120]=jacobian[3]*d_shapefn[i]+jacobian[4]*d_shapefn[i+8]+jacobian[5]*d_shapefn[i+16];
matrix_b[i*3+121]=jacobian[0]*d_shapefn[i]+jacobian[1]*d_shapefn[i+8]+jacobian[2]*d_shapefn[i+16];
}
/* cout<<"\n\n";
for(int i=0;i<6;i++)
{for(int j=0;j<24;j++)
cout<<matrix_b[i*24+j]<<" ";
cout<<"\n";}
*/
/////////////////////////////////multiplication B'CB (note: an alternative, more GPU-friendly approach computes each non-zero entry of B'CB directly without materialising matrix_b)
for(int i=0;i<24;i++)
{
float a=0,b=0,c=0,d=0,e=0,f=0;
a=(matrix_b[i]*d_material_data[0]+matrix_b[i+24]*d_material_data[6]+matrix_b[i+48]*d_material_data[12]+matrix_b[i+72]*d_material_data[18]+matrix_b[i+96]*d_material_data[24]+matrix_b[i+120]*d_material_data[30]);
b=(matrix_b[i]*d_material_data[1]+matrix_b[i+24]*d_material_data[7]+matrix_b[i+48]*d_material_data[13]+matrix_b[i+72]*d_material_data[19]+matrix_b[i+96]*d_material_data[25]+matrix_b[i+120]*d_material_data[31]);
c=(matrix_b[i]*d_material_data[2]+matrix_b[i+24]*d_material_data[8]+matrix_b[i+48]*d_material_data[14]+matrix_b[i+72]*d_material_data[20]+matrix_b[i+96]*d_material_data[26]+matrix_b[i+120]*d_material_data[32]);
d=(matrix_b[i]*d_material_data[3]+matrix_b[i+24]*d_material_data[9]+matrix_b[i+48]*d_material_data[15]+matrix_b[i+72]*d_material_data[21]+matrix_b[i+96]*d_material_data[27]+matrix_b[i+120]*d_material_data[33]);
e=(matrix_b[i]*d_material_data[4]+matrix_b[i+24]*d_material_data[10]+matrix_b[i+48]*d_material_data[16]+matrix_b[i+72]*d_material_data[22]+matrix_b[i+96]*d_material_data[28]+matrix_b[i+120]*d_material_data[34]);
f=(matrix_b[i]*d_material_data[5]+matrix_b[i+24]*d_material_data[11]+matrix_b[i+48]*d_material_data[17]+matrix_b[i+72]*d_material_data[23]+matrix_b[i+96]*d_material_data[29]+matrix_b[i+120]*d_material_data[35]);
// cout<<"a "<<a<<" b "<<b<<" c "<<c<<" d "<<d<<" e "<<e<<" f "<<f<<"\n";
for(int j=0;j<24;j++)
{
elemental_data[i*24+j]=elemental_data[i*24+j]+(a*matrix_b[j]+b*matrix_b[j+24]+c*matrix_b[j+48]+d*matrix_b[j+72]+e*matrix_b[j+96]+f*matrix_b[j+120])*determinant*weights[m]*weights[n]*weights[p];
}
}
//__syncthreads();
/* cout<<"\n\n";
for(int i=0;i<24;i++)
{for(int j=0;j<24;j++)
cout<<elemental_data[i*24+j]<<" ";
cout<<"\n";}
*/
//******************calculating source vector************
/* for(int i=0;i<4;i++)
{
elemental_vector[2*i]+=phi(i+1,gausspoints[m],gausspoints[n],gausspoints[p])*determinant*weights[m]*weights[n]*0;
elemental_vector[2*i+1]+=phi(i+1,gausspoints[m],gausspoints[n],gausspoints[p])*determinant*weights[m]*weights[n]*0;
}*/
}
//__syncthreads();
}
//__syncthreads();
}
delete []gausspoints;
delete []weights;
}
/////////////////////////////////////////gives derivative of linear interpolation
/*double d_phi(int i,double point,char type)
{
assert(i>=1 && i<=4);
if(type=='z')
{
if(i==1 ||i==4)
return (-1+pow(-1,i+1)*point)/4;
return (1+point*pow(-1,i+1))/4;
}
else
{
if(i==1 ||i==2)
return (-1+point*pow(-1,i+1))/4;
return (1+point*pow(-1,i+1))/4;
}
}*/
__device__ double d_phi(int i,double zeta,double eta,double zi,char type)
{
double k=0;
double ret;
assert(i>=1 && i<=8);
switch(type)
{
case 'z':
k=phi(i,zeta,eta,zi);
if(i==1 || i==4 || i==5 || i==8)
ret=-1*k/(1-zeta);
else
ret=k/(1+zeta);
break;
case 'e':
k=phi(i,zeta,eta,zi);
if(i==1 || i==2 || i==5 || i==6)
ret=-1*k/(1-eta);
else
ret=k/(1+eta);
break;
case 'j':
k=phi(i,zeta,eta,zi);
if(i==1 || i==2 || i==3 || i==4)
ret=-1*k/(1-zi);
else
ret=k/(1+zi);
break;
default :
ret=0;//MA
break;
}
return ret;
}
///////////////////////////////////gives linear interpolation function
__device__ double phi(int i,double zeta,double eta,double zi)
{
assert(i>=1 && i<=8);
double ret=0;
switch(i)
{
case 1:
ret=0.125*(1-zeta)*(1-eta)*(1-zi);
break;
case 2:
ret=0.125*(1+zeta)*(1-eta)*(1-zi);
break;
case 3:
ret= 0.125*(1+zeta)*(1+eta)*(1-zi);
break;
case 4:
ret= 0.125*(1-zeta)*(1+eta)*(1-zi);
break;
case 5:
ret= 0.125*(1-zeta)*(1-eta)*(1+zi);
break;
case 6:
ret= 0.125*(1+zeta)*(1-eta)*(1+zi);
break;
case 7:
ret= 0.125*(1+zeta)*(1+eta)*(1+zi);
break;
case 8:
ret= 0.125*(1-zeta)*(1+eta)*(1+zi);
break;
default:
break;
}
return ret;
}
|
b43e0e0aaefbd95fe4adea17f6ece46c93fd190d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rw_cuda_triples.h"
#include <iostream>
#include <thread>
#include <ATen/hip/HIPContext.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "../utils.h"
#include "utils.cuh"
namespace triples {
__device__ RelationTail sample_neighbor_gpu(int64_t target_node,
int64_t padding_index,
const torch::PackedTensorAccessor64<int64_t,2> relation_tail_index,
const torch::PackedTensorAccessor64<int64_t,2> triples_indexed,
hiprandState_t* rand_state
) {
RelationTail rt;
// if target node is padding index, then jump node becomes target node
if(target_node != padding_index){
// get the edge range for the target node
auto start_index = relation_tail_index[target_node][0];
auto end_index = relation_tail_index[target_node][1];
// randomly select an index in this range
if(start_index == -1 || end_index == -1){
rt.relation = padding_index;
rt.tail = padding_index;
}else{
auto nbr_edge_index = sample_int_gpu(start_index,end_index,rand_state);
// get the edge at this index
rt.relation = triples_indexed[nbr_edge_index][1];
rt.tail = triples_indexed[nbr_edge_index][2];
}
}else{
rt.relation = padding_index;
rt.tail = padding_index;
}
return rt;
}
__global__ void uniform_walk_triples_gpu(const torch::PackedTensorAccessor64<int64_t,2> walks,
const torch::PackedTensorAccessor64<int64_t,2> triples_indexed_accessor,
const torch::PackedTensorAccessor64<int64_t,2> relation_tail_index_accessor,
const torch::PackedTensorAccessor64<int64_t,1> target_nodes_accesor,
const int walk_length,
const int64_t padding_idx,
const int64_t num_nodes,
hiprandState_t* rand_states
) {
// get the thread
const auto thread_index = blockIdx.x * blockDim.x + threadIdx.x;
// bound check
if(thread_index < num_nodes) {
// rng
auto rand_state = rand_states[thread_index];
// get the walk array for this node
auto walks_for_node = walks[thread_index];
// get the target node
int64_t target_node = target_nodes_accesor[thread_index];
// add target node as the first node in walk
walks_for_node[0] = target_node;
// start walk
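// walks_for_node has layout [head, rel, tail, rel, tail, ...]; each iteration writes one
// (relation, tail) pair, so the step counter advances by 2.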
int64_t previous_node = target_node;
for (int64_t walk_step=1;walk_step < walk_length;walk_step=walk_step+2){
// sample a neighbor
auto next_rt = sample_neighbor_gpu(previous_node,
padding_idx,
relation_tail_index_accessor,
triples_indexed_accessor,
&rand_state);
walks_for_node[walk_step] = next_rt.relation;
walks_for_node[walk_step+1] = next_rt.tail;
// update previous node
previous_node = next_rt.tail;
}
}
}
__global__ void init_rand_states(const int64_t seed, hiprandState_t *states) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(seed, idx, 0, &states[idx]);
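// hiprand_init(seed, sequence, offset, state): using the thread index as the sequence number
// gives each thread an independent random stream derived from the same seed.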
}
torch::Tensor walk_triples_gpu(const torch::Tensor *triples_indexed,
const torch::Tensor *relation_tail_index,
const torch::Tensor *target_nodes,
const int walk_length,
const int64_t padding_idx,
const bool restart,
const int seed
) {
CHECK_CUDA((*triples_indexed));
CHECK_CUDA((*relation_tail_index));
CHECK_CUDA((*target_nodes));
// construct a tensor to hold the walks
auto walk_size = (walk_length * 2) + 1;
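// each walk row stores the start node plus a (relation, tail) pair per hop, hence 2*walk_length + 1 entries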
auto options = torch::TensorOptions().dtype(torch::kInt64).device(torch::kCUDA,target_nodes->device().index());
auto walks = torch::empty({(*target_nodes).size(0),walk_size},options);
// create accessors
auto walks_accessor = walks.packed_accessor64<int64_t,2>();
auto triples_indexed_accessor = triples_indexed->packed_accessor64<int64_t,2>();
auto relation_tail_index_accessor = relation_tail_index->packed_accessor64<int64_t,2>();
auto target_nodes_accesor = target_nodes->packed_accessor64<int64_t,1>();
// get the number of nodes
int64_t num_nodes = (*target_nodes).size(0);
// Thread block size
int NUM_THREADS = 1024;
// Grid size
int NUM_BLOCKS = int((num_nodes + NUM_THREADS - 1)/NUM_THREADS);
// active stream
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// random states
hiprandState_t *rand_states;
hipMalloc(&rand_states, NUM_THREADS * NUM_BLOCKS * sizeof(hiprandState_t));
int64_t actual_seed = 0;
if(seed==0){
actual_seed = time(NULL);
}else{
actual_seed = seed;
}
// init states
hipLaunchKernelGGL(( init_rand_states), dim3(NUM_BLOCKS), dim3(NUM_THREADS),0,stream, actual_seed,rand_states);
// perform walks
hipLaunchKernelGGL(( uniform_walk_triples_gpu), dim3(NUM_BLOCKS),dim3(NUM_THREADS),0,stream, walks_accessor,
triples_indexed_accessor,
relation_tail_index_accessor,
target_nodes_accesor,
walk_size,
padding_idx,
num_nodes,
rand_states
);
// delete rand states
hipFree(rand_states);
return walks;
}
}
|
b43e0e0aaefbd95fe4adea17f6ece46c93fd190d.cu
|
#include "rw_cuda_triples.h"
#include <iostream>
#include <thread>
#include <ATen/cuda/CUDAContext.h>
#include <curand.h>
#include <curand_kernel.h>
#include "../utils.h"
#include "utils.cuh"
namespace triples {
__device__ RelationTail sample_neighbor_gpu(int64_t target_node,
int64_t padding_index,
const torch::PackedTensorAccessor64<int64_t,2> relation_tail_index,
const torch::PackedTensorAccessor64<int64_t,2> triples_indexed,
curandState_t* rand_state
) {
RelationTail rt;
// if target node is padding index, then jump node becomes target node
if(target_node != padding_index){
// get the edge range for the target node
auto start_index = relation_tail_index[target_node][0];
auto end_index = relation_tail_index[target_node][1];
// randomly select an index in this range
if(start_index == -1 || end_index == -1){
rt.relation = padding_index;
rt.tail = padding_index;
}else{
auto nbr_edge_index = sample_int_gpu(start_index,end_index,rand_state);
// get the edge at this index
rt.relation = triples_indexed[nbr_edge_index][1];
rt.tail = triples_indexed[nbr_edge_index][2];
}
}else{
rt.relation = padding_index;
rt.tail = padding_index;
}
return rt;
}
__global__ void uniform_walk_triples_gpu(const torch::PackedTensorAccessor64<int64_t,2> walks,
const torch::PackedTensorAccessor64<int64_t,2> triples_indexed_accessor,
const torch::PackedTensorAccessor64<int64_t,2> relation_tail_index_accessor,
const torch::PackedTensorAccessor64<int64_t,1> target_nodes_accesor,
const int walk_length,
const int64_t padding_idx,
const int64_t num_nodes,
curandState* rand_states
) {
// get the thread
const auto thread_index = blockIdx.x * blockDim.x + threadIdx.x;
// bound check
if(thread_index < num_nodes) {
// rng
auto rand_state = rand_states[thread_index];
// get the walk array for this node
auto walks_for_node = walks[thread_index];
// get the target node
int64_t target_node = target_nodes_accesor[thread_index];
// add target node as the first node in walk
walks_for_node[0] = target_node;
// start walk
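// walks_for_node has layout [head, rel, tail, rel, tail, ...]; each iteration writes one
// (relation, tail) pair, so the step counter advances by 2.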
int64_t previous_node = target_node;
for (int64_t walk_step=1;walk_step < walk_length;walk_step=walk_step+2){
// sample a neighbor
auto next_rt = sample_neighbor_gpu(previous_node,
padding_idx,
relation_tail_index_accessor,
triples_indexed_accessor,
&rand_state);
walks_for_node[walk_step] = next_rt.relation;
walks_for_node[walk_step+1] = next_rt.tail;
// update previous node
previous_node = next_rt.tail;
}
}
}
__global__ void init_rand_states(const int64_t seed, curandState *states) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(seed, idx, 0, &states[idx]);
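// curand_init(seed, sequence, offset, state): using the thread index as the sequence number
// gives each thread an independent random stream derived from the same seed.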
}
torch::Tensor walk_triples_gpu(const torch::Tensor *triples_indexed,
const torch::Tensor *relation_tail_index,
const torch::Tensor *target_nodes,
const int walk_length,
const int64_t padding_idx,
const bool restart,
const int seed
) {
CHECK_CUDA((*triples_indexed));
CHECK_CUDA((*relation_tail_index));
CHECK_CUDA((*target_nodes));
// construct a tensor to hold the walks
auto walk_size = (walk_length * 2) + 1;
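// each walk row stores the start node plus a (relation, tail) pair per hop, hence 2*walk_length + 1 entries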
auto options = torch::TensorOptions().dtype(torch::kInt64).device(torch::kCUDA,target_nodes->device().index());
auto walks = torch::empty({(*target_nodes).size(0),walk_size},options);
// create accessors
auto walks_accessor = walks.packed_accessor64<int64_t,2>();
auto triples_indexed_accessor = triples_indexed->packed_accessor64<int64_t,2>();
auto relation_tail_index_accessor = relation_tail_index->packed_accessor64<int64_t,2>();
auto target_nodes_accesor = target_nodes->packed_accessor64<int64_t,1>();
// get the number of nodes
int64_t num_nodes = (*target_nodes).size(0);
// Thread block size
int NUM_THREADS = 1024;
// Grid size
int NUM_BLOCKS = int((num_nodes + NUM_THREADS - 1)/NUM_THREADS);
// active stream
auto stream = at::cuda::getCurrentCUDAStream();
// random states
curandState *rand_states;
cudaMalloc(&rand_states, NUM_THREADS * NUM_BLOCKS * sizeof(curandState));
int64_t actual_seed = 0;
if(seed==0){
actual_seed = time(NULL);
}else{
actual_seed = seed;
}
// init states
init_rand_states<<<NUM_BLOCKS, NUM_THREADS,0,stream>>>(actual_seed,rand_states);
// perform walks
uniform_walk_triples_gpu<<<NUM_BLOCKS,NUM_THREADS,0,stream>>>(walks_accessor,
triples_indexed_accessor,
relation_tail_index_accessor,
target_nodes_accesor,
walk_size,
padding_idx,
num_nodes,
rand_states
);
// delete rand states
cudaFree(rand_states);
return walks;
}
}
|
7aa793b81c6f693e920891d22cf2aed0a1ad2d5d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/sort.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>
#include <chrono>
#include <utility>
#include "triangle.cuh"
#include "slicer.cuh"
#include "golden.cuh"
#define NOW (std::chrono::system_clock::now())
typedef std::chrono::time_point<std::chrono::system_clock> chrono_t;
void timer_checkpoint(chrono_t & checkpoint) {
#ifdef TEST
chrono_t end = NOW;
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - checkpoint);
std::cout << duration.count() << "ms" << std::endl;
checkpoint = end;
#else
std::cout << std::endl;
#endif
}
int main(int argc, char* argv[]) {
std::string stl_file_name;
std::vector<triangle> small_tri;
chrono_t start;
if (argc == 2) {
stl_file_name = argv[1];
} else if (argc > 2) {
std::cout << "ERROR: Too many command line arguments" << std::endl;
return 0;
} else {
std::cout << "ERROR: Too few command line arguments" << std::endl;
return 0;
}
start = NOW;
read_stl(stl_file_name, small_tri);
size_t num_small = small_tri.size();
std::cout << "Reading STL file... ";
timer_checkpoint(start);
std::cout << "Allocating device memory... ";
triangle* small_tri_dev;
hipMalloc(&small_tri_dev, num_small * sizeof(triangle));
hipMemcpy(small_tri_dev, small_tri.data(), num_small * sizeof(triangle), hipMemcpyHostToDevice);
layer_t* intersections_large;
hipMalloc(&intersections_large, Y_DIM * X_DIM * NUM_LAYERS * sizeof(layer_t));
size_t* trunk_length;
hipMalloc(&trunk_length, Y_DIM * X_DIM * sizeof(size_t));
// out[z][y][x]
bool* out = (bool*)malloc(NUM_LAYERS * Y_DIM * X_DIM * sizeof(bool));
bool* out_dev;
hipMalloc(&out_dev, Y_DIM * X_DIM * NUM_LAYERS * sizeof(bool));
hipMemset(out_dev, 0, Y_DIM * X_DIM * NUM_LAYERS * sizeof(bool));
double* z_mins_dev;
hipMalloc(&z_mins_dev, num_small * sizeof(double));
timer_checkpoint(start);
std::cout << "Sorting triangles... ";
int threadsPerBlock = THREADS_PER_BLOCK;
int numBlocks;
numBlocks = (Y_DIM * X_DIM + threadsPerBlock - 1) / threadsPerBlock;
GPUsort(small_tri_dev, num_small, z_mins_dev);
hipDeviceSynchronize();
timer_checkpoint(start);
std::cout << "Processing sorted triangles...";
hipLaunchKernelGGL(( smallTriIntersection), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, small_tri_dev, z_mins_dev, num_small, out_dev);
hipDeviceSynchronize();
timer_checkpoint(start);
std::cout << "Copying memory contents... ";
hipMemcpy(out, out_dev, Y_DIM * X_DIM * NUM_LAYERS * sizeof(bool), hipMemcpyDeviceToHost);
timer_checkpoint(start);
// for (int z = 0; z < NUM_LAYERS; z++) {
// for (int y = Y_DIM-1; y >= 0; y--) {
// for (int x = 0; x < X_DIM; x++) {
// if (out[z][y][x]) std::cout << "XX";
// else std::cout << " ";
// }
// std::cout << std::endl;
// }
// std::cout << std::endl << std::endl;
// }
#ifdef TEST
checkOutput(small_tri_dev, num_small, out);
#endif
free(out);
hipFree(small_tri_dev);
hipFree(out_dev);
hipFree(z_mins_dev);
return 0;
}
|
7aa793b81c6f693e920891d22cf2aed0a1ad2d5d.cu
|
#include <thrust/sort.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>
#include <chrono>
#include <utility>
#include "triangle.cuh"
#include "slicer.cuh"
#include "golden.cuh"
#define NOW (std::chrono::system_clock::now())
typedef std::chrono::time_point<std::chrono::system_clock> chrono_t;
void timer_checkpoint(chrono_t & checkpoint) {
#ifdef TEST
chrono_t end = NOW;
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - checkpoint);
std::cout << duration.count() << "ms" << std::endl;
checkpoint = end;
#else
std::cout << std::endl;
#endif
}
int main(int argc, char* argv[]) {
std::string stl_file_name;
std::vector<triangle> small_tri;
chrono_t start;
if (argc == 2) {
stl_file_name = argv[1];
} else if (argc > 2) {
std::cout << "ERROR: Too many command line arguments" << std::endl;
return 0;
} else {
std::cout << "ERROR: Too few command line arguments" << std::endl;
return 0;
}
start = NOW;
read_stl(stl_file_name, small_tri);
size_t num_small = small_tri.size();
std::cout << "Reading STL file... ";
timer_checkpoint(start);
std::cout << "Allocating device memory... ";
triangle* small_tri_dev;
cudaMalloc(&small_tri_dev, num_small * sizeof(triangle));
cudaMemcpy(small_tri_dev, small_tri.data(), num_small * sizeof(triangle), cudaMemcpyHostToDevice);
layer_t* intersections_large;
cudaMalloc(&intersections_large, Y_DIM * X_DIM * NUM_LAYERS * sizeof(layer_t));
size_t* trunk_length;
cudaMalloc(&trunk_length, Y_DIM * X_DIM * sizeof(size_t));
// out[z][y][x]
bool* out = (bool*)malloc(NUM_LAYERS * Y_DIM * X_DIM * sizeof(bool));
bool* out_dev;
cudaMalloc(&out_dev, Y_DIM * X_DIM * NUM_LAYERS * sizeof(bool));
cudaMemset(out_dev, 0, Y_DIM * X_DIM * NUM_LAYERS * sizeof(bool));
double* z_mins_dev;
cudaMalloc(&z_mins_dev, num_small * sizeof(double));
timer_checkpoint(start);
std::cout << "Sorting triangles... ";
int threadsPerBlock = THREADS_PER_BLOCK;
int numBlocks;
numBlocks = (Y_DIM * X_DIM + threadsPerBlock - 1) / threadsPerBlock;
GPUsort(small_tri_dev, num_small, z_mins_dev);
cudaDeviceSynchronize();
timer_checkpoint(start);
std::cout << "Processing sorted triangles...";
smallTriIntersection<<<numBlocks, threadsPerBlock>>>(small_tri_dev, z_mins_dev, num_small, out_dev);
cudaDeviceSynchronize();
timer_checkpoint(start);
std::cout << "Copying memory contents... ";
cudaMemcpy(out, out_dev, Y_DIM * X_DIM * NUM_LAYERS * sizeof(bool), cudaMemcpyDeviceToHost);
timer_checkpoint(start);
// for (int z = 0; z < NUM_LAYERS; z++) {
// for (int y = Y_DIM-1; y >= 0; y--) {
// for (int x = 0; x < X_DIM; x++) {
// if (out[z][y][x]) std::cout << "XX";
// else std::cout << " ";
// }
// std::cout << std::endl;
// }
// std::cout << std::endl << std::endl;
// }
#ifdef TEST
checkOutput(small_tri_dev, num_small, out);
#endif
free(out);
cudaFree(small_tri_dev);
cudaFree(out_dev);
cudaFree(z_mins_dev);
return 0;
}
|
c114aa27d3881ed400a9e10be9919e6c7cf76ecc.hip
|
// !!! This is a file automatically generated by hipify!!!
//HEAD_DSCODES
/*
<DUALSPHYSICS> Copyright (c) 2019 by Dr Jose M. Dominguez et al. (see http://dual.sphysics.org/index.php/developers/).
EPHYSLAB Environmental Physics Laboratory, Universidade de Vigo, Ourense, Spain.
School of Mechanical, Aerospace and Civil Engineering, University of Manchester, Manchester, U.K.
This file is part of DualSPHysics.
DualSPHysics is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version.
DualSPHysics is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with DualSPHysics. If not, see <http://www.gnu.org/licenses/>.
*/
/// \file FunctionsMath_ker.cu \brief Implements basic/general math functions for the GPU executions.
#include "TypesDef.h"
#include <hip/hip_runtime_api.h>
namespace cumath{
//------------------------------------------------------------------------------
/// Resuelve punto en el plano.
/// Solves point in the plane.
//------------------------------------------------------------------------------
__device__ double PointPlane(const float4 &pla,const double3 &pt){
return(pt.x*pla.x+pt.y*pla.y+pt.z*pla.z+pla.w);
}
//------------------------------------------------------------------------------
/// Resuelve punto en el plano.
/// Solves point in the plane.
//------------------------------------------------------------------------------
__device__ float PointPlane(const float4 &pla,float px,float py,float pz){
return(pla.x*px+pla.y*py+pla.z*pz+pla.w);
}
//------------------------------------------------------------------------------
/// Returns the distance between a point and a plane.
/// Devuelve la distancia entre un punto y un plano.
//------------------------------------------------------------------------------
__device__ double DistPlaneSign(const float4 &pla,const double3 &pt){
return(PointPlane(pla,pt)/sqrt(pla.x*pla.x+pla.y*pla.y+pla.z*pla.z));
}
//------------------------------------------------------------------------------
/// Returns the distance between a point and a plane.
/// Devuelve la distancia entre un punto y un plano.
//------------------------------------------------------------------------------
__device__ float KerDistPlaneSign(const float4 &pla,float px,float py,float pz){
return(PointPlane(pla,px,py,pz)/sqrt(pla.x*pla.x+pla.y*pla.y+pla.z*pla.z));
}
//------------------------------------------------------------------------------
/// Returns the distance between a point and a plane.
/// Devuelve la distancia entre un punto y un plano.
//------------------------------------------------------------------------------
__device__ double DistPlane(const float4 &pla,const double3 &pt){
return(fabs(DistPlaneSign(pla,pt)));
}
//------------------------------------------------------------------------------
/// Initializes matrix to zero.
/// Inicializa matriz a cero.
//------------------------------------------------------------------------------
__device__ void Tmatrix3fReset(tmatrix3f &m){
m.a11=m.a12=m.a13=m.a21=m.a22=m.a23=m.a31=m.a32=m.a33=0;
}
//------------------------------------------------------------------------------
/// Initializes matrix to zero.
/// Inicializa matriz a cero.
//------------------------------------------------------------------------------
__device__ void Tmatrix3dReset(tmatrix3d &m){
m.a11=m.a12=m.a13=m.a21=m.a22=m.a23=m.a31=m.a32=m.a33=0;
}
//------------------------------------------------------------------------------
/// Initializes matrix to zero.
/// Inicializa matriz a cero.
//------------------------------------------------------------------------------
__device__ void Tmatrix4fReset(tmatrix4f &m){
m.a11=m.a12=m.a13=m.a14=m.a21=m.a22=m.a23=m.a24=m.a31=m.a32=m.a33=m.a34=m.a41=m.a42=m.a43=m.a44=0;
}
//------------------------------------------------------------------------------
/// Initializes matrix to zero.
/// Inicializa matriz a cero.
//------------------------------------------------------------------------------
__device__ void Tmatrix4dReset(tmatrix4d &m){
m.a11=m.a12=m.a13=m.a14=m.a21=m.a22=m.a23=m.a24=m.a31=m.a32=m.a33=m.a34=m.a41=m.a42=m.a43=m.a44=0;
}
//------------------------------------------------------------------------------
/// Calcula el determinante de una matriz de 3x3.
/// Returns the determinant of a 3x3 matrix.
//------------------------------------------------------------------------------
__device__ float Determinant3x3(const tmatrix3f &d){
return(d.a11*d.a22*d.a33 + d.a12*d.a23*d.a31 + d.a13*d.a21*d.a32 - d.a31*d.a22*d.a13 - d.a32*d.a23*d.a11 - d.a33*d.a21*d.a12);
}
//------------------------------------------------------------------------------
/// Calcula el determinante de una matriz de 3x3.
/// Returns the determinant of a 3x3 matrix.
//------------------------------------------------------------------------------
__device__ double Determinant3x3dbl(const tmatrix3f &d){
return(double(d.a11)*double(d.a22)*double(d.a33) + double(d.a12)*double(d.a23)*double(d.a31) + double(d.a13)*double(d.a21)*double(d.a32) - double(d.a31)*double(d.a22)*double(d.a13) - double(d.a32)*double(d.a23)*double(d.a11) - double(d.a33)*double(d.a21)*double(d.a12));
}
//------------------------------------------------------------------------------
/// Calcula el determinante de una matriz de 3x3.
/// Returns the determinant of a 3x3 matrix.
//------------------------------------------------------------------------------
__device__ double Determinant3x3(const tmatrix3d &d){
return(d.a11*d.a22*d.a33 + d.a12*d.a23*d.a31 + d.a13*d.a21*d.a32 - d.a31*d.a22*d.a13 - d.a32*d.a23*d.a11 - d.a33*d.a21*d.a12);
}
//------------------------------------------------------------------------------
/// Devuelve la matriz inversa de una matriz de 3x3.
/// Returns the inverse matrix of a 3x3 matrix.
//------------------------------------------------------------------------------
__device__ tmatrix3f InverseMatrix3x3(const tmatrix3f &d,const float det){
tmatrix3f inv;
if(det){
inv.a11= (d.a22*d.a33-d.a23*d.a32)/det;
inv.a12=-(d.a12*d.a33-d.a13*d.a32)/det;
inv.a13= (d.a12*d.a23-d.a13*d.a22)/det;
inv.a21=-(d.a21*d.a33-d.a23*d.a31)/det;
inv.a22= (d.a11*d.a33-d.a13*d.a31)/det;
inv.a23=-(d.a11*d.a23-d.a13*d.a21)/det;
inv.a31= (d.a21*d.a32-d.a22*d.a31)/det;
inv.a32=-(d.a11*d.a32-d.a12*d.a31)/det;
inv.a33= (d.a11*d.a22-d.a12*d.a21)/det;
}
else Tmatrix3fReset(inv);
return(inv);
}
//------------------------------------------------------------------------------
/// Devuelve la matriz inversa de una matriz de 3x3.
/// Returns the inverse matrix of a 3x3 matrix.
//------------------------------------------------------------------------------
__device__ tmatrix3f InverseMatrix3x3dbl(const tmatrix3f &d,const double det){
tmatrix3f inv;
if(det){
inv.a11=float( (double(d.a22)*double(d.a33)-double(d.a23)*double(d.a32))/det);
inv.a12=float(-(double(d.a12)*double(d.a33)-double(d.a13)*double(d.a32))/det);
inv.a13=float( (double(d.a12)*double(d.a23)-double(d.a13)*double(d.a22))/det);
inv.a21=float(-(double(d.a21)*double(d.a33)-double(d.a23)*double(d.a31))/det);
inv.a22=float( (double(d.a11)*double(d.a33)-double(d.a13)*double(d.a31))/det);
inv.a23=float(-(double(d.a11)*double(d.a23)-double(d.a13)*double(d.a21))/det);
inv.a31=float( (double(d.a21)*double(d.a32)-double(d.a22)*double(d.a31))/det);
inv.a32=float(-(double(d.a11)*double(d.a32)-double(d.a12)*double(d.a31))/det);
inv.a33=float( (double(d.a11)*double(d.a22)-double(d.a12)*double(d.a21))/det);
}
else Tmatrix3fReset(inv);
return(inv);
}
//------------------------------------------------------------------------------
/// Devuelve la matriz inversa de una matriz de 3x3.
/// Returns the inverse matrix of a 3x3 matrix.
//------------------------------------------------------------------------------
__device__ tmatrix3d InverseMatrix3x3(const tmatrix3d &d,const double det){
tmatrix3d inv;
if(det){
inv.a11= (d.a22*d.a33-d.a23*d.a32)/det;
inv.a12=-(d.a12*d.a33-d.a13*d.a32)/det;
inv.a13= (d.a12*d.a23-d.a13*d.a22)/det;
inv.a21=-(d.a21*d.a33-d.a23*d.a31)/det;
inv.a22= (d.a11*d.a33-d.a13*d.a31)/det;
inv.a23=-(d.a11*d.a23-d.a13*d.a21)/det;
inv.a31= (d.a21*d.a32-d.a22*d.a31)/det;
inv.a32=-(d.a11*d.a32-d.a12*d.a31)/det;
inv.a33= (d.a11*d.a22-d.a12*d.a21)/det;
}
else Tmatrix3dReset(inv);
return(inv);
}
//==============================================================================
/// Devuelve la matriz inversa de una matriz de 3x3.
/// Returns the inverse matrix of a 3x3 matrix.
//==============================================================================
__device__ tmatrix3f InverseMatrix3x3(const tmatrix3f &d){
return(InverseMatrix3x3(d,Determinant3x3(d)));
}
//==============================================================================
/// Devuelve la matriz inversa de una matriz de 3x3.
/// Returns the inverse matrix of a 3x3 matrix.
//==============================================================================
__device__ tmatrix3d InverseMatrix3x3(const tmatrix3d &d){
return(InverseMatrix3x3(d,Determinant3x3(d)));
}
//------------------------------------------------------------------------------
/// Calcula el determinante de una matriz de 4x4.
/// Returns the determinant of a 4x4 matrix.
//------------------------------------------------------------------------------
__device__ float Determinant4x4(const tmatrix4f &d){
return(d.a14*d.a23*d.a32*d.a41 - d.a13*d.a24*d.a32*d.a41 -
d.a14*d.a22*d.a33*d.a41 + d.a12*d.a24*d.a33*d.a41 +
d.a13*d.a22*d.a34*d.a41 - d.a12*d.a23*d.a34*d.a41 -
d.a14*d.a23*d.a31*d.a42 + d.a13*d.a24*d.a31*d.a42 +
d.a14*d.a21*d.a33*d.a42 - d.a11*d.a24*d.a33*d.a42 -
d.a13*d.a21*d.a34*d.a42 + d.a11*d.a23*d.a34*d.a42 +
d.a14*d.a22*d.a31*d.a43 - d.a12*d.a24*d.a31*d.a43 -
d.a14*d.a21*d.a32*d.a43 + d.a11*d.a24*d.a32*d.a43 +
d.a12*d.a21*d.a34*d.a43 - d.a11*d.a22*d.a34*d.a43 -
d.a13*d.a22*d.a31*d.a44 + d.a12*d.a23*d.a31*d.a44 +
d.a13*d.a21*d.a32*d.a44 - d.a11*d.a23*d.a32*d.a44 -
d.a12*d.a21*d.a33*d.a44 + d.a11*d.a22*d.a33*d.a44);
}
//------------------------------------------------------------------------------
/// Calcula el determinante de una matriz de 4x4.
/// Returns the determinant of a 4x4 matrix.
//------------------------------------------------------------------------------
__device__ double Determinant4x4dbl(const tmatrix4f &d){
return(double(d.a14)*double(d.a23)*double(d.a32)*double(d.a41) - double(d.a13)*double(d.a24)*double(d.a32)*double(d.a41) -
double(d.a14)*double(d.a22)*double(d.a33)*double(d.a41) + double(d.a12)*double(d.a24)*double(d.a33)*double(d.a41) +
double(d.a13)*double(d.a22)*double(d.a34)*double(d.a41) - double(d.a12)*double(d.a23)*double(d.a34)*double(d.a41) -
double(d.a14)*double(d.a23)*double(d.a31)*double(d.a42) + double(d.a13)*double(d.a24)*double(d.a31)*double(d.a42) +
double(d.a14)*double(d.a21)*double(d.a33)*double(d.a42) - double(d.a11)*double(d.a24)*double(d.a33)*double(d.a42) -
double(d.a13)*double(d.a21)*double(d.a34)*double(d.a42) + double(d.a11)*double(d.a23)*double(d.a34)*double(d.a42) +
double(d.a14)*double(d.a22)*double(d.a31)*double(d.a43) - double(d.a12)*double(d.a24)*double(d.a31)*double(d.a43) -
double(d.a14)*double(d.a21)*double(d.a32)*double(d.a43) + double(d.a11)*double(d.a24)*double(d.a32)*double(d.a43) +
double(d.a12)*double(d.a21)*double(d.a34)*double(d.a43) - double(d.a11)*double(d.a22)*double(d.a34)*double(d.a43) -
double(d.a13)*double(d.a22)*double(d.a31)*double(d.a44) + double(d.a12)*double(d.a23)*double(d.a31)*double(d.a44) +
double(d.a13)*double(d.a21)*double(d.a32)*double(d.a44) - double(d.a11)*double(d.a23)*double(d.a32)*double(d.a44) -
double(d.a12)*double(d.a21)*double(d.a33)*double(d.a44) + double(d.a11)*double(d.a22)*double(d.a33)*double(d.a44));
}
//------------------------------------------------------------------------------
/// Calcula el determinante de una matriz de 4x4.
/// Returns the determinant of a 4x4 matrix.
//------------------------------------------------------------------------------
__device__ double Determinant4x4(const tmatrix4d &d){
return(d.a14*d.a23*d.a32*d.a41 - d.a13*d.a24*d.a32*d.a41 -
d.a14*d.a22*d.a33*d.a41 + d.a12*d.a24*d.a33*d.a41 +
d.a13*d.a22*d.a34*d.a41 - d.a12*d.a23*d.a34*d.a41 -
d.a14*d.a23*d.a31*d.a42 + d.a13*d.a24*d.a31*d.a42 +
d.a14*d.a21*d.a33*d.a42 - d.a11*d.a24*d.a33*d.a42 -
d.a13*d.a21*d.a34*d.a42 + d.a11*d.a23*d.a34*d.a42 +
d.a14*d.a22*d.a31*d.a43 - d.a12*d.a24*d.a31*d.a43 -
d.a14*d.a21*d.a32*d.a43 + d.a11*d.a24*d.a32*d.a43 +
d.a12*d.a21*d.a34*d.a43 - d.a11*d.a22*d.a34*d.a43 -
d.a13*d.a22*d.a31*d.a44 + d.a12*d.a23*d.a31*d.a44 +
d.a13*d.a21*d.a32*d.a44 - d.a11*d.a23*d.a32*d.a44 -
d.a12*d.a21*d.a33*d.a44 + d.a11*d.a22*d.a33*d.a44);
}
//------------------------------------------------------------------------------
/// Devuelve la matriz inversa de una matriz de 4x4.
/// Returns the inverse matrix of a 4x4 matrix.
//------------------------------------------------------------------------------
__device__ tmatrix4f InverseMatrix4x4(const tmatrix4f &d,const float det){
tmatrix4f inv;
if(det){
inv.a11=(d.a22*(d.a33*d.a44-d.a34*d.a43) + d.a23*(d.a34*d.a42-d.a32*d.a44) + d.a24*(d.a32*d.a43-d.a33*d.a42)) /det;
inv.a21=(d.a21*(d.a34*d.a43-d.a33*d.a44) + d.a23*(d.a31*d.a44-d.a34*d.a41) + d.a24*(d.a33*d.a41-d.a31*d.a43)) /det;
inv.a31=(d.a21*(d.a32*d.a44-d.a34*d.a42) + d.a22*(d.a34*d.a41-d.a31*d.a44) + d.a24*(d.a31*d.a42-d.a32*d.a41)) /det;
inv.a41=(d.a21*(d.a33*d.a42-d.a32*d.a43) + d.a22*(d.a31*d.a43-d.a33*d.a41) + d.a23*(d.a32*d.a41-d.a31*d.a42)) /det;
inv.a12=(d.a12*(d.a34*d.a43-d.a33*d.a44) + d.a13*(d.a32*d.a44-d.a34*d.a42) + d.a14*(d.a33*d.a42-d.a32*d.a43)) /det;
inv.a22=(d.a11*(d.a33*d.a44-d.a34*d.a43) + d.a13*(d.a34*d.a41-d.a31*d.a44) + d.a14*(d.a31*d.a43-d.a33*d.a41)) /det;
inv.a32=(d.a11*(d.a34*d.a42-d.a32*d.a44) + d.a12*(d.a31*d.a44-d.a34*d.a41) + d.a14*(d.a32*d.a41-d.a31*d.a42)) /det;
inv.a42=(d.a11*(d.a32*d.a43-d.a33*d.a42) + d.a12*(d.a33*d.a41-d.a31*d.a43) + d.a13*(d.a31*d.a42-d.a32*d.a41)) /det;
inv.a13=(d.a12*(d.a23*d.a44-d.a24*d.a43) + d.a13*(d.a24*d.a42-d.a22*d.a44) + d.a14*(d.a22*d.a43-d.a23*d.a42)) /det;
inv.a23=(d.a11*(d.a24*d.a43-d.a23*d.a44) + d.a13*(d.a21*d.a44-d.a24*d.a41) + d.a14*(d.a23*d.a41-d.a21*d.a43)) /det;
inv.a33=(d.a11*(d.a22*d.a44-d.a24*d.a42) + d.a12*(d.a24*d.a41-d.a21*d.a44) + d.a14*(d.a21*d.a42-d.a22*d.a41)) /det;
inv.a43=(d.a11*(d.a23*d.a42-d.a22*d.a43) + d.a12*(d.a21*d.a43-d.a23*d.a41) + d.a13*(d.a22*d.a41-d.a21*d.a42)) /det;
inv.a14=(d.a12*(d.a24*d.a33-d.a23*d.a34) + d.a13*(d.a22*d.a34-d.a24*d.a32) + d.a14*(d.a23*d.a32-d.a22*d.a33)) /det;
inv.a24=(d.a11*(d.a23*d.a34-d.a24*d.a33) + d.a13*(d.a24*d.a31-d.a21*d.a34) + d.a14*(d.a21*d.a33-d.a23*d.a31)) /det;
inv.a34=(d.a11*(d.a24*d.a32-d.a22*d.a34) + d.a12*(d.a21*d.a34-d.a24*d.a31) + d.a14*(d.a22*d.a31-d.a21*d.a32)) /det;
inv.a44=(d.a11*(d.a22*d.a33-d.a23*d.a32) + d.a12*(d.a23*d.a31-d.a21*d.a33) + d.a13*(d.a21*d.a32-d.a22*d.a31)) /det;
}
else Tmatrix4fReset(inv);
return(inv);
}
//------------------------------------------------------------------------------
/// Devuelve la matriz inversa de una matriz de 4x4.
/// Returns the inverse matrix of a 4x4 matrix.
//------------------------------------------------------------------------------
__device__ tmatrix4f InverseMatrix4x4dbl(const tmatrix4f &d,const double det){
tmatrix4f inv;
if(det){
inv.a11=(double(d.a22)*(double(d.a33)*double(d.a44)-double(d.a34)*double(d.a43)) + double(d.a23)*(double(d.a34)*double(d.a42)-double(d.a32)*double(d.a44)) + double(d.a24)*(double(d.a32)*double(d.a43)-double(d.a33)*double(d.a42))) /det;
inv.a21=(double(d.a21)*(double(d.a34)*double(d.a43)-double(d.a33)*double(d.a44)) + double(d.a23)*(double(d.a31)*double(d.a44)-double(d.a34)*double(d.a41)) + double(d.a24)*(double(d.a33)*double(d.a41)-double(d.a31)*double(d.a43))) /det;
inv.a31=(double(d.a21)*(double(d.a32)*double(d.a44)-double(d.a34)*double(d.a42)) + double(d.a22)*(double(d.a34)*double(d.a41)-double(d.a31)*double(d.a44)) + double(d.a24)*(double(d.a31)*double(d.a42)-double(d.a32)*double(d.a41))) /det;
inv.a41=(double(d.a21)*(double(d.a33)*double(d.a42)-double(d.a32)*double(d.a43)) + double(d.a22)*(double(d.a31)*double(d.a43)-double(d.a33)*double(d.a41)) + double(d.a23)*(double(d.a32)*double(d.a41)-double(d.a31)*double(d.a42))) /det;
inv.a12=(double(d.a12)*(double(d.a34)*double(d.a43)-double(d.a33)*double(d.a44)) + double(d.a13)*(double(d.a32)*double(d.a44)-double(d.a34)*double(d.a42)) + double(d.a14)*(double(d.a33)*double(d.a42)-double(d.a32)*double(d.a43))) /det;
inv.a22=(double(d.a11)*(double(d.a33)*double(d.a44)-double(d.a34)*double(d.a43)) + double(d.a13)*(double(d.a34)*double(d.a41)-double(d.a31)*double(d.a44)) + double(d.a14)*(double(d.a31)*double(d.a43)-double(d.a33)*double(d.a41))) /det;
inv.a32=(double(d.a11)*(double(d.a34)*double(d.a42)-double(d.a32)*double(d.a44)) + double(d.a12)*(double(d.a31)*double(d.a44)-double(d.a34)*double(d.a41)) + double(d.a14)*(double(d.a32)*double(d.a41)-double(d.a31)*double(d.a42))) /det;
inv.a42=(double(d.a11)*(double(d.a32)*double(d.a43)-double(d.a33)*double(d.a42)) + double(d.a12)*(double(d.a33)*double(d.a41)-double(d.a31)*double(d.a43)) + double(d.a13)*(double(d.a31)*double(d.a42)-double(d.a32)*double(d.a41))) /det;
inv.a13=(double(d.a12)*(double(d.a23)*double(d.a44)-double(d.a24)*double(d.a43)) + double(d.a13)*(double(d.a24)*double(d.a42)-double(d.a22)*double(d.a44)) + double(d.a14)*(double(d.a22)*double(d.a43)-double(d.a23)*double(d.a42))) /det;
inv.a23=(double(d.a11)*(double(d.a24)*double(d.a43)-double(d.a23)*double(d.a44)) + double(d.a13)*(double(d.a21)*double(d.a44)-double(d.a24)*double(d.a41)) + double(d.a14)*(double(d.a23)*double(d.a41)-double(d.a21)*double(d.a43))) /det;
inv.a33=(double(d.a11)*(double(d.a22)*double(d.a44)-double(d.a24)*double(d.a42)) + double(d.a12)*(double(d.a24)*double(d.a41)-double(d.a21)*double(d.a44)) + double(d.a14)*(double(d.a21)*double(d.a42)-double(d.a22)*double(d.a41))) /det;
inv.a43=(double(d.a11)*(double(d.a23)*double(d.a42)-double(d.a22)*double(d.a43)) + double(d.a12)*(double(d.a21)*double(d.a43)-double(d.a23)*double(d.a41)) + double(d.a13)*(double(d.a22)*double(d.a41)-double(d.a21)*double(d.a42))) /det;
inv.a14=(double(d.a12)*(double(d.a24)*double(d.a33)-double(d.a23)*double(d.a34)) + double(d.a13)*(double(d.a22)*double(d.a34)-double(d.a24)*double(d.a32)) + double(d.a14)*(double(d.a23)*double(d.a32)-double(d.a22)*double(d.a33))) /det;
inv.a24=(double(d.a11)*(double(d.a23)*double(d.a34)-double(d.a24)*double(d.a33)) + double(d.a13)*(double(d.a24)*double(d.a31)-double(d.a21)*double(d.a34)) + double(d.a14)*(double(d.a21)*double(d.a33)-double(d.a23)*double(d.a31))) /det;
inv.a34=(double(d.a11)*(double(d.a24)*double(d.a32)-double(d.a22)*double(d.a34)) + double(d.a12)*(double(d.a21)*double(d.a34)-double(d.a24)*double(d.a31)) + double(d.a14)*(double(d.a22)*double(d.a31)-double(d.a21)*double(d.a32))) /det;
inv.a44=(double(d.a11)*(double(d.a22)*double(d.a33)-double(d.a23)*double(d.a32)) + double(d.a12)*(double(d.a23)*double(d.a31)-double(d.a21)*double(d.a33)) + double(d.a13)*(double(d.a21)*double(d.a32)-double(d.a22)*double(d.a31))) /det;
}
else Tmatrix4fReset(inv);
return(inv);
}
//------------------------------------------------------------------------------
/// Devuelve la matriz inversa de una matriz de 4x4.
/// Returns the inverse matrix of a 4x4 matrix.
//------------------------------------------------------------------------------
__device__ tmatrix4d InverseMatrix4x4(const tmatrix4d &d,const double det){
tmatrix4d inv;
if(det){
inv.a11=(d.a22*(d.a33*d.a44-d.a34*d.a43) + d.a23*(d.a34*d.a42-d.a32*d.a44) + d.a24*(d.a32*d.a43-d.a33*d.a42)) /det;
inv.a21=(d.a21*(d.a34*d.a43-d.a33*d.a44) + d.a23*(d.a31*d.a44-d.a34*d.a41) + d.a24*(d.a33*d.a41-d.a31*d.a43)) /det;
inv.a31=(d.a21*(d.a32*d.a44-d.a34*d.a42) + d.a22*(d.a34*d.a41-d.a31*d.a44) + d.a24*(d.a31*d.a42-d.a32*d.a41)) /det;
inv.a41=(d.a21*(d.a33*d.a42-d.a32*d.a43) + d.a22*(d.a31*d.a43-d.a33*d.a41) + d.a23*(d.a32*d.a41-d.a31*d.a42)) /det;
inv.a12=(d.a12*(d.a34*d.a43-d.a33*d.a44) + d.a13*(d.a32*d.a44-d.a34*d.a42) + d.a14*(d.a33*d.a42-d.a32*d.a43)) /det;
inv.a22=(d.a11*(d.a33*d.a44-d.a34*d.a43) + d.a13*(d.a34*d.a41-d.a31*d.a44) + d.a14*(d.a31*d.a43-d.a33*d.a41)) /det;
inv.a32=(d.a11*(d.a34*d.a42-d.a32*d.a44) + d.a12*(d.a31*d.a44-d.a34*d.a41) + d.a14*(d.a32*d.a41-d.a31*d.a42)) /det;
inv.a42=(d.a11*(d.a32*d.a43-d.a33*d.a42) + d.a12*(d.a33*d.a41-d.a31*d.a43) + d.a13*(d.a31*d.a42-d.a32*d.a41)) /det;
inv.a13=(d.a12*(d.a23*d.a44-d.a24*d.a43) + d.a13*(d.a24*d.a42-d.a22*d.a44) + d.a14*(d.a22*d.a43-d.a23*d.a42)) /det;
inv.a23=(d.a11*(d.a24*d.a43-d.a23*d.a44) + d.a13*(d.a21*d.a44-d.a24*d.a41) + d.a14*(d.a23*d.a41-d.a21*d.a43)) /det;
inv.a33=(d.a11*(d.a22*d.a44-d.a24*d.a42) + d.a12*(d.a24*d.a41-d.a21*d.a44) + d.a14*(d.a21*d.a42-d.a22*d.a41)) /det;
inv.a43=(d.a11*(d.a23*d.a42-d.a22*d.a43) + d.a12*(d.a21*d.a43-d.a23*d.a41) + d.a13*(d.a22*d.a41-d.a21*d.a42)) /det;
inv.a14=(d.a12*(d.a24*d.a33-d.a23*d.a34) + d.a13*(d.a22*d.a34-d.a24*d.a32) + d.a14*(d.a23*d.a32-d.a22*d.a33)) /det;
inv.a24=(d.a11*(d.a23*d.a34-d.a24*d.a33) + d.a13*(d.a24*d.a31-d.a21*d.a34) + d.a14*(d.a21*d.a33-d.a23*d.a31)) /det;
inv.a34=(d.a11*(d.a24*d.a32-d.a22*d.a34) + d.a12*(d.a21*d.a34-d.a24*d.a31) + d.a14*(d.a22*d.a31-d.a21*d.a32)) /det;
inv.a44=(d.a11*(d.a22*d.a33-d.a23*d.a32) + d.a12*(d.a23*d.a31-d.a21*d.a33) + d.a13*(d.a21*d.a32-d.a22*d.a31)) /det;
}
else Tmatrix4dReset(inv);
return(inv);
}
//==============================================================================
/// Devuelve producto de 2 matrices de 3x3.
/// Returns the product of 2 matrices of 3x3.
//==============================================================================
__device__ tmatrix3f MulMatrix3x3(const tmatrix3f &a,const tmatrix3f &b){
tmatrix3f ret;
ret.a11=a.a11*b.a11 + a.a12*b.a21 + a.a13*b.a31;
ret.a12=a.a11*b.a12 + a.a12*b.a22 + a.a13*b.a32;
ret.a13=a.a11*b.a13 + a.a12*b.a23 + a.a13*b.a33;
ret.a21=a.a21*b.a11 + a.a22*b.a21 + a.a23*b.a31;
ret.a22=a.a21*b.a12 + a.a22*b.a22 + a.a23*b.a32;
ret.a23=a.a21*b.a13 + a.a22*b.a23 + a.a23*b.a33;
ret.a31=a.a31*b.a11 + a.a32*b.a21 + a.a33*b.a31;
ret.a32=a.a31*b.a12 + a.a32*b.a22 + a.a33*b.a32;
ret.a33=a.a31*b.a13 + a.a32*b.a23 + a.a33*b.a33;
return(ret);
}
//==============================================================================
/// Devuelve producto de 2 matrices de 3x3.
/// Returns the product of 2 matrices of 3x3.
//==============================================================================
__device__ tmatrix3d MulMatrix3x3(const tmatrix3d &a,const tmatrix3d &b){
tmatrix3d ret;
ret.a11=a.a11*b.a11 + a.a12*b.a21 + a.a13*b.a31;
ret.a12=a.a11*b.a12 + a.a12*b.a22 + a.a13*b.a32;
ret.a13=a.a11*b.a13 + a.a12*b.a23 + a.a13*b.a33;
ret.a21=a.a21*b.a11 + a.a22*b.a21 + a.a23*b.a31;
ret.a22=a.a21*b.a12 + a.a22*b.a22 + a.a23*b.a32;
ret.a23=a.a21*b.a13 + a.a22*b.a23 + a.a23*b.a33;
ret.a31=a.a31*b.a11 + a.a32*b.a21 + a.a33*b.a31;
ret.a32=a.a31*b.a12 + a.a32*b.a22 + a.a33*b.a32;
ret.a33=a.a31*b.a13 + a.a32*b.a23 + a.a33*b.a33;
return(ret);
}
//==============================================================================
/// Devuelve traspuesta de matriz 3x3.
/// Returns the transpose from matrix 3x3.
//==============================================================================
__device__ tmatrix3f TrasMatrix3x3(const tmatrix3f &a){
tmatrix3f ret;
ret.a11=a.a11; ret.a12=a.a21; ret.a13=a.a31;
ret.a21=a.a12; ret.a22=a.a22; ret.a23=a.a32;
ret.a31=a.a13; ret.a32=a.a23; ret.a33=a.a33;
return(ret);
}
//==============================================================================
/// Devuelve traspuesta de matriz 3x3.
/// Returns the transpose from matrix 3x3.
//==============================================================================
__device__ tmatrix3d TrasMatrix3x3(const tmatrix3d &a){
tmatrix3d ret;
ret.a11=a.a11; ret.a12=a.a21; ret.a13=a.a31;
ret.a21=a.a12; ret.a22=a.a22; ret.a23=a.a32;
ret.a31=a.a13; ret.a32=a.a23; ret.a33=a.a33;
return(ret);
}
//==============================================================================
/// Devuelve la matriz de rotacion.
/// Returns the rotation matrix.
//==============================================================================
__device__ tmatrix3f RotMatrix3x3(const float3 &ang){
const float cosx=cos(ang.x),cosy=cos(ang.y),cosz=cos(ang.z);
const float sinx=sin(ang.x),siny=sin(ang.y),sinz=sin(ang.z);
tmatrix3f ret;
ret.a11= cosy*cosz;
ret.a12=-cosy*sinz;
ret.a13= siny;
ret.a21= sinx*siny*cosz + cosx*sinz;
ret.a22=-sinx*siny*sinz + cosx*cosz;
ret.a23=-sinx*cosy;
ret.a31=-cosx*siny*cosz + sinx*sinz;
ret.a32= cosx*siny*sinz + sinx*cosz;
ret.a33= cosx*cosy;
return(ret);
}
//==============================================================================
/// Devuelve la matriz de rotacion.
/// Returns the rotation matrix.
//==============================================================================
__device__ tmatrix3d RotMatrix3x3(const double3 &ang){
const double cosx=cos(ang.x),cosy=cos(ang.y),cosz=cos(ang.z);
const double sinx=sin(ang.x),siny=sin(ang.y),sinz=sin(ang.z);
tmatrix3d ret;
ret.a11= cosy*cosz;
ret.a12=-cosy*sinz;
ret.a13= siny;
ret.a21= sinx*siny*cosz + cosx*sinz;
ret.a22=-sinx*siny*sinz + cosx*cosz;
ret.a23=-sinx*cosy;
ret.a31=-cosx*siny*cosz + sinx*sinz;
ret.a32= cosx*siny*sinz + sinx*cosz;
ret.a33= cosx*cosy;
return(ret);
}
}
|
c114aa27d3881ed400a9e10be9919e6c7cf76ecc.cu
|
//HEAD_DSCODES
/*
<DUALSPHYSICS> Copyright (c) 2019 by Dr Jose M. Dominguez et al. (see http://dual.sphysics.org/index.php/developers/).
EPHYSLAB Environmental Physics Laboratory, Universidade de Vigo, Ourense, Spain.
School of Mechanical, Aerospace and Civil Engineering, University of Manchester, Manchester, U.K.
This file is part of DualSPHysics.
DualSPHysics is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version.
DualSPHysics is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with DualSPHysics. If not, see <http://www.gnu.org/licenses/>.
*/
/// \file FunctionsMath_ker.cu \brief Implements basic/general math functions for the GPU executions.
#include "TypesDef.h"
#include <cuda_runtime_api.h>
namespace cumath{
//------------------------------------------------------------------------------
/// Resuelve punto en el plano.
/// Solves point in the plane.
//------------------------------------------------------------------------------
__device__ double PointPlane(const float4 &pla,const double3 &pt){
return(pt.x*pla.x+pt.y*pla.y+pt.z*pla.z+pla.w);
}
//------------------------------------------------------------------------------
/// Resuelve punto en el plano.
/// Solves point in the plane.
//------------------------------------------------------------------------------
__device__ float PointPlane(const float4 &pla,float px,float py,float pz){
return(pla.x*px+pla.y*py+pla.z*pz+pla.w);
}
//------------------------------------------------------------------------------
/// Returns the distance between a point and a plane.
/// Devuelve la distancia entre un punto y un plano.
//------------------------------------------------------------------------------
__device__ double DistPlaneSign(const float4 &pla,const double3 &pt){
return(PointPlane(pla,pt)/sqrt(pla.x*pla.x+pla.y*pla.y+pla.z*pla.z));
}
//------------------------------------------------------------------------------
/// Returns the distance between a point and a plane.
/// Devuelve la distancia entre un punto y un plano.
//------------------------------------------------------------------------------
__device__ float KerDistPlaneSign(const float4 &pla,float px,float py,float pz){
return(PointPlane(pla,px,py,pz)/sqrt(pla.x*pla.x+pla.y*pla.y+pla.z*pla.z));
}
//------------------------------------------------------------------------------
/// Returns the distance between a point and a plane.
/// Devuelve la distancia entre un punto y un plano.
//------------------------------------------------------------------------------
__device__ double DistPlane(const float4 &pla,const double3 &pt){
return(fabs(DistPlaneSign(pla,pt)));
}
//------------------------------------------------------------------------------
/// Initializes matrix to zero.
/// Inicializa matriz a cero.
//------------------------------------------------------------------------------
__device__ void Tmatrix3fReset(tmatrix3f &m){
m.a11=m.a12=m.a13=m.a21=m.a22=m.a23=m.a31=m.a32=m.a33=0;
}
//------------------------------------------------------------------------------
/// Initializes matrix to zero.
/// Inicializa matriz a cero.
//------------------------------------------------------------------------------
__device__ void Tmatrix3dReset(tmatrix3d &m){
m.a11=m.a12=m.a13=m.a21=m.a22=m.a23=m.a31=m.a32=m.a33=0;
}
//------------------------------------------------------------------------------
/// Initializes matrix to zero.
/// Inicializa matriz a cero.
//------------------------------------------------------------------------------
__device__ void Tmatrix4fReset(tmatrix4f &m){
m.a11=m.a12=m.a13=m.a14=m.a21=m.a22=m.a23=m.a24=m.a31=m.a32=m.a33=m.a34=m.a41=m.a42=m.a43=m.a44=0;
}
//------------------------------------------------------------------------------
/// Initializes matrix to zero.
/// Inicializa matriz a cero.
//------------------------------------------------------------------------------
__device__ void Tmatrix4dReset(tmatrix4d &m){
m.a11=m.a12=m.a13=m.a14=m.a21=m.a22=m.a23=m.a24=m.a31=m.a32=m.a33=m.a34=m.a41=m.a42=m.a43=m.a44=0;
}
//------------------------------------------------------------------------------
/// Calcula el determinante de una matriz de 3x3.
/// Returns the determinant of a 3x3 matrix.
//------------------------------------------------------------------------------
__device__ float Determinant3x3(const tmatrix3f &d){
return(d.a11*d.a22*d.a33 + d.a12*d.a23*d.a31 + d.a13*d.a21*d.a32 - d.a31*d.a22*d.a13 - d.a32*d.a23*d.a11 - d.a33*d.a21*d.a12);
}
//------------------------------------------------------------------------------
/// Calcula el determinante de una matriz de 3x3.
/// Returns the determinant of a 3x3 matrix.
//------------------------------------------------------------------------------
__device__ double Determinant3x3dbl(const tmatrix3f &d){
return(double(d.a11)*double(d.a22)*double(d.a33) + double(d.a12)*double(d.a23)*double(d.a31) + double(d.a13)*double(d.a21)*double(d.a32) - double(d.a31)*double(d.a22)*double(d.a13) - double(d.a32)*double(d.a23)*double(d.a11) - double(d.a33)*double(d.a21)*double(d.a12));
}
//------------------------------------------------------------------------------
/// Calcula el determinante de una matriz de 3x3.
/// Returns the determinant of a 3x3 matrix.
//------------------------------------------------------------------------------
__device__ double Determinant3x3(const tmatrix3d &d){
return(d.a11*d.a22*d.a33 + d.a12*d.a23*d.a31 + d.a13*d.a21*d.a32 - d.a31*d.a22*d.a13 - d.a32*d.a23*d.a11 - d.a33*d.a21*d.a12);
}
//------------------------------------------------------------------------------
/// Devuelve la matriz inversa de una matriz de 3x3.
/// Returns the inverse matrix of a 3x3 matrix.
//------------------------------------------------------------------------------
__device__ tmatrix3f InverseMatrix3x3(const tmatrix3f &d,const float det){
tmatrix3f inv;
if(det){
inv.a11= (d.a22*d.a33-d.a23*d.a32)/det;
inv.a12=-(d.a12*d.a33-d.a13*d.a32)/det;
inv.a13= (d.a12*d.a23-d.a13*d.a22)/det;
inv.a21=-(d.a21*d.a33-d.a23*d.a31)/det;
inv.a22= (d.a11*d.a33-d.a13*d.a31)/det;
inv.a23=-(d.a11*d.a23-d.a13*d.a21)/det;
inv.a31= (d.a21*d.a32-d.a22*d.a31)/det;
inv.a32=-(d.a11*d.a32-d.a12*d.a31)/det;
inv.a33= (d.a11*d.a22-d.a12*d.a21)/det;
}
else Tmatrix3fReset(inv);
return(inv);
}
//------------------------------------------------------------------------------
/// Devuelve la matriz inversa de una matriz de 3x3.
/// Returns the inverse matrix of a 3x3 matrix.
//------------------------------------------------------------------------------
__device__ tmatrix3f InverseMatrix3x3dbl(const tmatrix3f &d,const double det){
tmatrix3f inv;
if(det){
inv.a11=float( (double(d.a22)*double(d.a33)-double(d.a23)*double(d.a32))/det);
inv.a12=float(-(double(d.a12)*double(d.a33)-double(d.a13)*double(d.a32))/det);
inv.a13=float( (double(d.a12)*double(d.a23)-double(d.a13)*double(d.a22))/det);
inv.a21=float(-(double(d.a21)*double(d.a33)-double(d.a23)*double(d.a31))/det);
inv.a22=float( (double(d.a11)*double(d.a33)-double(d.a13)*double(d.a31))/det);
inv.a23=float(-(double(d.a11)*double(d.a23)-double(d.a13)*double(d.a21))/det);
inv.a31=float( (double(d.a21)*double(d.a32)-double(d.a22)*double(d.a31))/det);
inv.a32=float(-(double(d.a11)*double(d.a32)-double(d.a12)*double(d.a31))/det);
inv.a33=float( (double(d.a11)*double(d.a22)-double(d.a12)*double(d.a21))/det);
}
else Tmatrix3fReset(inv);
return(inv);
}
//------------------------------------------------------------------------------
/// Devuelve la matriz inversa de una matriz de 3x3.
/// Returns the inverse matrix of a 3x3 matrix.
//------------------------------------------------------------------------------
__device__ tmatrix3d InverseMatrix3x3(const tmatrix3d &d,const double det){
tmatrix3d inv;
if(det){
inv.a11= (d.a22*d.a33-d.a23*d.a32)/det;
inv.a12=-(d.a12*d.a33-d.a13*d.a32)/det;
inv.a13= (d.a12*d.a23-d.a13*d.a22)/det;
inv.a21=-(d.a21*d.a33-d.a23*d.a31)/det;
inv.a22= (d.a11*d.a33-d.a13*d.a31)/det;
inv.a23=-(d.a11*d.a23-d.a13*d.a21)/det;
inv.a31= (d.a21*d.a32-d.a22*d.a31)/det;
inv.a32=-(d.a11*d.a32-d.a12*d.a31)/det;
inv.a33= (d.a11*d.a22-d.a12*d.a21)/det;
}
else Tmatrix3dReset(inv);
return(inv);
}
//==============================================================================
/// Devuelve la matriz inversa de una matriz de 3x3.
/// Returns the inverse matrix of a 3x3 matrix.
//==============================================================================
__device__ tmatrix3f InverseMatrix3x3(const tmatrix3f &d){
return(InverseMatrix3x3(d,Determinant3x3(d)));
}
//==============================================================================
/// Devuelve la matriz inversa de una matriz de 3x3.
/// Returns the inverse matrix of a 3x3 matrix.
//==============================================================================
__device__ tmatrix3d InverseMatrix3x3(const tmatrix3d &d){
return(InverseMatrix3x3(d,Determinant3x3(d)));
}
//------------------------------------------------------------------------------
/// Calcula el determinante de una matriz de 4x4.
/// Returns the determinant of a 4x4 matrix.
//------------------------------------------------------------------------------
__device__ float Determinant4x4(const tmatrix4f &d){
return(d.a14*d.a23*d.a32*d.a41 - d.a13*d.a24*d.a32*d.a41 -
d.a14*d.a22*d.a33*d.a41 + d.a12*d.a24*d.a33*d.a41 +
d.a13*d.a22*d.a34*d.a41 - d.a12*d.a23*d.a34*d.a41 -
d.a14*d.a23*d.a31*d.a42 + d.a13*d.a24*d.a31*d.a42 +
d.a14*d.a21*d.a33*d.a42 - d.a11*d.a24*d.a33*d.a42 -
d.a13*d.a21*d.a34*d.a42 + d.a11*d.a23*d.a34*d.a42 +
d.a14*d.a22*d.a31*d.a43 - d.a12*d.a24*d.a31*d.a43 -
d.a14*d.a21*d.a32*d.a43 + d.a11*d.a24*d.a32*d.a43 +
d.a12*d.a21*d.a34*d.a43 - d.a11*d.a22*d.a34*d.a43 -
d.a13*d.a22*d.a31*d.a44 + d.a12*d.a23*d.a31*d.a44 +
d.a13*d.a21*d.a32*d.a44 - d.a11*d.a23*d.a32*d.a44 -
d.a12*d.a21*d.a33*d.a44 + d.a11*d.a22*d.a33*d.a44);
}
//------------------------------------------------------------------------------
/// Calcula el determinante de una matriz de 4x4.
/// Returns the determinant of a 4x4 matrix.
//------------------------------------------------------------------------------
__device__ double Determinant4x4dbl(const tmatrix4f &d){
return(double(d.a14)*double(d.a23)*double(d.a32)*double(d.a41) - double(d.a13)*double(d.a24)*double(d.a32)*double(d.a41) -
double(d.a14)*double(d.a22)*double(d.a33)*double(d.a41) + double(d.a12)*double(d.a24)*double(d.a33)*double(d.a41) +
double(d.a13)*double(d.a22)*double(d.a34)*double(d.a41) - double(d.a12)*double(d.a23)*double(d.a34)*double(d.a41) -
double(d.a14)*double(d.a23)*double(d.a31)*double(d.a42) + double(d.a13)*double(d.a24)*double(d.a31)*double(d.a42) +
double(d.a14)*double(d.a21)*double(d.a33)*double(d.a42) - double(d.a11)*double(d.a24)*double(d.a33)*double(d.a42) -
double(d.a13)*double(d.a21)*double(d.a34)*double(d.a42) + double(d.a11)*double(d.a23)*double(d.a34)*double(d.a42) +
double(d.a14)*double(d.a22)*double(d.a31)*double(d.a43) - double(d.a12)*double(d.a24)*double(d.a31)*double(d.a43) -
double(d.a14)*double(d.a21)*double(d.a32)*double(d.a43) + double(d.a11)*double(d.a24)*double(d.a32)*double(d.a43) +
double(d.a12)*double(d.a21)*double(d.a34)*double(d.a43) - double(d.a11)*double(d.a22)*double(d.a34)*double(d.a43) -
double(d.a13)*double(d.a22)*double(d.a31)*double(d.a44) + double(d.a12)*double(d.a23)*double(d.a31)*double(d.a44) +
double(d.a13)*double(d.a21)*double(d.a32)*double(d.a44) - double(d.a11)*double(d.a23)*double(d.a32)*double(d.a44) -
double(d.a12)*double(d.a21)*double(d.a33)*double(d.a44) + double(d.a11)*double(d.a22)*double(d.a33)*double(d.a44));
}
//------------------------------------------------------------------------------
/// Calcula el determinante de una matriz de 4x4.
/// Returns the determinant of a 4x4 matrix.
//------------------------------------------------------------------------------
__device__ double Determinant4x4(const tmatrix4d &d){
return(d.a14*d.a23*d.a32*d.a41 - d.a13*d.a24*d.a32*d.a41 -
d.a14*d.a22*d.a33*d.a41 + d.a12*d.a24*d.a33*d.a41 +
d.a13*d.a22*d.a34*d.a41 - d.a12*d.a23*d.a34*d.a41 -
d.a14*d.a23*d.a31*d.a42 + d.a13*d.a24*d.a31*d.a42 +
d.a14*d.a21*d.a33*d.a42 - d.a11*d.a24*d.a33*d.a42 -
d.a13*d.a21*d.a34*d.a42 + d.a11*d.a23*d.a34*d.a42 +
d.a14*d.a22*d.a31*d.a43 - d.a12*d.a24*d.a31*d.a43 -
d.a14*d.a21*d.a32*d.a43 + d.a11*d.a24*d.a32*d.a43 +
d.a12*d.a21*d.a34*d.a43 - d.a11*d.a22*d.a34*d.a43 -
d.a13*d.a22*d.a31*d.a44 + d.a12*d.a23*d.a31*d.a44 +
d.a13*d.a21*d.a32*d.a44 - d.a11*d.a23*d.a32*d.a44 -
d.a12*d.a21*d.a33*d.a44 + d.a11*d.a22*d.a33*d.a44);
}
//------------------------------------------------------------------------------
/// Devuelve la matriz inversa de una matriz de 4x4.
/// Returns the inverse matrix of a 4x4 matrix.
//------------------------------------------------------------------------------
__device__ tmatrix4f InverseMatrix4x4(const tmatrix4f &d,const float det){
tmatrix4f inv;
if(det){
inv.a11=(d.a22*(d.a33*d.a44-d.a34*d.a43) + d.a23*(d.a34*d.a42-d.a32*d.a44) + d.a24*(d.a32*d.a43-d.a33*d.a42)) /det;
inv.a21=(d.a21*(d.a34*d.a43-d.a33*d.a44) + d.a23*(d.a31*d.a44-d.a34*d.a41) + d.a24*(d.a33*d.a41-d.a31*d.a43)) /det;
inv.a31=(d.a21*(d.a32*d.a44-d.a34*d.a42) + d.a22*(d.a34*d.a41-d.a31*d.a44) + d.a24*(d.a31*d.a42-d.a32*d.a41)) /det;
inv.a41=(d.a21*(d.a33*d.a42-d.a32*d.a43) + d.a22*(d.a31*d.a43-d.a33*d.a41) + d.a23*(d.a32*d.a41-d.a31*d.a42)) /det;
inv.a12=(d.a12*(d.a34*d.a43-d.a33*d.a44) + d.a13*(d.a32*d.a44-d.a34*d.a42) + d.a14*(d.a33*d.a42-d.a32*d.a43)) /det;
inv.a22=(d.a11*(d.a33*d.a44-d.a34*d.a43) + d.a13*(d.a34*d.a41-d.a31*d.a44) + d.a14*(d.a31*d.a43-d.a33*d.a41)) /det;
inv.a32=(d.a11*(d.a34*d.a42-d.a32*d.a44) + d.a12*(d.a31*d.a44-d.a34*d.a41) + d.a14*(d.a32*d.a41-d.a31*d.a42)) /det;
inv.a42=(d.a11*(d.a32*d.a43-d.a33*d.a42) + d.a12*(d.a33*d.a41-d.a31*d.a43) + d.a13*(d.a31*d.a42-d.a32*d.a41)) /det;
inv.a13=(d.a12*(d.a23*d.a44-d.a24*d.a43) + d.a13*(d.a24*d.a42-d.a22*d.a44) + d.a14*(d.a22*d.a43-d.a23*d.a42)) /det;
inv.a23=(d.a11*(d.a24*d.a43-d.a23*d.a44) + d.a13*(d.a21*d.a44-d.a24*d.a41) + d.a14*(d.a23*d.a41-d.a21*d.a43)) /det;
inv.a33=(d.a11*(d.a22*d.a44-d.a24*d.a42) + d.a12*(d.a24*d.a41-d.a21*d.a44) + d.a14*(d.a21*d.a42-d.a22*d.a41)) /det;
inv.a43=(d.a11*(d.a23*d.a42-d.a22*d.a43) + d.a12*(d.a21*d.a43-d.a23*d.a41) + d.a13*(d.a22*d.a41-d.a21*d.a42)) /det;
inv.a14=(d.a12*(d.a24*d.a33-d.a23*d.a34) + d.a13*(d.a22*d.a34-d.a24*d.a32) + d.a14*(d.a23*d.a32-d.a22*d.a33)) /det;
inv.a24=(d.a11*(d.a23*d.a34-d.a24*d.a33) + d.a13*(d.a24*d.a31-d.a21*d.a34) + d.a14*(d.a21*d.a33-d.a23*d.a31)) /det;
inv.a34=(d.a11*(d.a24*d.a32-d.a22*d.a34) + d.a12*(d.a21*d.a34-d.a24*d.a31) + d.a14*(d.a22*d.a31-d.a21*d.a32)) /det;
inv.a44=(d.a11*(d.a22*d.a33-d.a23*d.a32) + d.a12*(d.a23*d.a31-d.a21*d.a33) + d.a13*(d.a21*d.a32-d.a22*d.a31)) /det;
}
else Tmatrix4fReset(inv);
return(inv);
}
//------------------------------------------------------------------------------
/// Devuelve la matriz inversa de una matriz de 4x4.
/// Returns the inverse matrix of a 4x4 matrix.
//------------------------------------------------------------------------------
__device__ tmatrix4f InverseMatrix4x4dbl(const tmatrix4f &d,const double det){
tmatrix4f inv;
if(det){
inv.a11=(double(d.a22)*(double(d.a33)*double(d.a44)-double(d.a34)*double(d.a43)) + double(d.a23)*(double(d.a34)*double(d.a42)-double(d.a32)*double(d.a44)) + double(d.a24)*(double(d.a32)*double(d.a43)-double(d.a33)*double(d.a42))) /det;
inv.a21=(double(d.a21)*(double(d.a34)*double(d.a43)-double(d.a33)*double(d.a44)) + double(d.a23)*(double(d.a31)*double(d.a44)-double(d.a34)*double(d.a41)) + double(d.a24)*(double(d.a33)*double(d.a41)-double(d.a31)*double(d.a43))) /det;
inv.a31=(double(d.a21)*(double(d.a32)*double(d.a44)-double(d.a34)*double(d.a42)) + double(d.a22)*(double(d.a34)*double(d.a41)-double(d.a31)*double(d.a44)) + double(d.a24)*(double(d.a31)*double(d.a42)-double(d.a32)*double(d.a41))) /det;
inv.a41=(double(d.a21)*(double(d.a33)*double(d.a42)-double(d.a32)*double(d.a43)) + double(d.a22)*(double(d.a31)*double(d.a43)-double(d.a33)*double(d.a41)) + double(d.a23)*(double(d.a32)*double(d.a41)-double(d.a31)*double(d.a42))) /det;
inv.a12=(double(d.a12)*(double(d.a34)*double(d.a43)-double(d.a33)*double(d.a44)) + double(d.a13)*(double(d.a32)*double(d.a44)-double(d.a34)*double(d.a42)) + double(d.a14)*(double(d.a33)*double(d.a42)-double(d.a32)*double(d.a43))) /det;
inv.a22=(double(d.a11)*(double(d.a33)*double(d.a44)-double(d.a34)*double(d.a43)) + double(d.a13)*(double(d.a34)*double(d.a41)-double(d.a31)*double(d.a44)) + double(d.a14)*(double(d.a31)*double(d.a43)-double(d.a33)*double(d.a41))) /det;
inv.a32=(double(d.a11)*(double(d.a34)*double(d.a42)-double(d.a32)*double(d.a44)) + double(d.a12)*(double(d.a31)*double(d.a44)-double(d.a34)*double(d.a41)) + double(d.a14)*(double(d.a32)*double(d.a41)-double(d.a31)*double(d.a42))) /det;
inv.a42=(double(d.a11)*(double(d.a32)*double(d.a43)-double(d.a33)*double(d.a42)) + double(d.a12)*(double(d.a33)*double(d.a41)-double(d.a31)*double(d.a43)) + double(d.a13)*(double(d.a31)*double(d.a42)-double(d.a32)*double(d.a41))) /det;
inv.a13=(double(d.a12)*(double(d.a23)*double(d.a44)-double(d.a24)*double(d.a43)) + double(d.a13)*(double(d.a24)*double(d.a42)-double(d.a22)*double(d.a44)) + double(d.a14)*(double(d.a22)*double(d.a43)-double(d.a23)*double(d.a42))) /det;
inv.a23=(double(d.a11)*(double(d.a24)*double(d.a43)-double(d.a23)*double(d.a44)) + double(d.a13)*(double(d.a21)*double(d.a44)-double(d.a24)*double(d.a41)) + double(d.a14)*(double(d.a23)*double(d.a41)-double(d.a21)*double(d.a43))) /det;
inv.a33=(double(d.a11)*(double(d.a22)*double(d.a44)-double(d.a24)*double(d.a42)) + double(d.a12)*(double(d.a24)*double(d.a41)-double(d.a21)*double(d.a44)) + double(d.a14)*(double(d.a21)*double(d.a42)-double(d.a22)*double(d.a41))) /det;
inv.a43=(double(d.a11)*(double(d.a23)*double(d.a42)-double(d.a22)*double(d.a43)) + double(d.a12)*(double(d.a21)*double(d.a43)-double(d.a23)*double(d.a41)) + double(d.a13)*(double(d.a22)*double(d.a41)-double(d.a21)*double(d.a42))) /det;
inv.a14=(double(d.a12)*(double(d.a24)*double(d.a33)-double(d.a23)*double(d.a34)) + double(d.a13)*(double(d.a22)*double(d.a34)-double(d.a24)*double(d.a32)) + double(d.a14)*(double(d.a23)*double(d.a32)-double(d.a22)*double(d.a33))) /det;
inv.a24=(double(d.a11)*(double(d.a23)*double(d.a34)-double(d.a24)*double(d.a33)) + double(d.a13)*(double(d.a24)*double(d.a31)-double(d.a21)*double(d.a34)) + double(d.a14)*(double(d.a21)*double(d.a33)-double(d.a23)*double(d.a31))) /det;
inv.a34=(double(d.a11)*(double(d.a24)*double(d.a32)-double(d.a22)*double(d.a34)) + double(d.a12)*(double(d.a21)*double(d.a34)-double(d.a24)*double(d.a31)) + double(d.a14)*(double(d.a22)*double(d.a31)-double(d.a21)*double(d.a32))) /det;
inv.a44=(double(d.a11)*(double(d.a22)*double(d.a33)-double(d.a23)*double(d.a32)) + double(d.a12)*(double(d.a23)*double(d.a31)-double(d.a21)*double(d.a33)) + double(d.a13)*(double(d.a21)*double(d.a32)-double(d.a22)*double(d.a31))) /det;
}
else Tmatrix4fReset(inv);
return(inv);
}
//------------------------------------------------------------------------------
/// Devuelve la matriz inversa de una matriz de 4x4.
/// Returns the inverse matrix of a 4x4 matrix.
//------------------------------------------------------------------------------
__device__ tmatrix4d InverseMatrix4x4(const tmatrix4d &d,const double det){
tmatrix4d inv;
if(det){
inv.a11=(d.a22*(d.a33*d.a44-d.a34*d.a43) + d.a23*(d.a34*d.a42-d.a32*d.a44) + d.a24*(d.a32*d.a43-d.a33*d.a42)) /det;
inv.a21=(d.a21*(d.a34*d.a43-d.a33*d.a44) + d.a23*(d.a31*d.a44-d.a34*d.a41) + d.a24*(d.a33*d.a41-d.a31*d.a43)) /det;
inv.a31=(d.a21*(d.a32*d.a44-d.a34*d.a42) + d.a22*(d.a34*d.a41-d.a31*d.a44) + d.a24*(d.a31*d.a42-d.a32*d.a41)) /det;
inv.a41=(d.a21*(d.a33*d.a42-d.a32*d.a43) + d.a22*(d.a31*d.a43-d.a33*d.a41) + d.a23*(d.a32*d.a41-d.a31*d.a42)) /det;
inv.a12=(d.a12*(d.a34*d.a43-d.a33*d.a44) + d.a13*(d.a32*d.a44-d.a34*d.a42) + d.a14*(d.a33*d.a42-d.a32*d.a43)) /det;
inv.a22=(d.a11*(d.a33*d.a44-d.a34*d.a43) + d.a13*(d.a34*d.a41-d.a31*d.a44) + d.a14*(d.a31*d.a43-d.a33*d.a41)) /det;
inv.a32=(d.a11*(d.a34*d.a42-d.a32*d.a44) + d.a12*(d.a31*d.a44-d.a34*d.a41) + d.a14*(d.a32*d.a41-d.a31*d.a42)) /det;
inv.a42=(d.a11*(d.a32*d.a43-d.a33*d.a42) + d.a12*(d.a33*d.a41-d.a31*d.a43) + d.a13*(d.a31*d.a42-d.a32*d.a41)) /det;
inv.a13=(d.a12*(d.a23*d.a44-d.a24*d.a43) + d.a13*(d.a24*d.a42-d.a22*d.a44) + d.a14*(d.a22*d.a43-d.a23*d.a42)) /det;
inv.a23=(d.a11*(d.a24*d.a43-d.a23*d.a44) + d.a13*(d.a21*d.a44-d.a24*d.a41) + d.a14*(d.a23*d.a41-d.a21*d.a43)) /det;
inv.a33=(d.a11*(d.a22*d.a44-d.a24*d.a42) + d.a12*(d.a24*d.a41-d.a21*d.a44) + d.a14*(d.a21*d.a42-d.a22*d.a41)) /det;
inv.a43=(d.a11*(d.a23*d.a42-d.a22*d.a43) + d.a12*(d.a21*d.a43-d.a23*d.a41) + d.a13*(d.a22*d.a41-d.a21*d.a42)) /det;
inv.a14=(d.a12*(d.a24*d.a33-d.a23*d.a34) + d.a13*(d.a22*d.a34-d.a24*d.a32) + d.a14*(d.a23*d.a32-d.a22*d.a33)) /det;
inv.a24=(d.a11*(d.a23*d.a34-d.a24*d.a33) + d.a13*(d.a24*d.a31-d.a21*d.a34) + d.a14*(d.a21*d.a33-d.a23*d.a31)) /det;
inv.a34=(d.a11*(d.a24*d.a32-d.a22*d.a34) + d.a12*(d.a21*d.a34-d.a24*d.a31) + d.a14*(d.a22*d.a31-d.a21*d.a32)) /det;
inv.a44=(d.a11*(d.a22*d.a33-d.a23*d.a32) + d.a12*(d.a23*d.a31-d.a21*d.a33) + d.a13*(d.a21*d.a32-d.a22*d.a31)) /det;
}
else Tmatrix4dReset(inv);
return(inv);
}
//==============================================================================
/// Devuelve producto de 2 matrices de 3x3.
/// Returns the product of 2 matrices of 3x3.
//==============================================================================
__device__ tmatrix3f MulMatrix3x3(const tmatrix3f &a,const tmatrix3f &b){
tmatrix3f ret;
ret.a11=a.a11*b.a11 + a.a12*b.a21 + a.a13*b.a31;
ret.a12=a.a11*b.a12 + a.a12*b.a22 + a.a13*b.a32;
ret.a13=a.a11*b.a13 + a.a12*b.a23 + a.a13*b.a33;
ret.a21=a.a21*b.a11 + a.a22*b.a21 + a.a23*b.a31;
ret.a22=a.a21*b.a12 + a.a22*b.a22 + a.a23*b.a32;
ret.a23=a.a21*b.a13 + a.a22*b.a23 + a.a23*b.a33;
ret.a31=a.a31*b.a11 + a.a32*b.a21 + a.a33*b.a31;
ret.a32=a.a31*b.a12 + a.a32*b.a22 + a.a33*b.a32;
ret.a33=a.a31*b.a13 + a.a32*b.a23 + a.a33*b.a33;
return(ret);
}
//==============================================================================
/// Devuelve producto de 2 matrices de 3x3.
/// Returns the product of 2 matrices of 3x3.
//==============================================================================
__device__ tmatrix3d MulMatrix3x3(const tmatrix3d &a,const tmatrix3d &b){
tmatrix3d ret;
ret.a11=a.a11*b.a11 + a.a12*b.a21 + a.a13*b.a31;
ret.a12=a.a11*b.a12 + a.a12*b.a22 + a.a13*b.a32;
ret.a13=a.a11*b.a13 + a.a12*b.a23 + a.a13*b.a33;
ret.a21=a.a21*b.a11 + a.a22*b.a21 + a.a23*b.a31;
ret.a22=a.a21*b.a12 + a.a22*b.a22 + a.a23*b.a32;
ret.a23=a.a21*b.a13 + a.a22*b.a23 + a.a23*b.a33;
ret.a31=a.a31*b.a11 + a.a32*b.a21 + a.a33*b.a31;
ret.a32=a.a31*b.a12 + a.a32*b.a22 + a.a33*b.a32;
ret.a33=a.a31*b.a13 + a.a32*b.a23 + a.a33*b.a33;
return(ret);
}
//==============================================================================
/// Devuelve traspuesta de matriz 3x3.
/// Returns the transpose from matrix 3x3.
//==============================================================================
__device__ tmatrix3f TrasMatrix3x3(const tmatrix3f &a){
tmatrix3f ret;
ret.a11=a.a11; ret.a12=a.a21; ret.a13=a.a31;
ret.a21=a.a12; ret.a22=a.a22; ret.a23=a.a32;
ret.a31=a.a13; ret.a32=a.a23; ret.a33=a.a33;
return(ret);
}
//==============================================================================
/// Devuelve traspuesta de matriz 3x3.
/// Returns the transpose from matrix 3x3.
//==============================================================================
__device__ tmatrix3d TrasMatrix3x3(const tmatrix3d &a){
tmatrix3d ret;
ret.a11=a.a11; ret.a12=a.a21; ret.a13=a.a31;
ret.a21=a.a12; ret.a22=a.a22; ret.a23=a.a32;
ret.a31=a.a13; ret.a32=a.a23; ret.a33=a.a33;
return(ret);
}
//==============================================================================
/// Devuelve la matriz de rotacion.
/// Returns the rotation matrix.
//==============================================================================
__device__ tmatrix3f RotMatrix3x3(const float3 &ang){
const float cosx=cos(ang.x),cosy=cos(ang.y),cosz=cos(ang.z);
const float sinx=sin(ang.x),siny=sin(ang.y),sinz=sin(ang.z);
tmatrix3f ret;
ret.a11= cosy*cosz;
ret.a12=-cosy*sinz;
ret.a13= siny;
ret.a21= sinx*siny*cosz + cosx*sinz;
ret.a22=-sinx*siny*sinz + cosx*cosz;
ret.a23=-sinx*cosy;
ret.a31=-cosx*siny*cosz + sinx*sinz;
ret.a32= cosx*siny*sinz + sinx*cosz;
ret.a33= cosx*cosy;
return(ret);
}
//==============================================================================
/// Devuelve la matriz de rotacion.
/// Returns the rotation matrix.
//==============================================================================
__device__ tmatrix3d RotMatrix3x3(const double3 &ang){
const double cosx=cos(ang.x),cosy=cos(ang.y),cosz=cos(ang.z);
const double sinx=sin(ang.x),siny=sin(ang.y),sinz=sin(ang.z);
tmatrix3d ret;
ret.a11= cosy*cosz;
ret.a12=-cosy*sinz;
ret.a13= siny;
ret.a21= sinx*siny*cosz + cosx*sinz;
ret.a22=-sinx*siny*sinz + cosx*cosz;
ret.a23=-sinx*cosy;
ret.a31=-cosx*siny*cosz + sinx*sinz;
ret.a32= cosx*siny*sinz + sinx*cosz;
ret.a33= cosx*cosy;
return(ret);
}
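//==============================================================================
/// Illustrative sketch (assumed helper, not part of the original sources):
/// rotates a 3x3 tensor into a new frame, T'=R*T*R^T, by composing the
/// RotMatrix3x3, MulMatrix3x3 and TrasMatrix3x3 helpers defined above.
//==============================================================================
__device__ tmatrix3f RotTensor3x3(const tmatrix3f &t,const float3 &ang){
  const tmatrix3f r=RotMatrix3x3(ang);
  return(MulMatrix3x3(MulMatrix3x3(r,t),TrasMatrix3x3(r)));
}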
}
|
03bfc85e181e4ea80d1402b1b2c1aa866a1f4ca2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include<iostream>
#include<math.h>
__global__ void matMulKernel(
float *a, int a_rows, int a_cols,
float *b, int b_rows, int b_cols,
float *c, int c_rows, int c_cols
)
{
float Cvalue = 0.0;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < a_cols; ++i)
{
Cvalue += a[row * a_cols + i] * b[i * b_cols + col];
}
c[row * c_cols + col] = Cvalue;
}
/*
Here is the first version
array A is the input matrix
array B is the projection matrix
array C is the results matrix
__global__ void hash_(
float *A, int a_rows, int a_cols,
float *B, int b_rows, int b_cols,
float *C, int c_rows, int c_cols, float* bits
)
{
float Cvalue = 0.0;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
int table_index = row / a_rows;
int input_index = row % a_rows;
for (int i = 0; i < a_cols; ++i)
{
Cvalue += A[input_index * a_cols + i] * B[table_index * (b_rows * b_cols) + i * b_cols + col];
}
C[row * c_cols + col] = std::signbit(-1 * Cvalue) * bits[col];
// C[row * c_cols + col] = Cvalue;
}
__global__ void vec_sum(float* a, float* b, int cols) {
int row = threadIdx.x + blockIdx.x * blockDim.x;
for (int j = 0; j < cols; ++j) {
b[row] += a[row * cols + j];
}
}
*/
__global__ void hash_(
float *A, int a_rows, int a_cols,
float *B, int b_rows, int b_cols,
float *C, int c_rows, int c_cols, float* bits
)
{
float Cvalue = 0.0;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
if (row < a_rows && col < c_cols){
int table_index = col / b_cols;
int input_index = row;
for (int i = 0; i < a_cols; ++i)
{
Cvalue += A[input_index * a_cols + i] * B[table_index * (b_rows * b_cols) + i * b_cols + col % b_cols];
}
C[row * c_cols + col] = std::signbit(-1 * Cvalue) * bits[col % b_cols];
}
}
__global__ void vec_sum(float* a, float* b, int input_size, int table_nums, int hash_size) {
int row = threadIdx.y + blockIdx.y * blockDim.y;
int table_index = blockIdx.x;
if (row < input_size && table_index < table_nums) {
for (int j = 0; j < hash_size; ++j) {
b[row + table_index * input_size] += a[row * (table_nums * hash_size) + j + table_index * hash_size];
}
}
}
|
03bfc85e181e4ea80d1402b1b2c1aa866a1f4ca2.cu
|
#pragma once
#include<iostream>
#include<math.h>
__global__ void matMulKernel(
float *a, int a_rows, int a_cols,
float *b, int b_rows, int b_cols,
float *c, int c_rows, int c_cols
)
{
float Cvalue = 0.0;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < a_cols; ++i)
{
Cvalue += a[row * a_cols + i] * b[i * b_cols + col];
}
c[row * c_cols + col] = Cvalue;
}
/*
Here is the first version
array A is the input matrix
array B is the projection matrix
array C is the results matrix
__global__ void hash_(
float *A, int a_rows, int a_cols,
float *B, int b_rows, int b_cols,
float *C, int c_rows, int c_cols, float* bits
)
{
float Cvalue = 0.0;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
int table_index = row / a_rows;
int input_index = row % a_rows;
for (int i = 0; i < a_cols; ++i)
{
Cvalue += A[input_index * a_cols + i] * B[table_index * (b_rows * b_cols) + i * b_cols + col];
}
C[row * c_cols + col] = std::signbit(-1 * Cvalue) * bits[col];
// C[row * c_cols + col] = Cvalue;
}
__global__ void vec_sum(float* a, float* b, int cols) {
int row = threadIdx.x + blockIdx.x * blockDim.x;
for (int j = 0; j < cols; ++j) {
b[row] += a[row * cols + j];
}
}
*/
__global__ void hash_(
float *A, int a_rows, int a_cols,
float *B, int b_rows, int b_cols,
float *C, int c_rows, int c_cols, float* bits
)
{
float Cvalue = 0.0;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
if (row < a_rows && col < c_cols){
int table_index = col / b_cols;
int input_index = row;
for (int i = 0; i < a_cols; ++i)
{
Cvalue += A[input_index * a_cols + i] * B[table_index * (b_rows * b_cols) + i * b_cols + col % b_cols];
}
C[row * c_cols + col] = std::signbit(-1 * Cvalue) * bits[col % b_cols];
}
}
__global__ void vec_sum(float* a, float* b, int input_size, int table_nums, int hash_size) {
int row = threadIdx.y + blockIdx.y * blockDim.y;
int table_index = blockIdx.x;
if (row < input_size && table_index < table_nums) {
for (int j = 0; j < hash_size; ++j) {
b[row + table_index * input_size] += a[row * (table_nums * hash_size) + j + table_index * hash_size];
}
}
}
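/*
Illustrative host-side launch sketch (a guess at the intended usage; the names
d_A, d_B, d_C, d_bits, d_code, input_size, dim, table_nums and hash_size are
assumptions and do not appear in this file):
    // C holds one row per input vector and table_nums*hash_size hash-bit columns.
    dim3 block(16, 16);
    dim3 grid((table_nums * hash_size + block.x - 1) / block.x,
              (input_size + block.y - 1) / block.y);
    hash_<<<grid, block>>>(d_A, input_size, dim,
                           d_B, dim, hash_size,
                           d_C, input_size, table_nums * hash_size, d_bits);
    // vec_sum then folds each table's hash_size signed bits into a single code
    // per (input row, table); d_code needs table_nums*input_size floats.
    dim3 block2(1, 256);
    dim3 grid2(table_nums, (input_size + block2.y - 1) / block2.y);
    vec_sum<<<grid2, block2>>>(d_C, d_code, input_size, table_nums, hash_size);
*/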
|
b5c540551b93ebffea1887ed865fed72be5603a8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cmath>
#include <cstdio>
__constant__ int device_n;
__global__
void add(int n, float* x, float* y) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// if (threadIdx.x == 0) {
// printf("%d %d %d\n", blockIdx.x, gridDim.x, blockDim.x);
// }
for (int i = index; i < n; i += stride) {
y[i] = x[i] + y[i];
}
}
int main() {
int N = 1 << 28;
size_t size = N * sizeof(float);
float *h_x = (float*)malloc(size);
float *h_y = (float*)malloc(size);
float *d_x, *d_y;
hipMalloc(&d_x, size);
hipMalloc(&d_y, size);
for (int i = 0; i < N; ++i) {
h_x[i] = 1.0f;
h_y[i] = 2.0f;
}
hipMemcpy(d_x, h_x, size, hipMemcpyHostToDevice);
hipMemcpy(d_y, h_y, size, hipMemcpyHostToDevice);
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, d_x, d_y);
// hipDeviceSynchronize();
hipMemcpy(h_y, d_y, size, hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++) {
maxError = fmax(maxError, fabs(h_y[i]-3.0f));
}
std::cout << "Max error: " << maxError << std::endl;
hipFree(d_x);
hipFree(d_y);
free(h_x);
free(h_y);
return 0;
}
|
b5c540551b93ebffea1887ed865fed72be5603a8.cu
|
#include <iostream>
#include <cmath>
#include <cstdio>
__constant__ int device_n;
__global__
void add(int n, float* x, float* y) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
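    // Grid-stride loop: `stride` is the total number of launched threads, so each
    // thread handles elements index, index+stride, ...; with the launch used in
    // main the grid already covers n and each thread touches at most one element.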
// if (threadIdx.x == 0) {
// printf("%d %d %d\n", blockIdx.x, gridDim.x, blockDim.x);
// }
for (int i = index; i < n; i += stride) {
y[i] = x[i] + y[i];
}
}
int main() {
int N = 1 << 28;
size_t size = N * sizeof(float);
float *h_x = (float*)malloc(size);
float *h_y = (float*)malloc(size);
float *d_x, *d_y;
cudaMalloc(&d_x, size);
cudaMalloc(&d_y, size);
for (int i = 0; i < N; ++i) {
h_x[i] = 1.0f;
h_y[i] = 2.0f;
}
cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, d_x, d_y);
// cudaDeviceSynchronize();
cudaMemcpy(h_y, d_y, size, cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++) {
maxError = fmax(maxError, fabs(h_y[i]-3.0f));
}
std::cout << "Max error: " << maxError << std::endl;
cudaFree(d_x);
cudaFree(d_y);
free(h_x);
free(h_y);
return 0;
}
|
3c8b981387af18e357e3e050fd7139a60b2721c1.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* 3DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size */
#define NI 256
#define NJ 256
#define NK 256
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
/* Can switch DATA_TYPE between float and double */
typedef double DATA_TYPE;
#define NUM_STREAMS 3
#define NUM_CHUNKS 16
#define CHUNK_SIZE NI/NUM_CHUNKS //256/16=16
void conv3D(DATA_TYPE* A, DATA_TYPE* B)
{
int i, j, k;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK -1; ++k) // 2
{
//printf("i:%d\nj:%d\nk:%d\n", i, j, k);
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
}
}
void init(DATA_TYPE* A)
{
int i, j, k;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
for (k = 0; k < NK; ++k)
{
A[i*(NK * NJ) + j*NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
}
}
}
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
int i, j, k, fail;
fail = 0;
// Compare result from cpu and gpu...
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK - 1; ++k) // 2
{
if (percentDiff(B[i*(NK * NJ) + j*NK + k], B_outputFromGpu[i*(NK * NJ) + j*NK + k]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( 0 );
}
__global__ void convolution3D_kernel(DATA_TYPE *A, DATA_TYPE *B, int i)
{
int k = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
if ((i < (NI-1)) && (j < (NJ-1)) && (k < (NK-1)) && (i > 0) && (j > 0) && (k > 0))
{
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
void convolution3DCuda(DATA_TYPE* A, DATA_TYPE* B_outputFromGpu)
{
//double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
hipEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));
int i;
for (i = 1; i < NI - 1; ++i) // 0
{
hipLaunchKernelGGL(( convolution3D_kernel), dim3(grid), dim3(block) , 0, 0, A_gpu, B_gpu, i);
}
hipMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.3f Ms \n", elapsedTimeInMs);
hipFree(A_gpu);
hipFree(B_gpu);
}
void convolution3DCuda_async(DATA_TYPE* A, DATA_TYPE* B_outputFromGpu)
{
//DATA_TYPE *A_gpu;
//DATA_TYPE *B_gpu;
//hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
//hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
hipEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
hipStream_t streams[NUM_STREAMS];
for (int i=0; i< NUM_STREAMS; i++)
hipStreamCreate(&(streams[i]));
//hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));
int i,c;
//input initialization
hipMemPrefetchAsync(A, sizeof(DATA_TYPE)*NJ*NK, 0 ,streams[0]);
for (c=0; c < NUM_CHUNKS; c++){
if (c==(NUM_CHUNKS-1)){
hipMemPrefetchAsync(A+(c*CHUNK_SIZE+1)*NJ*NK, sizeof(DATA_TYPE)*NJ*NK*(CHUNK_SIZE-1), 0,streams[c % NUM_STREAMS]);
}else{
hipMemPrefetchAsync(A+(c*CHUNK_SIZE+1)*NJ*NK, sizeof(DATA_TYPE)*NJ*NK*CHUNK_SIZE, 0,streams[c % NUM_STREAMS]);
}
for (i = (c*CHUNK_SIZE ); i < ((c+1)*CHUNK_SIZE); ++i) // 0
{
if ((i>=1)&&(i<(NI-1)))
hipLaunchKernelGGL(( convolution3D_kernel), dim3(grid), dim3(block),0,streams[c % NUM_STREAMS] , A,B_outputFromGpu, i);
}
hipMemPrefetchAsync(B_outputFromGpu+c*CHUNK_SIZE*NK*NJ,sizeof(DATA_TYPE)*NJ*NK*CHUNK_SIZE,hipCpuDeviceId,streams[c % NUM_STREAMS]);
}
//hipMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
//hipFree(A_gpu);
//hipFree(B_gpu);
}
int main(int argc, char *argv[])
{
//double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
//A = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
//B = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
//B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
hipMallocManaged((void **)&A, sizeof(DATA_TYPE) * NI * NJ * NK);
hipHostMalloc((void **)&B, sizeof(DATA_TYPE) * NI * NJ * NK, hipHostMallocPortable);
hipMallocManaged((void **)&B_outputFromGpu, sizeof(DATA_TYPE) * NI * NJ *NK);
init(A);
GPU_argv_init();
convolution3DCuda_async(A, B_outputFromGpu);
conv3D(A,B);
compareResults(B, B_outputFromGpu);
hipFree(A);
hipFree(B);
hipFree(B_outputFromGpu);
return 0;
}
|
3c8b981387af18e357e3e050fd7139a60b2721c1.cu
|
/**
* 3DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#include "polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size */
#define NI 256
#define NJ 256
#define NK 256
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
/* Can switch DATA_TYPE between float and double */
typedef double DATA_TYPE;
#define NUM_STREAMS 3
#define NUM_CHUNKS 16
#define CHUNK_SIZE NI/NUM_CHUNKS //256/16=16
void conv3D(DATA_TYPE* A, DATA_TYPE* B)
{
int i, j, k;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK -1; ++k) // 2
{
//printf("i:%d\nj:%d\nk:%d\n", i, j, k);
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
}
}
void init(DATA_TYPE* A)
{
int i, j, k;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
for (k = 0; k < NK; ++k)
{
A[i*(NK * NJ) + j*NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
}
}
}
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
int i, j, k, fail;
fail = 0;
// Compare result from cpu and gpu...
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK - 1; ++k) // 2
{
if (percentDiff(B[i*(NK * NJ) + j*NK + k], B_outputFromGpu[i*(NK * NJ) + j*NK + k]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( 0 );
}
__global__ void convolution3D_kernel(DATA_TYPE *A, DATA_TYPE *B, int i)
{
int k = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
if ((i < (NI-1)) && (j < (NJ-1)) && (k < (NK-1)) && (i > 0) && (j > 0) && (k > 0))
{
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
void convolution3DCuda(DATA_TYPE* A, DATA_TYPE* B_outputFromGpu)
{
//double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));
int i;
for (i = 1; i < NI - 1; ++i) // 0
{
convolution3D_kernel<<< grid, block >>>(A_gpu, B_gpu, i);
}
cudaMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.3f Ms \n", elapsedTimeInMs);
cudaFree(A_gpu);
cudaFree(B_gpu);
}
void convolution3DCuda_async(DATA_TYPE* A, DATA_TYPE* B_outputFromGpu)
{
//DATA_TYPE *A_gpu;
//DATA_TYPE *B_gpu;
//cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
//cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
cudaStream_t streams[NUM_STREAMS];
for (int i=0; i< NUM_STREAMS; i++)
cudaStreamCreate(&(streams[i]));
//cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));
int i,c;
//input initialization
cudaMemPrefetchAsync(A, sizeof(DATA_TYPE)*NJ*NK, 0 ,streams[0]);
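	// Chunked pipeline: each iteration prefetches the chunk's slab of A to device 0
	// on stream c%NUM_STREAMS, launches one kernel per interior i-slice of that
	// chunk on the same stream, and prefetches the corresponding slab of the
	// output back to the host, so transfers can overlap with compute on the
	// other streams.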
for (c=0; c < NUM_CHUNKS; c++){
if (c==(NUM_CHUNKS-1)){
cudaMemPrefetchAsync(A+(c*CHUNK_SIZE+1)*NJ*NK, sizeof(DATA_TYPE)*NJ*NK*(CHUNK_SIZE-1), 0,streams[c % NUM_STREAMS]);
}else{
cudaMemPrefetchAsync(A+(c*CHUNK_SIZE+1)*NJ*NK, sizeof(DATA_TYPE)*NJ*NK*CHUNK_SIZE, 0,streams[c % NUM_STREAMS]);
}
for (i = (c*CHUNK_SIZE ); i < ((c+1)*CHUNK_SIZE); ++i) // 0
{
if ((i>=1)&&(i<(NI-1)))
convolution3D_kernel<<< grid, block,0,streams[c % NUM_STREAMS] >>>(A,B_outputFromGpu, i);
}
cudaMemPrefetchAsync(B_outputFromGpu+c*CHUNK_SIZE*NK*NJ,sizeof(DATA_TYPE)*NJ*NK*CHUNK_SIZE,cudaCpuDeviceId,streams[c % NUM_STREAMS]);
}
//cudaMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
//cudaFree(A_gpu);
//cudaFree(B_gpu);
}
int main(int argc, char *argv[])
{
//double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
//A = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
//B = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
//B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
cudaMallocManaged((void **)&A, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaHostAlloc((void **)&B, sizeof(DATA_TYPE) * NI * NJ * NK, cudaHostAllocPortable);
cudaMallocManaged((void **)&B_outputFromGpu, sizeof(DATA_TYPE) * NI * NJ *NK);
init(A);
GPU_argv_init();
convolution3DCuda_async(A, B_outputFromGpu);
conv3D(A,B);
compareResults(B, B_outputFromGpu);
cudaFree(A);
cudaFree(B);
cudaFree(B_outputFromGpu);
return 0;
}
|
5882998a8de62745039918d2444d308f4fb20799.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "math.h"
//math_constants.h
//#include "math_constants.h"
extern "C" {
#include "maxpool_layer.h"
#include "hip/hip_runtime.h"
}
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, float *input, float *output, int *indexes)
{
int h = (in_h-1)/stride + 1;
int w = (in_w-1)/stride + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = (-size-1)/2 + 1;
int h_offset = (-size-1)/2 + 1;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h-1)/stride + 1;
int w = (in_w-1)/stride + 1;
int c = in_c;
int area = (size-1)/stride;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = (-size-1)/2 + 1;
int h_offset = (-size-1)/2 + 1;
float d = 0;
int l, m;
for(l = -area; l < area+1; ++l){
for(m = -area; m < area+1; ++m){
int out_w = (j-w_offset)/stride + m;
int out_h = (i-h_offset)/stride + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
int h = (layer.h-1)/layer.stride + 1;
int w = (layer.w-1)/layer.stride + 1;
int c = layer.c;
size_t n = h*w*c*layer.batch;
hipLaunchKernelGGL(( forward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, state.input, layer.output_gpu, layer.indexes_gpu);
check_error(hipPeekAtLastError());
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
size_t n = layer.h*layer.w*layer.c*layer.batch;
hipLaunchKernelGGL(( backward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.delta_gpu, state.delta, layer.indexes_gpu);
check_error(hipPeekAtLastError());
}
|
5882998a8de62745039918d2444d308f4fb20799.cu
|
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include "math.h"
//math_constants.h
//#include "math_constants.h"
extern "C" {
#include "maxpool_layer.h"
#include "cuda.h"
}
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, float *input, float *output, int *indexes)
{
int h = (in_h-1)/stride + 1;
int w = (in_w-1)/stride + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = (-size-1)/2 + 1;
int h_offset = (-size-1)/2 + 1;
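    // w_offset/h_offset centre the pooling window on the output cell:
    // 0 for size==2, -1 for size==3 (integer division truncates toward zero).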
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h-1)/stride + 1;
int w = (in_w-1)/stride + 1;
int c = in_c;
int area = (size-1)/stride;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = (-size-1)/2 + 1;
int h_offset = (-size-1)/2 + 1;
float d = 0;
int l, m;
for(l = -area; l < area+1; ++l){
for(m = -area; m < area+1; ++m){
int out_w = (j-w_offset)/stride + m;
int out_h = (i-h_offset)/stride + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
int h = (layer.h-1)/layer.stride + 1;
int w = (layer.w-1)/layer.stride + 1;
int c = layer.c;
size_t n = h*w*c*layer.batch;
forward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, state.input, layer.output_gpu, layer.indexes_gpu);
check_error(cudaPeekAtLastError());
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
size_t n = layer.h*layer.w*layer.c*layer.batch;
backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.delta_gpu, state.delta, layer.indexes_gpu);
check_error(cudaPeekAtLastError());
}
|
f502e5a8d986948ce923c960d67412402b9cabdd.hip
|
// !!! This is a file automatically generated by hipify!!!
// To compile - gcc -o 3dFDTD FDTD3D.c -lm
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
// This was taken from stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define CHECK_ERROR(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line) {
if (code != hipSuccess) {
printf("GPU error: %s %s %d\n", hipGetErrorString(code), file, line);
exit(code);
}
}
extern __global__ void loop4_GPU(double*** Hx, double*** Ez, double Da, double Db, int kmax, int jmax, int imax) {
int i, j;
int k = blockIdx.x * 32 + threadIdx.x;
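    // The hard-coded 32 is the block width and must match threadsPerBlock(32)
    // used at the launch sites in main (equivalent to blockIdx.x*blockDim.x here).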
if (k < kmax) {
for (j = 0; j < jmax-1; j++) {
for (i = 1; i < imax-1; i++) {
Hx[i][j][k] = Da*Hx[i][j][k] + Db*((Ez[i][j][k] - Ez[i][j+1][k]) + (Ez[i][j][k+1]-Ez[i][j][k]));
}
}
}
}
extern __global__ void loop5_GPU(double*** Hy, double*** Ez, double Da, double Db, int kmax, int jmax, int imax) {
int i, j;
int k = blockIdx.x * 32 + threadIdx.x;
if (k < kmax) {
for (j = 1; j < jmax-1; j++) {
for (i = 0; i < imax-1; i++) {
Hy[i][j][k] = Da*Hy[i][j][k] + Db*((Ez[i+1][j][k] - Ez[i][j][k]) + (Ez[i][j][k]-Ez[i][j][k+1]));
}
}
}
}
extern __global__ void loop6_GPU(double*** Hz, double*** Ez, double Da, double Db, int kmax, int jmax, int imax) {
int i, j;
int k = (blockIdx.x * 32 + threadIdx.x) + 1; // this loop starts at k=1 so we add 1
if (k < kmax) {
for (j = 0; j < jmax-1; j++) {
for (i = 0; i < imax-1; i++) {
Hz[i][j][k] = Da*Hz[i][j][k] + Db*((Ez[i][j][k] - Ez[i+1][j][k]) + (Ez[i][j+1][k]-Ez[i][j][k]));
}
}
}
}
int main() {
printf("Running main\n");
int imax = 100, jmax = 100, nmax = 1000, nhalf = 20, no = nhalf*3, kmax = 100;
int i, j, n,k;
double c = 2.99792458e8, pi = 3.141592654, sigma = 0, mu = 4.0 * pi * 1.0e-7, eps = 8.85418782e-12;
double delta = 1e-3;
double dt = delta/(c*1.41421356237);
double ***Ex, ***Ey, ***Ez, ***Hy, ***Hx, ***Hz;
//struct timeval tstart,tend;
//int sec,usec;
hipEvent_t start_event, stop_event;
float elapsed_time;
Ex = (double ***)malloc((imax+1)*sizeof(double **));
Ey = (double ***)malloc((imax+1)*sizeof(double **));
Ez = (double ***)malloc((imax+1)*sizeof(double **));
Hx = (double ***)malloc((imax+1)*sizeof(double **));
Hy = (double ***)malloc((imax+1)*sizeof(double **));
Hz = (double ***)malloc((imax+1)*sizeof(double **));
for(i=0;i<(imax+1);i++) {
Ex[i] = (double **)malloc((jmax+1)*sizeof(double *));
Ey[i] = (double **)malloc((jmax+1)*sizeof(double *));
Ez[i] = (double **)malloc((jmax+1)*sizeof(double *));
Hx[i] = (double **)malloc((jmax+1)*sizeof(double *));
Hy[i] = (double **)malloc((jmax+1)*sizeof(double *));
Hz[i] = (double **)malloc((jmax+1)*sizeof(double *));
for(j=0;j<(jmax+1);j++) {
Ex[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Ey[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Ez[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Hx[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Hy[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Hz[i][j] = (double *)malloc((kmax+1)*sizeof(double));
}
}
for(k=0;k<(kmax+1);k++){
for(j=0;j<(jmax+1);j++){
for(i=0;i<(imax+1);i++){
Ex[i][j][k] = 0.0;
Ey[i][j][k] = 0.0;
Ez[i][j][k] = 0.0;
Hx[i][j][k] = 0.0;
Hy[i][j][k] = 0.0;
Hz[i][j][k] = 0.0;
}
}
}
double*** g_Hx;
double*** g_Hy;
double*** g_Hz;
double*** g_Ez;
//fprintf(fPointer, "allocating memory on GPU\n");
CHECK_ERROR(hipMalloc((void**)&g_Hx, (imax+1)*sizeof(double**)));
CHECK_ERROR(hipMalloc((void**)&g_Hy, (imax+1)*sizeof(double**)));
CHECK_ERROR(hipMalloc((void**)&g_Hz, (imax+1)*sizeof(double**)));
CHECK_ERROR(hipMalloc((void**)&g_Ez, (imax+1)*sizeof(double**)));
for(i=0;i<(imax+1);i++) {
CHECK_ERROR(hipMalloc((void**)&g_Hx[i], (jmax+1)*sizeof(double*)));
CHECK_ERROR(hipMalloc((void**)&g_Hy[i], (jmax+1)*sizeof(double*)));
CHECK_ERROR(hipMalloc((void**)&g_Hz[i], (jmax+1)*sizeof(double*)));
CHECK_ERROR(hipMalloc((void**)&g_Ez[i], (jmax+1)*sizeof(double*)));
for(j=0;j<(jmax+1);j++) {
CHECK_ERROR(hipMalloc((void**)&g_Hx[i][j], (kmax+1)*sizeof(double)));
CHECK_ERROR(hipMalloc((void**)&g_Hy[i][j], (kmax+1)*sizeof(double)));
CHECK_ERROR(hipMalloc((void**)&g_Hz[i][j], (kmax+1)*sizeof(double)));
CHECK_ERROR(hipMalloc((void**)&g_Ez[i][j], (kmax+1)*sizeof(double)));
}
}
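// Standard FDTD update coefficients for a (possibly lossy) medium:
// Ca/Cb scale the electric-field updates, Da/Db the magnetic-field updates.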
double Ca,Cb,Da,Db;
Ca = (1-((sigma*dt)/(2*eps)))/(1+((sigma*dt)/(2*eps)));
Cb = (dt/(eps*delta))/(1+((sigma*dt)/(2*eps)));
Da = (1-((sigma*dt)/(2*mu)))/(1+((sigma*dt)/(2*mu)));
Db = (dt/(mu*delta))/(1+((sigma*dt)/(2*mu)));
FILE * fPointer;
fPointer = fopen("myoutput3d.dat","w");
CHECK_ERROR(hipEventCreate(&start_event));
CHECK_ERROR(hipEventCreate(&stop_event));
CHECK_ERROR(hipEventRecord(start_event, 0));
for (n = 0; n < nmax; n++) {
char buf[18];
memset(buf, 0, 18);
sprintf(buf, "inside n loop\n");
fputs(buf, fPointer);
for (k = 1; k < kmax; k++) {
for (j = 1; j < jmax; j++) {
for (i = 0; i < imax; i++) {
Ex[i][j][k] = Ca*Ex[i][j][k] + Cb*((Hz[i][j][k] - Hy[i][j-1][k]) + (Hy[i][j][k-1] - Hy[i][j][k]));
}
}
}
for (k = 1; k < kmax; k++) {
for (j = 0; j < jmax; j++) {
for (i = 1; i < imax; i++) {
Ey[i][j][k] = Ca*Ey[i][j][k] + Cb*((Hz[i-1][j][k] - Hy[i][j][k]) + (Hy[i][j][k] - Hy[i][j][k-1]));
}
}
}
for (k = 0; k < kmax; k++) {
for (j = 1; j < jmax; j++) {
for (i = 1; i < imax; i++) {
Ez[i][j][k] = Ca*Ez[i][j][k] + Cb*((Hz[i][j][k] - Hy[i-1][j][k]) + (Hy[i][j-1][k] - Hy[i][j][k]));
}
}
}
Ez[imax/2][jmax/2][kmax/2] = exp(-(pow(((n-no)/(double)nhalf),2.0)));
fprintf(fPointer, "Copying memory to GPU\n");
for(i=0;i<(imax+1);i++) {
for(j=0;j<(jmax+1);j++) {
CHECK_ERROR(hipMemcpy(g_Hx[i][j], Hx[i][j], (kmax+1)*sizeof(double), hipMemcpyHostToDevice));
CHECK_ERROR(hipMemcpy(g_Hy[i][j], Hy[i][j], (kmax+1)*sizeof(double), hipMemcpyHostToDevice));
CHECK_ERROR(hipMemcpy(g_Hz[i][j], Hz[i][j], (kmax+1)*sizeof(double), hipMemcpyHostToDevice));
CHECK_ERROR(hipMemcpy(g_Ez[i][j], Ez[i][j], (kmax+1)*sizeof(double), hipMemcpyHostToDevice));
}
}
fprintf(fPointer, "Running loops on GPU\n");
dim3 threadsPerBlock(32);
dim3 numBlocks((kmax + threadsPerBlock.x-1) / threadsPerBlock.x);
hipLaunchKernelGGL(( loop4_GPU), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, g_Hx, g_Ez, Da, Db, kmax, jmax, imax);
hipLaunchKernelGGL(( loop5_GPU), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, g_Hy, g_Ez, Da, Db, kmax, jmax, imax);
hipLaunchKernelGGL(( loop6_GPU), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, g_Hz, g_Ez, Da, Db, kmax, jmax, imax);
fprintf(fPointer, "Copying results back to host\n");
for(i=0;i<(imax+1);i++) {
for(j=0;j<(jmax+1);j++) {
CHECK_ERROR(hipMemcpy(Hx[i][j], g_Hx[i][j], (kmax+1)*sizeof(double), hipMemcpyDeviceToHost));
CHECK_ERROR(hipMemcpy(Hy[i][j], g_Hy[i][j], (kmax+1)*sizeof(double), hipMemcpyDeviceToHost));
CHECK_ERROR(hipMemcpy(Hz[i][j], g_Hz[i][j], (kmax+1)*sizeof(double), hipMemcpyDeviceToHost));
CHECK_ERROR(hipMemcpy(Ez[i][j], g_Ez[i][j], (kmax+1)*sizeof(double), hipMemcpyDeviceToHost));
}
}
}
fprintf(fPointer, "Freeing memory on GPU\n");
for(i=0;i<(imax+1);i++) {
for(j=0;j<(jmax+1);j++) {
CHECK_ERROR(hipFree(g_Hx[i][j]));
CHECK_ERROR(hipFree(g_Hy[i][j]));
CHECK_ERROR(hipFree(g_Hz[i][j]));
CHECK_ERROR(hipFree(g_Ez[i][j]));
}
CHECK_ERROR(hipFree(g_Hx[i]));
CHECK_ERROR(hipFree(g_Hy[i]));
CHECK_ERROR(hipFree(g_Hz[i]));
CHECK_ERROR(hipFree(g_Ez[i]));
}
CHECK_ERROR(hipFree(g_Hx));
CHECK_ERROR(hipFree(g_Hy));
CHECK_ERROR(hipFree(g_Hz));
CHECK_ERROR(hipFree(g_Ez));
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&elapsed_time, start_event, stop_event);
fclose(fPointer);
printf("GPU Time: %.2f\n", elapsed_time);
return 0;
}
|
f502e5a8d986948ce923c960d67412402b9cabdd.cu
|
// To compile - gcc -o 3dFDTD FDTD3D.c -lm
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
// This was taken from stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define CHECK_ERROR(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line) {
if (code != cudaSuccess) {
printf("GPU error: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
extern __global__ void loop4_GPU(double*** Hx, double*** Ez, double Da, double Db, int kmax, int jmax, int imax) {
int i, j;
int k = blockIdx.x * 32 + threadIdx.x;
if (k < kmax) {
for (j = 0; j < jmax-1; j++) {
for (i = 1; i < imax-1; i++) {
Hx[i][j][k] = Da*Hx[i][j][k] + Db*((Ez[i][j][k] - Ez[i][j+1][k]) + (Ez[i][j][k+1]-Ez[i][j][k]));
}
}
}
}
extern __global__ void loop5_GPU(double*** Hy, double*** Ez, double Da, double Db, int kmax, int jmax, int imax) {
int i, j;
int k = blockIdx.x * 32 + threadIdx.x;
if (k < kmax) {
for (j = 1; j < jmax-1; j++) {
for (i = 0; i < imax-1; i++) {
Hy[i][j][k] = Da*Hy[i][j][k] + Db*((Ez[i+1][j][k] - Ez[i][j][k]) + (Ez[i][j][k]-Ez[i][j][k+1]));
}
}
}
}
extern __global__ void loop6_GPU(double*** Hz, double*** Ez, double Da, double Db, int kmax, int jmax, int imax) {
int i, j;
int k = (blockIdx.x * 32 + threadIdx.x) + 1; // this loop starts at k=1 so we add 1
if (k < kmax) {
for (j = 0; j < jmax-1; j++) {
for (i = 0; i < imax-1; i++) {
Hz[i][j][k] = Da*Hz[i][j][k] + Db*((Ez[i][j][k] - Ez[i+1][j][k]) + (Ez[i][j+1][k]-Ez[i][j][k]));
}
}
}
}
int main() {
printf("Running main\n");
int imax = 100, jmax = 100, nmax = 1000, nhalf = 20, no = nhalf*3, kmax = 100;
int i, j, n,k;
double c = 2.99792458e8, pi = 3.141592654, sigma = 0, mu = 4.0 * pi * 1.0e-7, eps = 8.85418782e-12;
double delta = 1e-3;
double dt = delta/(c*1.41421356237);
double ***Ex, ***Ey, ***Ez, ***Hy, ***Hx, ***Hz;
//struct timeval tstart,tend;
//int sec,usec;
cudaEvent_t start_event, stop_event;
float elapsed_time;
Ex = (double ***)malloc((imax+1)*sizeof(double **));
Ey = (double ***)malloc((imax+1)*sizeof(double **));
Ez = (double ***)malloc((imax+1)*sizeof(double **));
Hx = (double ***)malloc((imax+1)*sizeof(double **));
Hy = (double ***)malloc((imax+1)*sizeof(double **));
Hz = (double ***)malloc((imax+1)*sizeof(double **));
for(i=0;i<(imax+1);i++) {
Ex[i] = (double **)malloc((jmax+1)*sizeof(double *));
Ey[i] = (double **)malloc((jmax+1)*sizeof(double *));
Ez[i] = (double **)malloc((jmax+1)*sizeof(double *));
Hx[i] = (double **)malloc((jmax+1)*sizeof(double *));
Hy[i] = (double **)malloc((jmax+1)*sizeof(double *));
Hz[i] = (double **)malloc((jmax+1)*sizeof(double *));
for(j=0;j<(jmax+1);j++) {
Ex[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Ey[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Ez[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Hx[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Hy[i][j] = (double *)malloc((kmax+1)*sizeof(double));
Hz[i][j] = (double *)malloc((kmax+1)*sizeof(double));
}
}
for(k=0;k<(kmax+1);k++){
for(j=0;j<(jmax+1);j++){
for(i=0;i<(imax+1);i++){
Ex[i][j][k] = 0.0;
Ey[i][j][k] = 0.0;
Ez[i][j][k] = 0.0;
Hx[i][j][k] = 0.0;
Hy[i][j][k] = 0.0;
Hz[i][j][k] = 0.0;
}
}
}
double*** g_Hx;
double*** g_Hy;
double*** g_Hz;
double*** g_Ez;
//fprintf(fPointer, "allocating memory on GPU\n");
CHECK_ERROR(cudaMalloc((void**)&g_Hx, (imax+1)*sizeof(double**)));
CHECK_ERROR(cudaMalloc((void**)&g_Hy, (imax+1)*sizeof(double**)));
CHECK_ERROR(cudaMalloc((void**)&g_Hz, (imax+1)*sizeof(double**)));
CHECK_ERROR(cudaMalloc((void**)&g_Ez, (imax+1)*sizeof(double**)));
for(i=0;i<(imax+1);i++) {
CHECK_ERROR(cudaMalloc((void**)&g_Hx[i], (jmax+1)*sizeof(double*)));
CHECK_ERROR(cudaMalloc((void**)&g_Hy[i], (jmax+1)*sizeof(double*)));
CHECK_ERROR(cudaMalloc((void**)&g_Hz[i], (jmax+1)*sizeof(double*)));
CHECK_ERROR(cudaMalloc((void**)&g_Ez[i], (jmax+1)*sizeof(double*)));
for(j=0;j<(jmax+1);j++) {
CHECK_ERROR(cudaMalloc((void**)&g_Hx[i][j], (kmax+1)*sizeof(double)));
CHECK_ERROR(cudaMalloc((void**)&g_Hy[i][j], (kmax+1)*sizeof(double)));
CHECK_ERROR(cudaMalloc((void**)&g_Hz[i][j], (kmax+1)*sizeof(double)));
CHECK_ERROR(cudaMalloc((void**)&g_Ez[i][j], (kmax+1)*sizeof(double)));
}
}
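// Standard FDTD update coefficients for a (possibly lossy) medium:
// Ca/Cb scale the electric-field updates, Da/Db the magnetic-field updates.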
double Ca,Cb,Da,Db;
Ca = (1-((sigma*dt)/(2*eps)))/(1+((sigma*dt)/(2*eps)));
Cb = (dt/(eps*delta))/(1+((sigma*dt)/(2*eps)));
Da = (1-((sigma*dt)/(2*mu)))/(1+((sigma*dt)/(2*mu)));
Db = (dt/(mu*delta))/(1+((sigma*dt)/(2*mu)));
FILE * fPointer;
fPointer = fopen("myoutput3d.dat","w");
CHECK_ERROR(cudaEventCreate(&start_event));
CHECK_ERROR(cudaEventCreate(&stop_event));
CHECK_ERROR(cudaEventRecord(start_event, 0));
for (n = 0; n < nmax; n++) {
char buf[18];
memset(buf, 0, 18);
sprintf(buf, "inside n loop\n");
fputs(buf, fPointer);
for (k = 1; k < kmax; k++) {
for (j = 1; j < jmax; j++) {
for (i = 0; i < imax; i++) {
Ex[i][j][k] = Ca*Ex[i][j][k] + Cb*((Hz[i][j][k] - Hy[i][j-1][k]) + (Hy[i][j][k-1] - Hy[i][j][k]));
}
}
}
for (k = 1; k < kmax; k++) {
for (j = 0; j < jmax; j++) {
for (i = 1; i < imax; i++) {
Ey[i][j][k] = Ca*Ey[i][j][k] + Cb*((Hz[i-1][j][k] - Hy[i][j][k]) + (Hy[i][j][k] - Hy[i][j][k-1]));
}
}
}
for (k = 0; k < kmax; k++) {
for (j = 1; j < jmax; j++) {
for (i = 1; i < imax; i++) {
Ez[i][j][k] = Ca*Ez[i][j][k] + Cb*((Hz[i][j][k] - Hy[i-1][j][k]) + (Hy[i][j-1][k] - Hy[i][j][k]));
}
}
}
Ez[imax/2][jmax/2][kmax/2] = exp(-(pow(((n-no)/(double)nhalf),2.0)));
fprintf(fPointer, "Copying memory to GPU\n");
for(i=0;i<(imax+1);i++) {
for(j=0;j<(jmax+1);j++) {
CHECK_ERROR(cudaMemcpy(g_Hx[i][j], Hx[i][j], (kmax+1)*sizeof(double), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpy(g_Hy[i][j], Hy[i][j], (kmax+1)*sizeof(double), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpy(g_Hz[i][j], Hz[i][j], (kmax+1)*sizeof(double), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpy(g_Ez[i][j], Ez[i][j], (kmax+1)*sizeof(double), cudaMemcpyHostToDevice));
}
}
fprintf(fPointer, "Running loops on GPU\n");
dim3 threadsPerBlock(32);
dim3 numBlocks((kmax + threadsPerBlock.x-1) / threadsPerBlock.x);
loop4_GPU<<<numBlocks, threadsPerBlock>>>(g_Hx, g_Ez, Da, Db, kmax, jmax, imax);
loop5_GPU<<<numBlocks, threadsPerBlock>>>(g_Hy, g_Ez, Da, Db, kmax, jmax, imax);
loop6_GPU<<<numBlocks, threadsPerBlock>>>(g_Hz, g_Ez, Da, Db, kmax, jmax, imax);
fprintf(fPointer, "Copying results back to host\n");
for(i=0;i<(imax+1);i++) {
for(j=0;j<(jmax+1);j++) {
CHECK_ERROR(cudaMemcpy(Hx[i][j], g_Hx[i][j], (kmax+1)*sizeof(double), cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaMemcpy(Hy[i][j], g_Hy[i][j], (kmax+1)*sizeof(double), cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaMemcpy(Hz[i][j], g_Hz[i][j], (kmax+1)*sizeof(double), cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaMemcpy(Ez[i][j], g_Ez[i][j], (kmax+1)*sizeof(double), cudaMemcpyDeviceToHost));
}
}
}
fprintf(fPointer, "Freeing memory on GPU\n");
for(i=0;i<(imax+1);i++) {
for(j=0;j<(jmax+1);j++) {
CHECK_ERROR(cudaFree(g_Hx[i][j]));
CHECK_ERROR(cudaFree(g_Hy[i][j]));
CHECK_ERROR(cudaFree(g_Hz[i][j]));
CHECK_ERROR(cudaFree(g_Ez[i][j]));
}
CHECK_ERROR(cudaFree(g_Hx[i]));
CHECK_ERROR(cudaFree(g_Hy[i]));
CHECK_ERROR(cudaFree(g_Hz[i]));
CHECK_ERROR(cudaFree(g_Ez[i]));
}
CHECK_ERROR(cudaFree(g_Hx));
CHECK_ERROR(cudaFree(g_Hy));
CHECK_ERROR(cudaFree(g_Hz));
CHECK_ERROR(cudaFree(g_Ez));
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&elapsed_time, start_event, stop_event);
fclose(fPointer);
printf("GPU Time: %.2f\n", elapsed_time);
return 0;
}
|
09151daf4e9ff3ce6d3d7435156951b85d4d8528.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* @Name: vector_dot_int_2.cu
* @Description: Vector Floating-Point Dot Product.
* Multiple blocks, multiple threads per block.
*
* @Author: Giacomo Marciani <[email protected]>
* @Institution: University of Rome Tor Vergata
*
* @Usage: vector_dot_int_2 vectorDim blockSize
*
* Default values:
* vectorDim: 1048576
* blockSize: 256
*
* @See: http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/reduction/doc/reduction.pdf
*/
#include <stdio.h>
#include <math.h>
#include "../../../common/error.h"
#include "../../../common/random.h"
#include "../../../common/vector.h"
#include "../../../common/mathutil.h"
#ifdef DOUBLE
#define REAL double
#else
#define REAL float
#endif
#define EPSILON (float)1e-5
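// vectorDot: each thread multiplies one pair of elements into shared memory, the block
// then reduces with sequential addressing (stride halved each step), and thread 0
// writes the per-block partial sum to c[blockIdx.x].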
__global__ void vectorDot(const REAL *a, const REAL *b, REAL *c, const unsigned int vectorDim) {
extern __shared__ REAL temp[];
const unsigned int tid = threadIdx.x;
const unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
// threads past the end of the vectors contribute 0 so the whole block reaches the barriers below
temp[tid] = (pos < vectorDim) ? a[pos] * b[pos] : 0;
__syncthreads();
for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
temp[tid] += temp[tid + stride];
}
__syncthreads();
}
if (0 == tid) {
c[blockIdx.x] = temp[0];
}
}
__host__ void gpuVectorDot(const REAL *a, const REAL *b, REAL *result, const unsigned int vectorDim, const dim3 gridDim, const dim3 blockDim) {
REAL *dev_a, *dev_b, *dev_partial; // device copies of a, b, partial
REAL *partial; // host copy for partial result
const unsigned int size_a_b = vectorDim * sizeof(REAL); // bytes for a, b
const unsigned int size_partial = gridDim.x * sizeof(REAL); // bytes for partial
// allocate host copies of partial
HANDLE_NULL(partial = (REAL*)malloc(size_partial));
// allocate device copies of a, b, c
HANDLE_ERROR(hipMalloc((void**)&dev_a, size_a_b));
HANDLE_ERROR(hipMalloc((void**)&dev_b, size_a_b));
HANDLE_ERROR(hipMalloc((void**)&dev_partial, size_partial));
// copy inputs to device
HANDLE_ERROR(hipMemcpyAsync(dev_a, a, size_a_b, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpyAsync(dev_b, b, size_a_b, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemsetAsync(dev_partial, 0.0f, size_partial));
// shared memory settings
const unsigned int sharedMemSize = (unsigned int) blockDim.x * sizeof(REAL);
// launch kernel vectorDot
hipLaunchKernelGGL(( vectorDot), dim3(gridDim), dim3(blockDim), sharedMemSize , 0, dev_a, dev_b, dev_partial, vectorDim);
// copy device result back to host copy of c
HANDLE_ERROR(hipMemcpy(partial, dev_partial, size_partial, hipMemcpyDeviceToHost));
// reduce blocks result
*result = 0.0f;
for (unsigned int block = 0; block < gridDim.x; block++) {
(*result) += partial[block];
}
// free host
free(partial);
// free device
HANDLE_ERROR(hipFree(dev_a));
HANDLE_ERROR(hipFree(dev_b));
HANDLE_ERROR(hipFree(dev_partial));
}
int main(const int argc, const char **argv) {
REAL *a, *b, result; // host copies of a, b, result
unsigned int vectorDim; // vector dimension
unsigned int size_a_b; // bytes for a, b
unsigned int gridSize; // grid size
unsigned int blockSize; // block size
hipDeviceProp_t gpuInfo; // gpu properties
// check arguments
if (argc < 3) {
fprintf(stderr, "Usage: %s vectorDim blockSize\n", argv[0]);
exit(1);
}
vectorDim = atoi(argv[1]);
blockSize = atoi(argv[2]);
if (vectorDim < 1) {
fprintf(stderr, "Error: vectorDim expected >= 1, got %d\n", vectorDim);
exit(1);
}
if (!IS_POWER_OF_2(blockSize)) {
fprintf(stderr, "Error: blockSize expected as power of 2, got %d\n", blockSize);
exit(1);
}
// grid settings
gridSize = vectorDim / blockSize;
if (gridSize * blockSize < vectorDim) {
gridSize += 1;
}
dim3 gridDim(gridSize);
dim3 blockDim(blockSize);
size_a_b = vectorDim * sizeof(REAL);
HANDLE_ERROR(hipGetDeviceProperties(&gpuInfo, 0));
printf("----------------------------------\n");
printf("Vector Floating-Point Dot Product\n");
printf("Reduction: sequential addressing\n");
printf("----------------------------------\n");
#ifdef DOUBLE
printf("FP Precision: Double\n");
#else
printf("FP Precision: Single\n");
#endif
printf("Vector Dimension: %d\n", vectorDim);
printf("Grid Size: (%d %d %d) (max: (%d %d %d))\n",
gridDim.x, gridDim.y, gridDim.z,
gpuInfo.maxGridSize[0], gpuInfo.maxGridSize[1], gpuInfo.maxGridSize[2]);
printf("Block Size: (%d %d %d) (max: (%d %d %d))\n",
blockDim.x, blockDim.y, blockDim.z,
gpuInfo.maxThreadsDim[0], gpuInfo.maxThreadsDim[1], gpuInfo.maxThreadsDim[2]);
printf("---------------------------------\n");
// allocate host copies of a, b, c
HANDLE_NULL(a = (REAL*)malloc(size_a_b));
HANDLE_NULL(b = (REAL*)malloc(size_a_b));
// fill a, b with random data
#ifdef DOUBLE
random_vector_double(a, vectorDim);
random_vector_double(b, vectorDim);
#else
random_vector_float(a, vectorDim);
random_vector_float(b, vectorDim);
#endif
// launch kernel vectorDot()
gpuVectorDot(a, b, &result, vectorDim, gridDim, blockDim);
// test result
REAL expected;
#if DOUBLE
vector_dot_double(a, b, &expected, vectorDim);
#else
vector_dot_float(a, b, &expected, vectorDim);
#endif
if (fabs(expected - result) > EPSILON * expected) {
fprintf(stderr, "Error: expected %f, got %f (error:%f %%)\n",
expected, result, (fabs(expected - result) / expected) * 100.0);
} else {
printf("Correct\n");
}
// free host
free(a);
free(b);
return 0;
}
|
09151daf4e9ff3ce6d3d7435156951b85d4d8528.cu
|
/*
* @Name: vector_dot_int_2.cu
* @Description: Vector Floating-Point Dot Product.
* Multiple blocks, multiple threads per block.
*
* @Author: Giacomo Marciani <[email protected]>
* @Institution: University of Rome Tor Vergata
*
* @Usage: vector_dot_int_2 vectorDim blockSize
*
* Default values:
* vectorDim: 1048576
* blockSize: 256
*
* @See: http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/reduction/doc/reduction.pdf
*/
#include <stdio.h>
#include <math.h>
#include "../../../common/error.h"
#include "../../../common/random.h"
#include "../../../common/vector.h"
#include "../../../common/mathutil.h"
#ifdef DOUBLE
#define REAL double
#else
#define REAL float
#endif
#define EPSILON (float)1e-5
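// vectorDot: each thread multiplies one pair of elements into shared memory, the block
// then reduces with sequential addressing (stride halved each step), and thread 0
// writes the per-block partial sum to c[blockIdx.x].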
__global__ void vectorDot(const REAL *a, const REAL *b, REAL *c, const unsigned int vectorDim) {
extern __shared__ REAL temp[];
const unsigned int tid = threadIdx.x;
const unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
// threads past the end of the vectors contribute 0 so the whole block reaches the barriers below
temp[tid] = (pos < vectorDim) ? a[pos] * b[pos] : 0;
__syncthreads();
for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
temp[tid] += temp[tid + stride];
}
__syncthreads();
}
if (0 == tid) {
c[blockIdx.x] = temp[0];
}
}
__host__ void gpuVectorDot(const REAL *a, const REAL *b, REAL *result, const unsigned int vectorDim, const dim3 gridDim, const dim3 blockDim) {
REAL *dev_a, *dev_b, *dev_partial; // device copies of a, b, partial
REAL *partial; // host copy for partial result
const unsigned int size_a_b = vectorDim * sizeof(REAL); // bytes for a, b
const unsigned int size_partial = gridDim.x * sizeof(REAL); // bytes for partial
// allocate host copies of partial
HANDLE_NULL(partial = (REAL*)malloc(size_partial));
// allocate device copies of a, b, c
HANDLE_ERROR(cudaMalloc((void**)&dev_a, size_a_b));
HANDLE_ERROR(cudaMalloc((void**)&dev_b, size_a_b));
HANDLE_ERROR(cudaMalloc((void**)&dev_partial, size_partial));
// copy inputs to device
HANDLE_ERROR(cudaMemcpyAsync(dev_a, a, size_a_b, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpyAsync(dev_b, b, size_a_b, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemsetAsync(dev_partial, 0.0f, size_partial));
// shared memory settings
const unsigned int sharedMemSize = (unsigned int) blockDim.x * sizeof(REAL);
// launch kernel vectorDot
vectorDot<<< gridDim, blockDim, sharedMemSize >>>(dev_a, dev_b, dev_partial, vectorDim);
// copy device result back to host copy of c
HANDLE_ERROR(cudaMemcpy(partial, dev_partial, size_partial, cudaMemcpyDeviceToHost));
// reduce blocks result
*result = 0.0f;
for (unsigned int block = 0; block < gridDim.x; block++) {
(*result) += partial[block];
}
// free host
free(partial);
// free device
HANDLE_ERROR(cudaFree(dev_a));
HANDLE_ERROR(cudaFree(dev_b));
HANDLE_ERROR(cudaFree(dev_partial));
}
int main(const int argc, const char **argv) {
REAL *a, *b, result; // host copies of a, b, result
unsigned int vectorDim; // vector dimension
unsigned int size_a_b; // bytes for a, b
unsigned int gridSize; // grid size
unsigned int blockSize; // block size
cudaDeviceProp gpuInfo; // gpu properties
// check arguments
if (argc < 3) {
fprintf(stderr, "Usage: %s vectorDim blockSize\n", argv[0]);
exit(1);
}
vectorDim = atoi(argv[1]);
blockSize = atoi(argv[2]);
if (vectorDim < 1) {
fprintf(stderr, "Error: vectorDim expected >= 1, got %d\n", vectorDim);
exit(1);
}
if (!IS_POWER_OF_2(blockSize)) {
fprintf(stderr, "Error: blockSize expected as power of 2, got %d\n", blockSize);
exit(1);
}
// grid settings
gridSize = vectorDim / blockSize;
if (gridSize * blockSize < vectorDim) {
gridSize += 1;
}
dim3 gridDim(gridSize);
dim3 blockDim(blockSize);
size_a_b = vectorDim * sizeof(REAL);
HANDLE_ERROR(cudaGetDeviceProperties(&gpuInfo, 0));
printf("----------------------------------\n");
printf("Vector Floating-Point Dot Product\n");
printf("Reduction: sequential addressing\n");
printf("----------------------------------\n");
#ifdef DOUBLE
printf("FP Precision: Double\n");
#else
printf("FP Precision: Single\n");
#endif
printf("Vector Dimension: %d\n", vectorDim);
printf("Grid Size: (%d %d %d) (max: (%d %d %d))\n",
gridDim.x, gridDim.y, gridDim.z,
gpuInfo.maxGridSize[0], gpuInfo.maxGridSize[1], gpuInfo.maxGridSize[2]);
printf("Block Size: (%d %d %d) (max: (%d %d %d))\n",
blockDim.x, blockDim.y, blockDim.z,
gpuInfo.maxThreadsDim[0], gpuInfo.maxThreadsDim[1], gpuInfo.maxThreadsDim[2]);
printf("---------------------------------\n");
// allocate host copies of a, b, c
HANDLE_NULL(a = (REAL*)malloc(size_a_b));
HANDLE_NULL(b = (REAL*)malloc(size_a_b));
// fill a, b with random data
#ifdef DOUBLE
random_vector_double(a, vectorDim);
random_vector_double(b, vectorDim);
#else
random_vector_float(a, vectorDim);
random_vector_float(b, vectorDim);
#endif
// launch kernel vectorDot()
gpuVectorDot(a, b, &result, vectorDim, gridDim, blockDim);
// test result
REAL expected;
#if DOUBLE
vector_dot_double(a, b, &expected, vectorDim);
#else
vector_dot_float(a, b, &expected, vectorDim);
#endif
if (fabs(expected - result) > EPSILON * expected) {
fprintf(stderr, "Error: expected %f, got %f (error:%f %%)\n",
expected, result, (fabs(expected - result) / expected) * 100.0);
} else {
printf("Correct\n");
}
// free host
free(a);
free(b);
return 0;
}
|
9ee5e534920bcf24653e09808e9427d61cced754.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stencil_3d07.hpp"
#include "helper_math.h"
#include "helper_cuda.h"
texture<float, 1, hipReadModeElementType> tex; // 1D texture over the flattened 3D volume
void stencil_3d07(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int blockx, int blocky, int blockz, int ilp, int halo, hipStream_t stream);
__global__
void __stencil_3d07(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int ilp, int halo);
void stencil_3d07(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int blockx, int blocky, int blockz, int ilp, int halo, hipStream_t stream)
{
hipBindTexture(NULL, tex, deviceSrc, dimx*dimy*dimz*sizeof(float));
dim3 blockDim(blockx, blocky, blockz);
dim3 gridDim(
(dimx/blockDim.x + ((dimx%blockDim.x)?1:0)),
(dimy/blockDim.y + ((dimy%blockDim.y)?1:0)),
(dimz/blockDim.z + ((dimz%blockDim.z)?1:0)) );
size_t sharedMemSize = (blockDim.x+2*halo)*(blockDim.y+2*halo)*(blockDim.z+2*halo)*sizeof(float);
hipLaunchKernelGGL(( __stencil_3d07), dim3(gridDim), dim3(blockDim), sharedMemSize, stream,
deviceSrc, deviceDst, dimx, dimy, dimz, ilp, halo);
}
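// at(x, y, z, ...): flattened row-major index into the volume with every coordinate
// clamped to [0, dim-1], i.e. replicate-border addressing for halo reads.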
#define at(x, y, z, dimx, dimy, dimz) ( clamp((int)(z), 0, dimz-1)*dimy*dimx + \
clamp((int)(y), 0, dimy-1)*dimx + \
clamp((int)(x), 0, dimx-1) )
__global__
void __stencil_3d07(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int ilp, int halo)
{
extern __shared__ float sharedMemSrc[];
int shared_index_1d, global_index_1d, index_1d;
int3 shared_index_3d, global_index_3d, index_3d;
// Multi batch reading here
int3 sharedMemDim = make_int3(blockDim.x+2*halo,
blockDim.y+2*halo,
blockDim.z+2*halo);
int sharedMemSize = sharedMemDim.x*sharedMemDim.y*sharedMemDim.z;
int3 blockSizeDim = make_int3(blockDim.x+0*halo,
blockDim.y+0*halo,
blockDim.z+0*halo);
int blockSize = blockSizeDim.x*blockSizeDim.y*blockSizeDim.z;
int numBatches = sharedMemSize/blockSize + ((sharedMemSize%blockSize)?1:0);
for(int batch=0; batch<numBatches; batch++)
{
if(threadIdx.y<(blockDim.y/ilp))
{
#pragma unroll
for(int i=0; i<ilp; i++)
{
shared_index_1d = threadIdx.z * blockDim.y * blockDim.x +
(threadIdx.y + i * (blockDim.y/ilp)) * blockDim.x +
threadIdx.x +
blockSize*batch; //Magic is here [email protected]
shared_index_3d = make_int3((shared_index_1d % ((blockDim.y+2*halo)*(blockDim.x+2*halo))) % (blockDim.x+2*halo),
(shared_index_1d % ((blockDim.y+2*halo)*(blockDim.x+2*halo))) / (blockDim.x+2*halo),
(shared_index_1d / ((blockDim.y+2*halo)*(blockDim.x+2*halo))) );
global_index_3d = make_int3(blockIdx.x * blockDim.x + shared_index_3d.x - halo,
blockIdx.y * blockDim.y + shared_index_3d.y - halo,
blockIdx.z * blockDim.z + shared_index_3d.z - halo);
global_index_1d = global_index_3d.z * dimy * dimx +
global_index_3d.y * dimx +
global_index_3d.x;
if (shared_index_3d.z < (blockDim.z + 2*halo))
{
if(global_index_3d.z >= 0 && global_index_3d.z < dimz &&
global_index_3d.y >= 0 && global_index_3d.y < dimy &&
global_index_3d.x >= 0 && global_index_3d.x < dimx)
{
// sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = deviceSrc[global_index_1d];
sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = tex1Dfetch(tex, global_index_1d);
}
else
{
sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = -1;
}
}
}
}
}
__syncthreads();
float alpha = -6.0f;
float beta = +0.1f;
// Stencil processing here
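// 7-point stencil: alpha * centre value + beta * (sum of the six face neighbours),
// i.e. a scaled discrete Laplacian plus the weighted centre term.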
// float result = sharedMemSrc[at(threadIdx.x + halo, threadIdx.y + halo, threadIdx.z + halo, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)];
float tmp = beta*(sharedMemSrc[at(threadIdx.x + halo +1, threadIdx.y + halo+0, threadIdx.z + halo+0, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] +
sharedMemSrc[at(threadIdx.x + halo -1, threadIdx.y + halo+0, threadIdx.z + halo+0, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] +
sharedMemSrc[at(threadIdx.x + halo +0, threadIdx.y + halo+1, threadIdx.z + halo+0, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] +
sharedMemSrc[at(threadIdx.x + halo +0, threadIdx.y + halo-1, threadIdx.z + halo+0, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] +
sharedMemSrc[at(threadIdx.x + halo +0, threadIdx.y + halo+0, threadIdx.z + halo+1, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] +
sharedMemSrc[at(threadIdx.x + halo +0, threadIdx.y + halo+0, threadIdx.z + halo-1, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)]);
float result = alpha*sharedMemSrc[at(threadIdx.x + halo, threadIdx.y + halo, threadIdx.z + halo, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] + tmp;
// Single pass writing here
index_3d = make_int3(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.z * blockDim.z + threadIdx.z);
index_1d = index_3d.z * dimy * dimx +
index_3d.y * dimx +
index_3d.x;
if (index_3d.z < dimz &&
index_3d.y < dimy &&
index_3d.x < dimx)
{
deviceDst[index_1d] = result;
}
}
|
9ee5e534920bcf24653e09808e9427d61cced754.cu
|
#include "stencil_3d07.hpp"
#include "helper_math.h"
#include "helper_cuda.h"
texture<float, 1, cudaReadModeElementType> tex; // 1D texture over the flattened 3D volume
void stencil_3d07(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int blockx, int blocky, int blockz, int ilp, int halo, cudaStream_t stream);
__global__
void __stencil_3d07(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int ilp, int halo);
void stencil_3d07(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int blockx, int blocky, int blockz, int ilp, int halo, cudaStream_t stream)
{
cudaBindTexture(NULL, tex, deviceSrc, dimx*dimy*dimz*sizeof(float));
dim3 blockDim(blockx, blocky, blockz);
dim3 gridDim(
(dimx/blockDim.x + ((dimx%blockDim.x)?1:0)),
(dimy/blockDim.y + ((dimy%blockDim.y)?1:0)),
(dimz/blockDim.z + ((dimz%blockDim.z)?1:0)) );
size_t sharedMemSize = (blockDim.x+2*halo)*(blockDim.y+2*halo)*(blockDim.z+2*halo)*sizeof(float);
__stencil_3d07<<<gridDim, blockDim, sharedMemSize, stream>>>
(deviceSrc, deviceDst, dimx, dimy, dimz, ilp, halo);
}
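// at(x, y, z, ...): flattened row-major index into the volume with every coordinate
// clamped to [0, dim-1], i.e. replicate-border addressing for halo reads.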
#define at(x, y, z, dimx, dimy, dimz) ( clamp((int)(z), 0, dimz-1)*dimy*dimx + \
clamp((int)(y), 0, dimy-1)*dimx + \
clamp((int)(x), 0, dimx-1) )
__global__
void __stencil_3d07(float* deviceSrc, float* deviceDst, int dimx, int dimy, int dimz, int ilp, int halo)
{
extern __shared__ float sharedMemSrc[];
int shared_index_1d, global_index_1d, index_1d;
int3 shared_index_3d, global_index_3d, index_3d;
// Multi batch reading here
int3 sharedMemDim = make_int3(blockDim.x+2*halo,
blockDim.y+2*halo,
blockDim.z+2*halo);
int sharedMemSize = sharedMemDim.x*sharedMemDim.y*sharedMemDim.z;
int3 blockSizeDim = make_int3(blockDim.x+0*halo,
blockDim.y+0*halo,
blockDim.z+0*halo);
int blockSize = blockSizeDim.x*blockSizeDim.y*blockSizeDim.z;
int numBatches = sharedMemSize/blockSize + ((sharedMemSize%blockSize)?1:0);
for(int batch=0; batch<numBatches; batch++)
{
if(threadIdx.y<(blockDim.y/ilp))
{
#pragma unroll
for(int i=0; i<ilp; i++)
{
shared_index_1d = threadIdx.z * blockDim.y * blockDim.x +
(threadIdx.y + i * (blockDim.y/ilp)) * blockDim.x +
threadIdx.x +
blockSize*batch; //Magic is here [email protected]
shared_index_3d = make_int3((shared_index_1d % ((blockDim.y+2*halo)*(blockDim.x+2*halo))) % (blockDim.x+2*halo),
(shared_index_1d % ((blockDim.y+2*halo)*(blockDim.x+2*halo))) / (blockDim.x+2*halo),
(shared_index_1d / ((blockDim.y+2*halo)*(blockDim.x+2*halo))) );
global_index_3d = make_int3(blockIdx.x * blockDim.x + shared_index_3d.x - halo,
blockIdx.y * blockDim.y + shared_index_3d.y - halo,
blockIdx.z * blockDim.z + shared_index_3d.z - halo);
global_index_1d = global_index_3d.z * dimy * dimx +
global_index_3d.y * dimx +
global_index_3d.x;
if (shared_index_3d.z < (blockDim.z + 2*halo))
{
if(global_index_3d.z >= 0 && global_index_3d.z < dimz &&
global_index_3d.y >= 0 && global_index_3d.y < dimy &&
global_index_3d.x >= 0 && global_index_3d.x < dimx)
{
// sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = deviceSrc[global_index_1d];
sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = tex1Dfetch(tex, global_index_1d);
}
else
{
sharedMemSrc[at(shared_index_3d.x, shared_index_3d.y, shared_index_3d.z, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] = -1;
}
}
}
}
}
__syncthreads();
float alpha = -6.0f;
float beta = +0.1f;
// Stencil processing here
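// 7-point stencil: alpha * centre value + beta * (sum of the six face neighbours),
// i.e. a scaled discrete Laplacian plus the weighted centre term.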
// float result = sharedMemSrc[at(threadIdx.x + halo, threadIdx.y + halo, threadIdx.z + halo, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)];
float tmp = beta*(sharedMemSrc[at(threadIdx.x + halo +1, threadIdx.y + halo+0, threadIdx.z + halo+0, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] +
sharedMemSrc[at(threadIdx.x + halo -1, threadIdx.y + halo+0, threadIdx.z + halo+0, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] +
sharedMemSrc[at(threadIdx.x + halo +0, threadIdx.y + halo+1, threadIdx.z + halo+0, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] +
sharedMemSrc[at(threadIdx.x + halo +0, threadIdx.y + halo-1, threadIdx.z + halo+0, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] +
sharedMemSrc[at(threadIdx.x + halo +0, threadIdx.y + halo+0, threadIdx.z + halo+1, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] +
sharedMemSrc[at(threadIdx.x + halo +0, threadIdx.y + halo+0, threadIdx.z + halo-1, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)]);
float result = alpha*sharedMemSrc[at(threadIdx.x + halo, threadIdx.y + halo, threadIdx.z + halo, sharedMemDim.x, sharedMemDim.y, sharedMemDim.z)] + tmp;
// Single pass writing here
index_3d = make_int3(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.z * blockDim.z + threadIdx.z);
index_1d = index_3d.z * dimy * dimx +
index_3d.y * dimx +
index_3d.x;
if (index_3d.z < dimz &&
index_3d.y < dimy &&
index_3d.x < dimx)
{
deviceDst[index_1d] = result;
}
}
|
5a0f83ab49d1f407d67783ae5b8003801182561f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Convolutionf32.h"
#include "CommonOp.cuh"
/***
* Kernel functions
* cuda_Convolution2Df32
* cuda_Convolution2DWeightsGradf32
* cuda_Convolution2DInputGradf32
***/
//
__global__
void cuda_Convolution2Df32(float* target, float* image, float* kernel, float* bias, int nx, int ny,
int channels, int k,int batchSize, int outputChannels) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < nx && j < ny) {
int imgSize = nx * ny;
int kernelSize = k * k;
int offset = k / 2;
int xof = i - offset;
int yof = j - offset;
int maxCol = yof + k;
if (maxCol > ny) {
++maxCol -= ny;
}
else {
++maxCol = k;
}
int maxRow = xof + k;
if (maxRow > nx) {
++maxRow -= nx;
}
else {
++maxRow = k;
}
for (int t = 0; t < batchSize;t++) {
//printf("%d\n", (size_t)image);
float val = 0;
for (int ch = 0; ch < channels; ch++) {
int chOffset = ch * imgSize;
int kernelOffset = ch * kernelSize;
for (int col = max(0, -yof); col < maxCol; col++) {
int totalOffset = (yof + col) * nx + chOffset + xof;
int totalKernelOffset = kernelOffset + col * k;
for (int row = max(0, -xof); row < maxRow; row++) {
val += image[row + totalOffset] * kernel[row + totalKernelOffset];
}
}
}
if (bias) {
target[i + j * nx] = bias[i + j * nx] + val;
}
else {
target[i + j * nx] = val;
}
image += imgSize * channels;
target += imgSize * outputChannels;
}
}
}
__global__ //Each thread computes one position of the gradient
void cuda_Convolution2DWeightsGradf32(float* weigthsGrad, float* image, float* outputGrad,
int nx, int ny, int channels, int k, int outputChannels, int batchSize) {
int ki = threadIdx.x + blockIdx.x * blockDim.x;
int kj = threadIdx.y + blockIdx.y * blockDim.y;
int kz = threadIdx.z + blockIdx.z * blockDim.z;
if (ki < k && kj < k && kz < channels) {
int imgSize = nx * ny;
image = image + kz * imgSize;
int offset = k / 2;
int infXOff = max(ki - k / 2, 0);
int infYOff = max(kj - k / 2, 0);
int supXOff = min(nx, nx + ki - offset);
int supYOff = min(ny, ny + kj - offset);
float val = 0;
for (int t = 0 ; t < batchSize; t++) {
for (int j = max(infYOff, 0); j < supYOff; j++) {
for (int i = infXOff; i < supXOff; i++) {
val += image[i + j * ny] * outputGrad[i + j * ny];
}
}
image += imgSize * channels;
outputGrad += imgSize * outputChannels;
}
weigthsGrad[ki + k * (kj + k * kz)] += val;
}
}
__global__ //Each thread computes one position of the gradient
void cuda_Convolution2DInputGradf32(float* inputGrad, float* kernel, float* outputGrad,
int nx, int ny, int channels, int k, int outputChannels, int batchSize) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int z = threadIdx.z + blockIdx.z * blockDim.z;
if (i < nx && j < ny && z < channels) {
int index = i + j * ny;
int imgSize = nx * ny;
inputGrad = inputGrad + z * imgSize;
kernel = kernel + z * k * k;
int offset = k / 2;
int xof = i - offset;
int yof = j - offset;
int maxCol= yof + k;
if (maxCol > ny) {
++maxCol -= ny;
}else {
++maxCol = k;
}
int maxRow = xof + k;
if (maxRow > nx) {
++maxRow -= nx;
}
else {
++maxRow = k;
}
for (int col = max(0,-yof); col < maxCol; col++) {
int colOffset = col * k;
for (int row = max(0,-xof); row < maxRow; row++) {
inputGrad[index] += kernel[row + colOffset]*outputGrad[xof + row + k*(yof+col)];
}
}
}
}
/***
* Host functions
* Convolution2Df32
* Convolution2DWeightsGradf32
* Convolution2DInputGradf32
***/
hipError_t Convolution2Df32(float* target, float* image, float* kernel, float* bias, int nx, int ny,
int channels, int k,int batchSize, int outputChannels, hipStream_t stream) {
dim3 blocks((nx + 15) / 16, (ny + 15) / 16, 1);
dim3 threads(16, 16, 1);
cuda_Convolution2Df32 << <blocks, threads,0,stream >> > (target, image, kernel, bias, nx, ny, channels, k,batchSize, outputChannels);
hipDeviceSynchronize();
return hipGetLastError();
}
hipError_t Convolution2DWeightsGradf32(float* weigthsGrad, float* image, float* outputGrad,
int nx, int ny, int channels, int k,int outputChannels,int batchSize, hipStream_t stream) {
dim3 blocks((k + 15) / 16, (k + 15) / 16, (channels + 3) / 4);
dim3 threads(16, 16, 4);
cuda_Convolution2DWeightsGradf32 << <blocks, threads,0,stream >> > (weigthsGrad, image, outputGrad, nx, ny,
channels, k,outputChannels,batchSize);
hipDeviceSynchronize();
return hipGetLastError();
}
hipError_t Convolution2DInputGradf32(float* inputGrad, float* kernel, float* outputGrad,
int nx, int ny, int channels, int k, int outputChannels, int batchSize, hipStream_t stream) {
dim3 blocks((k + 15) / 16, (k + 15) / 16, (channels + 3) / 4);
dim3 threads(16, 16, 4);
cuda_Convolution2DInputGradf32 << <blocks, threads,0,stream >> > (inputGrad, kernel, outputGrad, nx, ny,
channels, k,outputChannels,batchSize);
hipDeviceSynchronize();
return hipGetLastError();
}
|
5a0f83ab49d1f407d67783ae5b8003801182561f.cu
|
#include "Convolutionf32.h"
#include "CommonOp.cuh"
/***
* Kernel functions
* cuda_Convolution2Df32
* cuda_Convolution2DWeightsGradf32
* cuda_Convolution2DInputGradf32
***/
//
__global__
void cuda_Convolution2Df32(float* target, float* image, float* kernel, float* bias, int nx, int ny,
int channels, int k,int batchSize, int outputChannels) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < nx && j < ny) {
int imgSize = nx * ny;
int kernelSize = k * k;
int offset = k / 2;
int xof = i - offset;
int yof = j - offset;
int maxCol = yof + k;
if (maxCol > ny) {
++maxCol -= ny;
}
else {
++maxCol = k;
}
int maxRow = xof + k;
if (maxRow > nx) {
++maxRow -= nx;
}
else {
++maxRow = k;
}
for (int t = 0; t < batchSize;t++) {
//printf("%d\n", (size_t)image);
float val = 0;
for (int ch = 0; ch < channels; ch++) {
int chOffset = ch * imgSize;
int kernelOffset = ch * kernelSize;
for (int col = max(0, -yof); col < maxCol; col++) {
int totalOffset = (yof + col) * nx + chOffset + xof;
int totalKernelOffset = kernelOffset + col * k;
for (int row = max(0, -xof); row < maxRow; row++) {
val += image[row + totalOffset] * kernel[row + totalKernelOffset];
}
}
}
if (bias) {
target[i + j * nx] = bias[i + j * nx] + val;
}
else {
target[i + j * nx] = val;
}
image += imgSize * channels;
target += imgSize * outputChannels;
}
}
}
__global__ //Each thread computes one position of the gradient
void cuda_Convolution2DWeightsGradf32(float* weigthsGrad, float* image, float* outputGrad,
int nx, int ny, int channels, int k, int outputChannels, int batchSize) {
int ki = threadIdx.x + blockIdx.x * blockDim.x;
int kj = threadIdx.y + blockIdx.y * blockDim.y;
int kz = threadIdx.z + blockIdx.z * blockDim.z;
if (ki < k && kj < k && kz < channels) {
int imgSize = nx * ny;
image = image + kz * imgSize;
int offset = k / 2;
int infXOff = max(ki - k / 2, 0);
int infYOff = max(kj - k / 2, 0);
int supXOff = min(nx, nx + ki - offset);
int supYOff = min(ny, ny + kj - offset);
float val = 0;
for (int t = 0 ; t < batchSize; t++) {
for (int j = max(infYOff, 0); j < supYOff; j++) {
for (int i = infXOff; i < supXOff; i++) {
val += image[i + j * ny] * outputGrad[i + j * ny];
}
}
image += imgSize * channels;
outputGrad += imgSize * outputChannels;
}
weigthsGrad[ki + k * (kj + k * kz)] += val;
}
}
__global__ //Each thread computes one position of the gradient
void cuda_Convolution2DInputGradf32(float* inputGrad, float* kernel, float* outputGrad,
int nx, int ny, int channels, int k, int outputChannels, int batchSize) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int z = threadIdx.z + blockIdx.z * blockDim.z;
if (i < nx && j < ny && z < channels) {
int index = i + j * ny;
int imgSize = nx * ny;
inputGrad = inputGrad + z * imgSize;
kernel = kernel + z * k * k;
int offset = k / 2;
int xof = i - offset;
int yof = j - offset;
int maxCol= yof + k;
if (maxCol > ny) {
++maxCol -= ny;
}else {
++maxCol = k;
}
int maxRow = xof + k;
if (maxRow > nx) {
++maxRow -= nx;
}
else {
++maxRow = k;
}
for (int col = max(0,-yof); col < maxCol; col++) {
int colOffset = col * k;
for (int row = max(0,-xof); row < maxRow; row++) {
inputGrad[index] += kernel[row + colOffset]*outputGrad[xof + row + k*(yof+col)];
}
}
}
}
/***
* Host functions
* Convolution2Df32
* Convolution2DWeightsGradf32
* Convolution2DInputGradf32
***/
cudaError_t Convolution2Df32(float* target, float* image, float* kernel, float* bias, int nx, int ny,
int channels, int k,int batchSize, int outputChannels, cudaStream_t stream) {
dim3 blocks((nx + 15) / 16, (ny + 15) / 16, 1);
dim3 threads(16, 16, 1);
cuda_Convolution2Df32 << <blocks, threads,0,stream >> > (target, image, kernel, bias, nx, ny, channels, k,batchSize, outputChannels);
cudaDeviceSynchronize();
return cudaGetLastError();
}
cudaError_t Convolution2DWeightsGradf32(float* weigthsGrad, float* image, float* outputGrad,
int nx, int ny, int channels, int k,int outputChannels,int batchSize, cudaStream_t stream) {
dim3 blocks((k + 15) / 16, (k + 15) / 16, (channels + 3) / 4);
dim3 threads(16, 16, 4);
cuda_Convolution2DWeightsGradf32 << <blocks, threads,0,stream >> > (weigthsGrad, image, outputGrad, nx, ny,
channels, k,outputChannels,batchSize);
cudaDeviceSynchronize();
return cudaGetLastError();
}
cudaError_t Convolution2DInputGradf32(float* inputGrad, float* kernel, float* outputGrad,
int nx, int ny, int channels, int k, int outputChannels, int batchSize, cudaStream_t stream) {
dim3 blocks((k + 15) / 16, (k + 15) / 16, (channels + 3) / 4);
dim3 threads(16, 16, 4);
cuda_Convolution2DInputGradf32 << <blocks, threads,0,stream >> > (inputGrad, kernel, outputGrad, nx, ny,
channels, k,outputChannels,batchSize);
cudaDeviceSynchronize();
return cudaGetLastError();
}
|
683c9dc60f4a65e846c3a91994de3e357b9b5a8e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "struct_params.h"
__global__ void add_kernel(Paras* para, int size)
{
int tid = threadIdx.x;
if (tid < size)
{
para->d_c[tid] = para->d_a[tid] + para->d_b[tid];
}
}
void struct_para_main()
{
const int size = 10;
const int bytes = size * sizeof(float);
// alloc struct on the host
Paras* h_para; // struct host
hipHostMalloc((void**)&h_para, sizeof(Paras));
initP(h_para, bytes);
// init values of h_a, h_b
for (int i = 0; i < size; i++)
{
h_para->h_a[i] = 1.1f;
h_para->h_b[i] = 2.1f;
}
// mem cpy host 2 dev
hipMemcpy(h_para->d_a, h_para->h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(h_para->d_b, h_para->h_b, bytes, hipMemcpyHostToDevice);
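// Note: the kernel dereferences h_para (pinned host memory) directly; this relies on
// unified virtual addressing making page-locked host allocations device-accessible.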
add_kernel << <1, size >> > (h_para, size);
// mem cpy dev 2 host
hipMemcpy(h_para->h_c, h_para->d_c, bytes, hipMemcpyDeviceToHost);
for (int i = 0; i < size; i++)
{
cout << h_para->h_c[i] << " ";
}
cout << endl;
// free
hipFree(h_para->d_a);
hipFree(h_para->d_b);
hipFree(h_para->d_c);
hipHostFree(h_para->h_a);
hipHostFree(h_para->h_b);
hipHostFree(h_para->h_c);
hipHostFree(h_para);
}
void initP(Paras * h_para, int bytes)
{
// alloc params in struct on the host/dev
hipHostMalloc((void**)&h_para->h_a, bytes);
hipHostMalloc((void**)&h_para->h_b, bytes);
hipHostMalloc((void**)&h_para->h_c, bytes);
hipMalloc((void**)&h_para->d_a, bytes);
hipMalloc((void**)&h_para->d_b, bytes);
hipMalloc((void**)&h_para->d_c, bytes);
}
|
683c9dc60f4a65e846c3a91994de3e357b9b5a8e.cu
|
#include "struct_params.h"
__global__ void add_kernel(Paras* para, int size)
{
int tid = threadIdx.x;
if (tid < size)
{
para->d_c[tid] = para->d_a[tid] + para->d_b[tid];
}
}
void struct_para_main()
{
const int size = 10;
const int bytes = size * sizeof(float);
// alloc struct on the host
Paras* h_para; // the struct only needs to be defined on the host
cudaMallocHost((void**)&h_para, sizeof(Paras));
initP(h_para, bytes);
// init values of h_a, h_b
for (int i = 0; i < size; i++)
{
h_para->h_a[i] = 1.1f;
h_para->h_b[i] = 2.1f;
}
// mem cpy host 2 dev
cudaMemcpy(h_para->d_a, h_para->h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(h_para->d_b, h_para->h_b, bytes, cudaMemcpyHostToDevice);
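// Note: the kernel dereferences h_para (pinned host memory) directly; this relies on
// unified virtual addressing making page-locked host allocations device-accessible.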
add_kernel << <1, size >> > (h_para, size);
// mem cpy dev 2 host
cudaMemcpy(h_para->h_c, h_para->d_c, bytes, cudaMemcpyDeviceToHost);
for (int i = 0; i < size; i++)
{
cout << h_para->h_c[i] << " ";
}
cout << endl;
// free
cudaFree(h_para->d_a);
cudaFree(h_para->d_b);
cudaFree(h_para->d_c);
cudaFreeHost(h_para->h_a);
cudaFreeHost(h_para->h_b);
cudaFreeHost(h_para->h_c);
cudaFreeHost(h_para);
}
void initP(Paras * h_para, int bytes)
{
// alloc params in struct on the host/dev
cudaMallocHost((void**)&h_para->h_a, bytes);
cudaMallocHost((void**)&h_para->h_b, bytes);
cudaMallocHost((void**)&h_para->h_c, bytes);
cudaMalloc((void**)&h_para->d_a, bytes);
cudaMalloc((void**)&h_para->d_b, bytes);
cudaMalloc((void**)&h_para->d_c, bytes);
}
|
3df73c743c0669d0ba9d9c22b736764b39d4430b.hip
|
// !!! This is a file automatically generated by hipify!!!
//Use multiple block multiple thread
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define WIDTH 512
#define threadDim 16
__global__ void MatMulKernel(float *Md, float *Nd, float *Pd, int width);
void MatMul(float *M, float *N, float *P, int width);
int main(int argc, char *argv[])
{
int width = WIDTH;
float M[WIDTH*WIDTH] = {0};
float N[WIDTH*WIDTH] = {0};
float P[WIDTH*WIDTH] = {0};
float MxN[WIDTH*WIDTH] = {0};
int pass = 1;
srand(time(NULL));
for (int i = 0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
M[i*width + j] = rand() % 30;
N[i*width + j] = rand() % 30;
}
}
struct timeval starttime, endtime;
gettimeofday(&starttime, NULL);
for (int i = 0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
for (int k = 0; k < width; ++k) {
MxN[i*width + j] += M[i*width + k] * N[k*width + j];
}
}
}
gettimeofday(&endtime, NULL);
double executime;
executime = (endtime.tv_sec - starttime.tv_sec) * 1000.0;
executime += (endtime.tv_usec - starttime.tv_usec) / 1000.0;
printf("CPU time: %13lf msec\n", executime);
MatMul(M, N, P, width);
for (int i = 0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
if(MxN[i*width + j] != P[i*width + j]) {
printf("MxN[%d][%d] = %2.0f P[%d][%d] = %2.0f\n", i, j, MxN[i*width + j], i, j, P[i*width + j]);
pass = 0;
}
}
}
printf("Test %s\n", (pass)?"PASSED":"FAILED");
return 0;
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(float *Md, float *Nd, float *Pd, int width)
{
// Thread row and column within matrix
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Each thread computes one element of P
// by accumulating results into Pvalue
float Pvalue = 0;
// Multiply M and N
for (int k = 0; k < width; ++k) {
float Melement = *(Md + row*width + k);
float Nelement = *(Nd + k*width + col);
Pvalue += Melement * Nelement;
}
// Write Pvalue to device memory
// Each thread writes one element
*(Pd + row*width + col) = Pvalue;
}
// Matrix multiplication - Host code
void MatMul(float *M, float *N, float *P, int width)
{
size_t size = width * width * sizeof(float);
float *Md, *Nd, *Pd;
// Allocate and Load M, N to device memory
hipMalloc((void **)&Md, size);
hipMemcpy(Md, M, size, hipMemcpyHostToDevice);
hipMalloc((void **)&Nd, size);
hipMemcpy(Nd, N, size, hipMemcpyHostToDevice);
// Allocate P on the device
hipMalloc((void **)&Pd, size);
// Setup the execution configuration
dim3 dimGrid(WIDTH/threadDim, WIDTH/threadDim); // number of blocks per grid
dim3 dimBlock(threadDim, threadDim); // number of threads per block
// Get start time event
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// Invoke kernel
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd, width);
hipError_t cuda_err = hipGetLastError();
if ( hipSuccess != cuda_err ){
printf("before kernel call: error = %s\n", hipGetErrorString (cuda_err));
exit(1) ;
}
// Get stop time event
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Compute execution time
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %13f msec\n", elapsedTime);
hipEventDestroy(start);
hipEventDestroy(stop);
// Read P from device memory
hipMemcpy(P, Pd, size, hipMemcpyDeviceToHost);
// Free device memory
hipFree(Md);
hipFree(Nd);
hipFree(Pd);
}
|
3df73c743c0669d0ba9d9c22b736764b39d4430b.cu
|
//Use multiple block multiple thread
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define WIDTH 512
#define threadDim 16
__global__ void MatMulKernel(float *Md, float *Nd, float *Pd, int width);
void MatMul(float *M, float *N, float *P, int width);
int main(int argc, char *argv[])
{
int width = WIDTH;
float M[WIDTH*WIDTH] = {0};
float N[WIDTH*WIDTH] = {0};
float P[WIDTH*WIDTH] = {0};
float MxN[WIDTH*WIDTH] = {0};
int pass = 1;
srand(time(NULL));
for (int i = 0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
M[i*width + j] = rand() % 30;
N[i*width + j] = rand() % 30;
}
}
struct timeval starttime, endtime;
gettimeofday(&starttime, NULL);
for (int i = 0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
for (int k = 0; k < width; ++k) {
MxN[i*width + j] += M[i*width + k] * N[k*width + j];
}
}
}
gettimeofday(&endtime, NULL);
double executime;
executime = (endtime.tv_sec - starttime.tv_sec) * 1000.0;
executime += (endtime.tv_usec - starttime.tv_usec) / 1000.0;
printf("CPU time: %13lf msec\n", executime);
MatMul(M, N, P, width);
for (int i = 0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
if(MxN[i*width + j] != P[i*width + j]) {
printf("MxN[%d][%d] = %2.0f P[%d][%d] = %2.0f\n", i, j, MxN[i*width + j], i, j, P[i*width + j]);
pass = 0;
}
}
}
printf("Test %s\n", (pass)?"PASSED":"FAILED");
return 0;
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(float *Md, float *Nd, float *Pd, int width)
{
// Thread row and column within matrix
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Each thread computes one element of P
// by accumulating results into Pvalue
float Pvalue = 0;
// Multiply M and N
for (int k = 0; k < width; ++k) {
float Melement = *(Md + row*width + k);
float Nelement = *(Nd + k*width + col);
Pvalue += Melement * Nelement;
}
// Write Pvalue to device memory
// Each thread writes one element
*(Pd + row*width + col) = Pvalue;
}
// Matrix multiplication - Host code
void MatMul(float *M, float *N, float *P, int width)
{
size_t size = width * width * sizeof(float);
float *Md, *Nd, *Pd;
// Allocate and Load M, N to device memory
cudaMalloc((void **)&Md, size);
cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
cudaMalloc((void **)&Nd, size);
cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
// Allocate P on the device
cudaMalloc((void **)&Pd, size);
// Setup the execution configuration
dim3 dimGrid(WIDTH/threadDim, WIDTH/threadDim); // number of blocks per grid
dim3 dimBlock(threadDim, threadDim); // number of threads per block
// Get start time event
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Invoke kernel
MatMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, width);
cudaError_t cuda_err = cudaGetLastError();
if ( cudaSuccess != cuda_err ){
printf("before kernel call: error = %s\n", cudaGetErrorString (cuda_err));
exit(1) ;
}
// Get stop time event
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// Compute execution time
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %13f msec\n", elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Read P from device memory
cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(Md);
cudaFree(Nd);
cudaFree(Pd);
}
|
20cb3d57cfc51657c57f618e6568bbe32fad497b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MAX_DIM 32
#define SCALING_FACTOR 10.0
#define TILE_DIM 32
#define NUM_THREADS 1024
#define MOD_BASE 256
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int find_max(int * arr, int num_elem)
{
int max = 0;
for (int i = 0; i < num_elem; i++) {
if (arr[i] > max) {
max = arr[i];
}
}
return max;
}
int * def_mat_dim(int k)
{
int * dim = (int *) malloc(k * sizeof(int));
int i;
//srand(time(NULL));
for (i = 0; i < k; i++)
{
//dim[i] = 10;
dim[i] = (rand() % MAX_DIM) + 1;
//printf("%d\n", dim[i]);
}
return dim;
}
double * creat_mat(int dimX, int dimY)
{
int x;
double * mat = (double *) malloc(dimX * dimY * sizeof(double));
srand(time(NULL));
for (x = 0; x < dimX * dimY; x++) {
//mat[x] = float(rand()) / float(RAND_MAX) * SCALING_FACTOR;
mat[x] = float(rand()) / float(RAND_MAX) * SCALING_FACTOR;
//printf("%f\n", mat[x]);
}
return mat;
}
void if_mats_equal(double * A, double * B, int rows, int cols)
{
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
if (A[i * rows + j] != B[i * rows + j]) {
printf("Matrices are not equal\n");
return;
}
}
}
printf("Matrices are equal\n");
}
void cpu_mat_mul(double* A, double* B, double* C, int ARows, int ACols, int BRows, int BCols)
{
double sum = 0.0;
for (int i = 0; i < ARows; i++) {
for (int j = 0; j < BCols; j++) {
for (int k = 0; k < ACols; k++) {
sum += A[i * ACols + k] * B[k * BCols + j];
//C[i * BCols + j] += A[i * ACols + k] * B[k * BCols + j];
}
C[i * BCols + j] = double(int(sum) % MOD_BASE);
sum = 0.0;
}
}
}
void print_mat(double * mat, int dimX, int dimY)
{
for (int i = 0; i < dimX; i++) {
for (int j = 0; j < dimY; j++) {
printf("%2.2f ", mat[i * dimX + j]);
}
printf("\n");
}
}
double * cpu_multi_mat_mult(int num_dim, int * dim_list, double ** mat_list) {
int max_dim = find_max(dim_list, num_dim);
double * output_mat1 = (double *) calloc(max_dim * max_dim, sizeof(double));
double * output_mat2 = (double *) calloc(max_dim * max_dim, sizeof(double));
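// Ping-pong between the two scratch buffers: odd steps read output_mat1 and write
// output_mat2, even steps the reverse, so each product feeds the next multiplication.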
cpu_mat_mul(mat_list[0], mat_list[1], output_mat1, dim_list[0], dim_list[1], dim_list[1], dim_list[2]);
int num_rows = dim_list[0];
int num_cols = dim_list[2];
//print_mat(output_mat1, num_rows, num_cols);
int num_mult;
for (num_mult = 1; num_mult < num_dim - 2; num_mult++) {
if (num_mult % 2 == 1) {
cpu_mat_mul(output_mat1, mat_list[num_mult + 1], output_mat2, num_rows, num_cols, dim_list[num_mult + 1] , dim_list[num_mult + 2]);
}
else {
cpu_mat_mul(output_mat2, mat_list[num_mult + 1], output_mat1, num_rows, num_cols, dim_list[num_mult + 1] , dim_list[num_mult + 2]);
}
num_cols = dim_list[num_mult + 2];
}
//printf("%d %d\n", num_rows, num_cols);
if (num_mult % 2 == 1) {
free(output_mat2);
return output_mat1;
}
else {
free(output_mat1);
return output_mat2;
}
}
__device__
void MatMul(/* parameters */) {
}
/*
__global__
void matmult(double* A, double* B, double* C, int ARows, int ACols, int BRows, int BCols)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0.0;
if ((col < BCols) && (row < ARows)) {
for (int i = 0; i < ACols; i++) {
sum += A[row * ACols + i] * B[i * BCols + col];
}
C[row * BCols + col] = sum;
}
}
*/
__global__
void matmult_general(double* A, double* B, double* C, int ARows, int ACols, int BRows, int BCols)
{
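// Each thread strides over the elements of C in steps of NUM_THREADS and computes
// one full dot product of a row of A with a column of B per element.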
int num_elem_output = ARows * BCols;
int C_elem_row = 0;
int C_elem_col = 0;
double sum = 0.0f;
for (int n = threadIdx.x; n < num_elem_output; n+=NUM_THREADS) {
C_elem_col = n % BCols;
C_elem_row = (n + (BCols - C_elem_col)) / BCols - 1;
for (int i = 0; i < ACols; i++) {
sum += A[C_elem_row * ACols + i] * B[i * BCols + C_elem_col];
}
C[C_elem_row * BCols + C_elem_col] = sum; // row-major C has BCols columns, so the row stride is BCols
sum = 0.0f;
}
}
/*
__global__
void gpu_seq_multi_matmult(int num_dim, int * dim_list, double ** mat_list, double * output_mat1, double * output_mat2)
{
int grid_rows = (dim_list[0] + TILE_DIM - 1) / TILE_DIM;
int grid_cols = (dim_list[2] + TILE_DIM - 1) / TILE_DIM;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(TILE_DIM, TILE_DIM);
if (threadIdx.x == 0) {
matmult<<<dimGrid, dimBlock>>>(mat_list[0], mat_list[1], output_mat, dim_list[0], dim_list[1], dim_list[1], dim_list[2]);
hipDeviceSynchronize();
}
__syncthreads();
//hipDeviceSynchronize();
}
*/
int main()
{
int num_dim = 100;
int num_mat = num_dim - 1;
int * mat_dim = def_mat_dim(num_dim);
double ** mat_list = (double **) malloc((num_mat) * sizeof(double *));
// printf("Copying matrix dimensions to device\n");
int * d_mat_dim;
hipMalloc((void **)&d_mat_dim, num_dim * sizeof(int));
hipMemcpy(d_mat_dim, mat_dim, num_dim * sizeof(int), hipMemcpyHostToDevice);
// printf("Creating Matrix from on host\n");
int k;
for (k = 0; k < num_mat; k++) {
//printf("================= MATRIX %d ====================\n", k);
//printf("%d %d\n", mat_dim[k], mat_dim[k+1]);
mat_list[k] = creat_mat(mat_dim[k], mat_dim[k+1]);
}
// printf("Allocating space to store output matrix\n");
double * out_mat = (double *) malloc(mat_dim[0] * mat_dim[num_dim-1] * sizeof(double));
double * d_out_mat;
hipMalloc((void **) &d_out_mat, mat_dim[0] * mat_dim[num_dim-1] * sizeof(double));
// printf("Allocating space for each matrix, and storing pointer address of matrices on the host\n");
double ** int_mat_list = (double **) malloc(num_mat * sizeof(double *));
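// int_mat_list is a host-side array of device pointers: each matrix is copied to the
// GPU individually, and the pointer table itself is then copied into d_mat_list so a
// kernel could index the k-th matrix directly on the device.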
for (k = 0; k < num_mat; k++) {
hipMalloc((void **)&int_mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double));
hipMemcpy(int_mat_list[k], mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double), hipMemcpyHostToDevice);
}
// printf("Copying pointer addresses of matrices from host to device\n");
double ** d_mat_list;
hipMalloc(&d_mat_list, num_mat * sizeof(double *));
hipMemcpy(d_mat_list, int_mat_list, num_mat * sizeof(double *), hipMemcpyHostToDevice);
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
/*
for (k = 0; k < num_dim-1; k++) {
printf("%d %d %d %d\n", k, mat_dim[k], mat_dim[k+1], &d_mat_list[k]);
hipMalloc((void **)&d_mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double));
//hipMemcpy(d_mat_list[k], mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double), hipMemcpyHostToDevice);
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
}
printf("After d_mat_list\n");
*/
// printf("At the kernel call\n");
/*
int grid_rows = (mat_dim[0] + TILE_DIM - 1) / TILE_DIM;
int grid_cols = (mat_dim[2] + TILE_DIM - 1) / TILE_DIM;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(TILE_DIM, TILE_DIM);
*/
double * cpu_mat = cpu_multi_mat_mult(num_dim, mat_dim, mat_list);
printf("%d %d\n", mat_dim[0], mat_dim[num_dim-1]);
print_mat(cpu_mat, mat_dim[0], mat_dim[num_dim-1]);
printf("\n");
/*
printf("%d %d %d\n", mat_dim[0], mat_dim[1], mat_dim[2]);
//matmult<<<dimGrid, dimBlock>>>(int_mat_list[0], int_mat_list[1], d_out_mat, mat_dim[0], mat_dim[1], mat_dim[1], mat_dim[2]);
matmult_general<<<1, NUM_THREADS>>>(int_mat_list[0], int_mat_list[1], d_out_mat, mat_dim[0], mat_dim[1], mat_dim[1], mat_dim[2]);
hipDeviceSynchronize();
//multi_matmult<<<1, NUM_THREADS>>>(num_dim, d_mat_dim, d_mat_list, d_out_mat);
//gpuErrchk(hipPeekAtLastError());
error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
hipMemcpy(out_mat, d_out_mat, mat_dim[0] * mat_dim[num_dim-1] * sizeof(double), hipMemcpyDeviceToHost);
print_mat(out_mat, mat_dim[0], mat_dim[num_dim-1]);
printf("\n");
if_mats_equal(out_mat, cpu_mat, mat_dim[0], mat_dim[2]);
*/
return 0;
}
|
20cb3d57cfc51657c57f618e6568bbe32fad497b.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MAX_DIM 32
#define SCALING_FACTOR 10.0
#define TILE_DIM 32
#define NUM_THREADS 1024
#define MOD_BASE 256
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int find_max(int * arr, int num_elem)
{
int max = 0;
for (int i = 0; i < num_elem; i++) {
if (arr[i] > max) {
max = arr[i];
}
}
return max;
}
int * def_mat_dim(int k)
{
int * dim = (int *) malloc(k * sizeof(int));
int i;
//srand(time(NULL));
for (i = 0; i < k; i++)
{
//dim[i] = 10;
dim[i] = (rand() % MAX_DIM) + 1;
//printf("%d\n", dim[i]);
}
return dim;
}
double * creat_mat(int dimX, int dimY)
{
int x;
double * mat = (double *) malloc(dimX * dimY * sizeof(double));
srand(time(NULL));
for (x = 0; x < dimX * dimY; x++) {
//mat[x] = float(rand()) / float(RAND_MAX) * SCALING_FACTOR;
mat[x] = float(rand()) / float(RAND_MAX) * SCALING_FACTOR;
//printf("%f\n", mat[x]);
}
return mat;
}
void if_mats_equal(double * A, double * B, int rows, int cols)
{
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
if (A[i * cols + j] != B[i * cols + j]) {
printf("Matrices are not equal\n");
return;
}
}
}
printf("Matrices are equal\n");
}
void cpu_mat_mul(double* A, double* B, double* C, int ARows, int ACols, int BRows, int BCols)
{
double sum = 0.0;
for (int i = 0; i < ARows; i++) {
for (int j = 0; j < BCols; j++) {
for (int k = 0; k < ACols; k++) {
sum += A[i * ACols + k] * B[k * BCols + j];
//C[i * BCols + j] += A[i * ACols + k] * B[k * BCols + j];
}
C[i * BCols + j] = double(int(sum) % MOD_BASE);
sum = 0.0;
}
}
}
void print_mat(double * mat, int dimX, int dimY)
{
for (int i = 0; i < dimX; i++) {
for (int j = 0; j < dimY; j++) {
printf("%2.2f ", mat[i * dimX + j]);
}
printf("\n");
}
}
double * cpu_multi_mat_mult(int num_dim, int * dim_list, double ** mat_list) {
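// Chain-multiplies the matrices left to right, ping-ponging between two scratch
// buffers of size max_dim x max_dim so the running product never needs reallocation;
// whichever buffer holds the final product is returned and the other is freed.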
int max_dim = find_max(dim_list, num_dim);
double * output_mat1 = (double *) calloc(max_dim * max_dim, sizeof(double));
double * output_mat2 = (double *) calloc(max_dim * max_dim, sizeof(double));
cpu_mat_mul(mat_list[0], mat_list[1], output_mat1, dim_list[0], dim_list[1], dim_list[1], dim_list[2]);
int num_rows = dim_list[0];
int num_cols = dim_list[2];
//print_mat(output_mat1, num_rows, num_cols);
int num_mult;
for (num_mult = 1; num_mult < num_dim - 2; num_mult++) {
if (num_mult % 2 == 1) {
cpu_mat_mul(output_mat1, mat_list[num_mult + 1], output_mat2, num_rows, num_cols, dim_list[num_mult + 1] , dim_list[num_mult + 2]);
}
else {
cpu_mat_mul(output_mat2, mat_list[num_mult + 1], output_mat1, num_rows, num_cols, dim_list[num_mult + 1] , dim_list[num_mult + 2]);
}
num_cols = dim_list[num_mult + 2];
}
//printf("%d %d\n", num_rows, num_cols);
if (num_mult % 2 == 1) {
free(output_mat2);
return output_mat1;
}
else {
free(output_mat1);
return output_mat2;
}
}
__device__
void MatMul(/* parameters */) {
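// Empty placeholder, never called; presumably left for a future device-side helper.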
}
/*
__global__
void matmult(double* A, double* B, double* C, int ARows, int ACols, int BRows, int BCols)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0.0;
if ((col < BCols) && (row < ARows)) {
for (int i = 0; i < ACols; i++) {
sum += A[row * ACols + i] * B[i * BCols + col];
}
C[row * BCols + col] = sum;
}
}
*/
__global__
void matmult_general(double* A, double* B, double* C, int ARows, int ACols, int BRows, int BCols)
{
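// Intended for a single-block launch: each thread strides over the flattened output
// in steps of NUM_THREADS, maps the linear index n back to (row, col) of C, and
// computes the full dot product of that row of A with that column of B.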
int num_elem_output = ARows * BCols;
int C_elem_row = 0;
int C_elem_col = 0;
double sum = 0.0;
for (int n = threadIdx.x; n < num_elem_output; n += NUM_THREADS) {
C_elem_col = n % BCols;
C_elem_row = n / BCols;
for (int i = 0; i < ACols; i++) {
sum += A[C_elem_row * ACols + i] * B[i * BCols + C_elem_col];
}
C[C_elem_row * BCols + C_elem_col] = sum; // C is ARows x BCols, so rows are strided by BCols
sum = 0.0;
}
}
/*
__global__
void gpu_seq_multi_matmult(int num_dim, int * dim_list, double ** mat_list, double * output_mat1, double * output_mat2)
{
int grid_rows = (dim_list[0] + TILE_DIM - 1) / TILE_DIM;
int grid_cols = (dim_list[2] + TILE_DIM - 1) / TILE_DIM;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(TILE_DIM, TILE_DIM);
if (threadIdx.x == 0) {
matmult<<<dimGrid, dimBlock>>>(mat_list[0], mat_list[1], output_mat, dim_list[0], dim_list[1], dim_list[1], dim_list[2]);
cudaDeviceSynchronize();
}
__syncthreads();
//cudaThreadSynchronize();
}
*/
int main()
{
int num_dim = 100;
int num_mat = num_dim - 1;
int * mat_dim = def_mat_dim(num_dim);
double ** mat_list = (double **) malloc((num_mat) * sizeof(double *));
// printf("Copying matrix dimensions to device\n");
int * d_mat_dim;
cudaMalloc((void **)&d_mat_dim, num_dim * sizeof(int));
cudaMemcpy(d_mat_dim, mat_dim, num_dim * sizeof(int), cudaMemcpyHostToDevice);
// printf("Creating Matrix from on host\n");
int k;
for (k = 0; k < num_mat; k++) {
//printf("================= MATRIX %d ====================\n", k);
//printf("%d %d\n", mat_dim[k], mat_dim[k+1]);
mat_list[k] = creat_mat(mat_dim[k], mat_dim[k+1]);
}
// printf("Allocating space to store output matrix\n");
double * out_mat = (double *) malloc(mat_dim[0] * mat_dim[num_dim-1] * sizeof(double));
double * d_out_mat;
cudaMalloc((void **) &d_out_mat, mat_dim[0] * mat_dim[num_dim-1] * sizeof(double));
// printf("Allocating space for each matrix, and storing pointer address of matrices on the host\n");
double ** int_mat_list = (double **) malloc(num_mat * sizeof(double *));
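// int_mat_list is a host-side array of device pointers: each matrix is copied to the
// GPU individually, and the pointer table itself is then copied into d_mat_list so a
// kernel could index the k-th matrix directly on the device.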
for (k = 0; k < num_mat; k++) {
cudaMalloc((void **)&int_mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double));
cudaMemcpy(int_mat_list[k], mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double), cudaMemcpyHostToDevice);
}
// printf("Copying pointer addresses of matrices from host to device\n");
double ** d_mat_list;
cudaMalloc(&d_mat_list, num_mat * sizeof(double *));
cudaMemcpy(d_mat_list, int_mat_list, num_mat * sizeof(double *), cudaMemcpyHostToDevice);
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
/*
for (k = 0; k < num_dim-1; k++) {
printf("%d %d %d %d\n", k, mat_dim[k], mat_dim[k+1], &d_mat_list[k]);
cudaMalloc((void **)&d_mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double));
//cudaMemcpy(d_mat_list[k], mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double), cudaMemcpyHostToDevice);
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
}
printf("After d_mat_list\n");
*/
// printf("At the kernel call\n");
/*
int grid_rows = (mat_dim[0] + TILE_DIM - 1) / TILE_DIM;
int grid_cols = (mat_dim[2] + TILE_DIM - 1) / TILE_DIM;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(TILE_DIM, TILE_DIM);
*/
double * cpu_mat = cpu_multi_mat_mult(num_dim, mat_dim, mat_list);
printf("%d %d\n", mat_dim[0], mat_dim[num_dim-1]);
print_mat(cpu_mat, mat_dim[0], mat_dim[num_dim-1]);
printf("\n");
/*
printf("%d %d %d\n", mat_dim[0], mat_dim[1], mat_dim[2]);
//matmult<<<dimGrid, dimBlock>>>(int_mat_list[0], int_mat_list[1], d_out_mat, mat_dim[0], mat_dim[1], mat_dim[1], mat_dim[2]);
matmult_general<<<1, NUM_THREADS>>>(int_mat_list[0], int_mat_list[1], d_out_mat, mat_dim[0], mat_dim[1], mat_dim[1], mat_dim[2]);
cudaThreadSynchronize();
//multi_matmult<<<1, NUM_THREADS>>>(num_dim, d_mat_dim, d_mat_list, d_out_mat);
//gpuErrchk(cudaPeekAtLastError());
error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
cudaMemcpy(out_mat, d_out_mat, mat_dim[0] * mat_dim[num_dim-1] * sizeof(double), cudaMemcpyDeviceToHost);
print_mat(out_mat, mat_dim[0], mat_dim[num_dim-1]);
printf("\n");
if_mats_equal(out_mat, cpu_mat, mat_dim[0], mat_dim[2]);
*/
return 0;
}
|
c6c8ef4fe5fd852d8d5d044cf1bb5e7cfab1720c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <unordered_map>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/framework/details/nan_inf_utils_detail.h"
#include "paddle/fluid/framework/scope.h"
namespace paddle {
namespace framework {
namespace details {
static std::once_flag init_multi_gpu_op_var_map_flag;
// lazy init
static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>&
multi_op_var2gpu_str() {
static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>
_multi_op_var2gpu_str;
return _multi_op_var2gpu_str;
}
static std::vector<std::mutex>& multi_op_var2gpu_str_mutex() {
static std::vector<std::mutex> _multi_op_var2gpu_str_mutex;
return _multi_op_var2gpu_str_mutex;
}
static void InitMultiGPUOpVarMap() {
int dev_count = platform::GetGPUDeviceCount();
PADDLE_ENFORCE_GT(dev_count, 0,
platform::errors::NotFound(
"cuda device must > 0, now dev_count=%d", dev_count));
// https://stackoverflow.com/questions/16465633/how-can-i-use-something-like-stdvectorstdmutex
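// std::mutex is neither copyable nor movable, so the vectors are built locally with
// the right size and then swap()ed into the function-local statics above.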
std::vector<std::unordered_map<std::string, memory::AllocationPtr>> tmp_multi(
dev_count);
std::vector<std::mutex> tmp_multi_mutex(dev_count);
multi_op_var2gpu_str().swap(tmp_multi);
multi_op_var2gpu_str_mutex().swap(tmp_multi_mutex);
}
template <typename T>
__device__ __forceinline__ void PrintNanInfKernel(const T* value,
const size_t numel,
int print_num,
char* debug_info) {
const size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ unsigned int nan_count, inf_count, num_count;
if (threadIdx.x == 0) nan_count = inf_count = num_count = 0;
__syncthreads();
for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) {
unsigned int count = 0;
if (isnan(value[i])) {
count = atomicAdd(&nan_count, 1);
} else if (isinf(value[i])) {
count = atomicAdd(&inf_count, 1);
} else {
count = atomicAdd(&num_count, 1);
}
// for cuda, print in every block
if (count < print_num) {
printf("numel:%lu idx:%lu value:%f\n", static_cast<uint64_t>(numel),
static_cast<uint64_t>(i), static_cast<float>(value[i]));
}
}
__syncthreads();
#ifdef __HIPCC__
if (true && hipThreadIdx_x == 0) {
printf("In block %d, there has %u,%u,%u nan,inf,num\n", hipBlockIdx_x,
nan_count, inf_count, num_count);
#else
if (true && threadIdx.x == 0) {
printf("In block %d, there has %u,%u,%u nan,inf,num\n", blockIdx.x,
nan_count, inf_count, num_count);
#endif
PADDLE_ENFORCE(false, "===ERROR: in %s find nan or inf===", debug_info);
}
}
// Resnet 2gpus speed test, no check 270 images/s, this check 229 images/s
template <typename T>
__global__ void CheckNanInfKernel(const T* value, const size_t numel,
int print_num, char* debug_info) {
/// step 1, judge whether there is a nan or inf
__shared__ volatile int has_nan_inf;
if (threadIdx.x == 0) has_nan_inf = false;
__syncthreads();
const size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
T sum = static_cast<T>(0.0);
// Todo(wangxi). simd speed up
for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) {
sum += (value[i] - value[i]);
}
if (isnan(sum) || isinf(sum)) has_nan_inf = true;
__syncthreads();
/// Note. different blocks may behave differently
if (!has_nan_inf) return;
PrintNanInfKernel(value, numel, print_num, debug_info);
}
template <>
template <typename T>
void TensorCheckerVisitor<platform::CUDADeviceContext>::apply(
typename std::enable_if<
std::is_floating_point<T>::value ||
std::is_same<T, ::paddle::platform::complex<float>>::value ||
std::is_same<T, ::paddle::platform::complex<double>>::value>::type*)
const {
int print_num = 3;
auto* dev_ctx = reinterpret_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(tensor_.place()));
int dev_id = tensor_.place().device;
PADDLE_ENFORCE_EQ(
(dev_id >= 0 && dev_id < multi_op_var2gpu_str_mutex().size()), true,
platform::errors::OutOfRange("GPU dev_id must >=0 and < dev_count=%d",
multi_op_var2gpu_str_mutex().size()));
std::string op_var = "[op=" + op_type_ + "] [tensor=" + var_name_ + "]";
char* gpu_str_ptr = NULL;
{
auto& op_var2gpu_str_mutex = multi_op_var2gpu_str_mutex().at(dev_id);
auto& op_var2gpu_str = multi_op_var2gpu_str().at(dev_id);
std::lock_guard<std::mutex> guard(op_var2gpu_str_mutex);
if (op_var2gpu_str.find(op_var) == op_var2gpu_str.end()) { // insert
auto gpu_str_tensor =
paddle::memory::Alloc(*dev_ctx, op_var.length() + 1);
gpu_str_ptr = reinterpret_cast<char*>(gpu_str_tensor->ptr());
op_var2gpu_str.emplace(op_var, std::move(gpu_str_tensor));
auto iter = op_var2gpu_str.find(op_var);
PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true,
platform::errors::PreconditionNotMet(
"op_var=%s should successed insert into "
"op_var2gpu_str, but now failed",
op_var));
#ifdef __HIPCC__
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1,
hipMemcpyHostToDevice, dev_ctx->stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1,
hipMemcpyHostToDevice, dev_ctx->stream()));
#endif
} else { // get
auto iter = op_var2gpu_str.find(op_var);
PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true,
platform::errors::PreconditionNotMet(
"op_var=%s should be in the op_var2gpu_str, but "
"now can't find it",
op_var));
gpu_str_ptr = reinterpret_cast<char*>(iter->second->ptr());
}
}
#ifdef __HIPCC__
// HIP will throw GPU memory access fault if threads > 256
const size_t threads = 256;
#else
const size_t threads = 1024;
#endif
size_t blocks =
::min(static_cast<size_t>(128),
static_cast<size_t>((tensor_.numel() + threads - 1) / threads));
#ifdef __HIPCC__
hipLaunchKernelGGL(CheckNanInfKernel, dim3(blocks), dim3(threads), 0,
dev_ctx->stream(), tensor_.data<T>(), tensor_.numel(),
print_num, gpu_str_ptr);
#else
hipLaunchKernelGGL(( CheckNanInfKernel), dim3(blocks), dim3(threads), 0, dev_ctx->stream(),
tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr);
#endif
}
template <>
void tensor_check<platform::CUDADeviceContext>(const std::string& op_type,
const std::string& var_name,
const framework::Tensor& tensor,
const platform::Place& place) {
std::call_once(init_multi_gpu_op_var_map_flag, InitMultiGPUOpVarMap);
TensorCheckerVisitor<platform::CUDADeviceContext> vistor(op_type, var_name,
tensor, place);
VisitDataType(framework::TransToProtoVarType(tensor.dtype()), vistor);
}
} // namespace details
} // namespace framework
} // namespace paddle
|
c6c8ef4fe5fd852d8d5d044cf1bb5e7cfab1720c.cu
|
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <unordered_map>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/framework/details/nan_inf_utils_detail.h"
#include "paddle/fluid/framework/scope.h"
namespace paddle {
namespace framework {
namespace details {
static std::once_flag init_multi_gpu_op_var_map_flag;
// lazy init
static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>&
multi_op_var2gpu_str() {
static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>
_multi_op_var2gpu_str;
return _multi_op_var2gpu_str;
}
static std::vector<std::mutex>& multi_op_var2gpu_str_mutex() {
static std::vector<std::mutex> _multi_op_var2gpu_str_mutex;
return _multi_op_var2gpu_str_mutex;
}
static void InitMultiGPUOpVarMap() {
int dev_count = platform::GetGPUDeviceCount();
PADDLE_ENFORCE_GT(dev_count, 0,
platform::errors::NotFound(
"cuda device must > 0, now dev_count=%d", dev_count));
// https://stackoverflow.com/questions/16465633/how-can-i-use-something-like-stdvectorstdmutex
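// std::mutex is neither copyable nor movable, so the vectors are built locally with
// the right size and then swap()ed into the function-local statics above.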
std::vector<std::unordered_map<std::string, memory::AllocationPtr>> tmp_multi(
dev_count);
std::vector<std::mutex> tmp_multi_mutex(dev_count);
multi_op_var2gpu_str().swap(tmp_multi);
multi_op_var2gpu_str_mutex().swap(tmp_multi_mutex);
}
template <typename T>
__device__ __forceinline__ void PrintNanInfKernel(const T* value,
const size_t numel,
int print_num,
char* debug_info) {
const size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ unsigned int nan_count, inf_count, num_count;
if (threadIdx.x == 0) nan_count = inf_count = num_count = 0;
__syncthreads();
for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) {
unsigned int count = 0;
if (isnan(value[i])) {
count = atomicAdd(&nan_count, 1);
} else if (isinf(value[i])) {
count = atomicAdd(&inf_count, 1);
} else {
count = atomicAdd(&num_count, 1);
}
// for cuda, print in every block
if (count < print_num) {
printf("numel:%lu idx:%lu value:%f\n", static_cast<uint64_t>(numel),
static_cast<uint64_t>(i), static_cast<float>(value[i]));
}
}
__syncthreads();
#ifdef __HIPCC__
if (true && hipThreadIdx_x == 0) {
printf("In block %d, there has %u,%u,%u nan,inf,num\n", hipBlockIdx_x,
nan_count, inf_count, num_count);
#else
if (true && threadIdx.x == 0) {
printf("In block %d, there has %u,%u,%u nan,inf,num\n", blockIdx.x,
nan_count, inf_count, num_count);
#endif
PADDLE_ENFORCE(false, "===ERROR: in %s find nan or inf===", debug_info);
}
}
// Resnet 2gpus speed test, no check 270 images/s, this check 229 images/s
template <typename T>
__global__ void CheckNanInfKernel(const T* value, const size_t numel,
int print_num, char* debug_info) {
/// step 1, judge whether there is a nan or inf
__shared__ volatile int has_nan_inf;
if (threadIdx.x == 0) has_nan_inf = false;
__syncthreads();
const size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
T sum = static_cast<T>(0.0);
// Todo(wangxi). simd speed up
for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) {
sum += (value[i] - value[i]);
}
if (isnan(sum) || isinf(sum)) has_nan_inf = true;
__syncthreads();
/// Note. different blocks may behave differently
if (!has_nan_inf) return;
PrintNanInfKernel(value, numel, print_num, debug_info);
}
template <>
template <typename T>
void TensorCheckerVisitor<platform::CUDADeviceContext>::apply(
typename std::enable_if<
std::is_floating_point<T>::value ||
std::is_same<T, ::paddle::platform::complex<float>>::value ||
std::is_same<T, ::paddle::platform::complex<double>>::value>::type*)
const {
int print_num = 3;
auto* dev_ctx = reinterpret_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(tensor_.place()));
int dev_id = tensor_.place().device;
PADDLE_ENFORCE_EQ(
(dev_id >= 0 && dev_id < multi_op_var2gpu_str_mutex().size()), true,
platform::errors::OutOfRange("GPU dev_id must >=0 and < dev_count=%d",
multi_op_var2gpu_str_mutex().size()));
std::string op_var = "[op=" + op_type_ + "] [tensor=" + var_name_ + "]";
char* gpu_str_ptr = NULL;
{
auto& op_var2gpu_str_mutex = multi_op_var2gpu_str_mutex().at(dev_id);
auto& op_var2gpu_str = multi_op_var2gpu_str().at(dev_id);
std::lock_guard<std::mutex> guard(op_var2gpu_str_mutex);
if (op_var2gpu_str.find(op_var) == op_var2gpu_str.end()) { // insert
auto gpu_str_tensor =
paddle::memory::Alloc(*dev_ctx, op_var.length() + 1);
gpu_str_ptr = reinterpret_cast<char*>(gpu_str_tensor->ptr());
op_var2gpu_str.emplace(op_var, std::move(gpu_str_tensor));
auto iter = op_var2gpu_str.find(op_var);
PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true,
platform::errors::PreconditionNotMet(
"op_var=%s should successed insert into "
"op_var2gpu_str, but now failed",
op_var));
#ifdef __HIPCC__
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1,
hipMemcpyHostToDevice, dev_ctx->stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
cudaMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1,
cudaMemcpyHostToDevice, dev_ctx->stream()));
#endif
} else { // get
auto iter = op_var2gpu_str.find(op_var);
PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true,
platform::errors::PreconditionNotMet(
"op_var=%s should be in the op_var2gpu_str, but "
"now can't find it",
op_var));
gpu_str_ptr = reinterpret_cast<char*>(iter->second->ptr());
}
}
#ifdef __HIPCC__
// HIP will throw GPU memory access fault if threads > 256
const size_t threads = 256;
#else
const size_t threads = 1024;
#endif
size_t blocks =
std::min(static_cast<size_t>(128),
static_cast<size_t>((tensor_.numel() + threads - 1) / threads));
#ifdef __HIPCC__
hipLaunchKernelGGL(CheckNanInfKernel, dim3(blocks), dim3(threads), 0,
dev_ctx->stream(), tensor_.data<T>(), tensor_.numel(),
print_num, gpu_str_ptr);
#else
CheckNanInfKernel<<<blocks, threads, 0, dev_ctx->stream()>>>(
tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr);
#endif
}
template <>
void tensor_check<platform::CUDADeviceContext>(const std::string& op_type,
const std::string& var_name,
const framework::Tensor& tensor,
const platform::Place& place) {
std::call_once(init_multi_gpu_op_var_map_flag, InitMultiGPUOpVarMap);
TensorCheckerVisitor<platform::CUDADeviceContext> vistor(op_type, var_name,
tensor, place);
VisitDataType(framework::TransToProtoVarType(tensor.dtype()), vistor);
}
} // namespace details
} // namespace framework
} // namespace paddle
|
9eeeb4636037d3f54d47f43d8a106e90c39f451f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> c d s
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named ztrtri_diag.cu to avoid name conflict with src/ztrtri.o
in the library. The actual kernels are in ztrtri_lower.cu and ztrtri_upper.cu
*/
#include "common_magma.h"
#include "ztrtri.cuh"
/**
Purpose
-------
ZTRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in ztrsm.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array A of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = 'U', the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = 'L', the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = 'U', the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of each array A. LDDA >= max(1,N).
@param[out]
dinvA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array dinvA of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zblas3
********************************************************************/
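// Illustrative usage (a sketch, not taken from the MAGMA sources): a batched
// triangular solve would first invert the diagonal blocks, e.g.
//     magmablas_ztrtri_diag_batched( MagmaLower, MagmaNonUnit, n,
//                                    dA_array, ldda, dinvA_array,
//                                    0 /*resetozero*/, batchCount, queue );
// and then apply the NB x NB inverses held in dinvA_array inside ztrsm.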
extern "C" void
magmablas_ztrtri_diag_batched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaDoubleComplex const * const *dA_array, magma_int_t ldda,
magmaDoubleComplex **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
if ( resetozero ) {
magmablas_zlaset_batched(MagmaFull, magma_roundup( n, NB ), NB, MAGMA_Z_ZERO, MAGMA_Z_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue);
}
// If someone wants to use cudaMemset here instead, they would need to reset the whole
// vectors of the initial size; otherwise it is a bug. That would also require passing
// dinvA_length as an input parameter, and it has been tested and was slower.
// If the size is not the largest size computed by the high-level API getrf_batched,
// it is a bug and magmablas_zlaset_batched must be used instead.
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
hipLaunchKernelGGL(( ztrtri_diag_lower_kernel_batched), dim3(diaggrid), dim3(IB), 0, queue , diag, n, dA_array, ldda, dinvA_array );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_zgemm16_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm16_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_zgemm32_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm32_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_zgemm64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_zgemm_above64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm_above64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm_above64_part3_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
hipLaunchKernelGGL(( ztrtri_diag_upper_kernel_batched), dim3(diaggrid), dim3(IB), 0, queue , diag, n, dA_array, ldda, dinvA_array );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_zgemm16_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm16_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_zgemm32_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm32_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_zgemm64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_zgemm_above64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm_above64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm_above64_part3_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
|
9eeeb4636037d3f54d47f43d8a106e90c39f451f.cu
|
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> c d s
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named ztrtri_diag.cu to avoid name conflict with src/ztrtri.o
in the library. The actual kernels are in ztrtri_lower.cu and ztrtri_upper.cu
*/
#include "common_magma.h"
#include "ztrtri.cuh"
/**
Purpose
-------
ZTRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in ztrsm.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array A of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = 'U', the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = 'L', the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = 'U', the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of each array A. LDDA >= max(1,N).
@param[out]
dinvA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array dinvA of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zblas3
********************************************************************/
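// Illustrative usage (a sketch, not taken from the MAGMA sources): a batched
// triangular solve would first invert the diagonal blocks, e.g.
//     magmablas_ztrtri_diag_batched( MagmaLower, MagmaNonUnit, n,
//                                    dA_array, ldda, dinvA_array,
//                                    0 /*resetozero*/, batchCount, queue );
// and then apply the NB x NB inverses held in dinvA_array inside ztrsm.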
extern "C" void
magmablas_ztrtri_diag_batched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaDoubleComplex const * const *dA_array, magma_int_t ldda,
magmaDoubleComplex **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
if ( resetozero ) {
magmablas_zlaset_batched(MagmaFull, magma_roundup( n, NB ), NB, MAGMA_Z_ZERO, MAGMA_Z_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue);
}
// If someone wants to use cudaMemset here instead, they would need to reset the whole
// vectors of the initial size; otherwise it is a bug. That would also require passing
// dinvA_length as an input parameter, and it has been tested and was slower.
// If the size is not the largest size computed by the high-level API getrf_batched,
// it is a bug and magmablas_zlaset_batched must be used instead.
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
ztrtri_diag_lower_kernel_batched<<< diaggrid, IB, 0, queue >>>( diag, n, dA_array, ldda, dinvA_array );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
triple_zgemm16_part1_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm16_part2_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
triple_zgemm32_part1_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm32_part2_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
triple_zgemm64_part1_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm64_part2_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
triple_zgemm_above64_part1_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm_above64_part2_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm_above64_part3_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
ztrtri_diag_upper_kernel_batched<<< diaggrid, IB, 0, queue >>>( diag, n, dA_array, ldda, dinvA_array );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
triple_zgemm16_part1_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm16_part2_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
triple_zgemm32_part1_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm32_part2_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
triple_zgemm64_part1_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm64_part2_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
triple_zgemm_above64_part1_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm_above64_part2_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm_above64_part3_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
|
8c0441923e5c1307bdfb8927e11c5df04deae86b.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
8c0441923e5c1307bdfb8927e11c5df04deae86b.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
98772800c082ebb6e97112776e14d02cca443589.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <mpi.h>
#include <helper_math.h>
#include "cmdparser.hpp"
using namespace std;
// -----------------------------------------------------------------------------------
#define cudaCheckLastError() { \
hipError_t error = hipGetLastError(); \
int id; hipGetDevice(&id); \
if(error != hipSuccess) { \
printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \
__FILE__,__LINE__, hipGetErrorString(error), id); \
exit(EXIT_FAILURE); \
} \
}
// -----------------------------------------------------------------------------------
#define MPI_Sync(message) { \
MPI_Barrier(MPI_COMM_WORLD); \
if(rank==master) cout << "----------------------------------------------------------"<< endl; \
if(rank==master) cout << message << endl; \
MPI_Barrier(MPI_COMM_WORLD); \
}
// -----------------------------------------------------------------------------------
/// Mirror effect, acts like Neumann Boundary Condition
#define at(x, y, z, dimx, dimy, dimz) (clamp(z, 0, dimz-1)*dimy*dimx \
+clamp(y, 0, dimy-1)*dimx \
+clamp(x, 0, dimx-1))
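// Example: at(-1, y, z, dimx, dimy, dimz) clamps x to 0, so a read just outside the
// volume reuses the boundary voxel (the mirror/Neumann-like behaviour noted above).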
// -----------------------------------------------------------------------------------
__global__
void __warmup(float *src, float *dst, int dimx, int dimy, int dimz)
{
//3D global index
int3 index_3d = make_int3(
blockIdx.x*blockDim.x+threadIdx.x,
blockIdx.y*blockDim.y+threadIdx.y,
blockIdx.z*blockDim.z+threadIdx.z);
//Check valid indices
if (index_3d.x >= dimx || index_3d.y >= dimy || index_3d.z >= dimz)
return;
//
dst[at(index_3d.x, index_3d.y, index_3d.z, dimx, dimy, dimz)]
= src[at(index_3d.x, index_3d.y, index_3d.z, dimx, dimy, dimz)];
}
// -----------------------------------------------------------------------------------
void warmup(float *src, float *dst, int dimx, int dimy, int dimz)
{
dim3 numBlocks((dimx/8 + ((dimx%8)?1:0)),
(dimy/8 + ((dimy%8)?1:0)),
(dimz/8 + ((dimz%8)?1:0)) );
dim3 numThreads(8, 8, 8);
hipLaunchKernelGGL(( __warmup), dim3(numBlocks), dim3(numThreads), 0, 0, src, dst, dimx, dimy, dimz);
}
// -----------------------------------------------------------------------------------
__global__
void __heatflow(float *src, float *dst, int dimx, int dimy, int dimz)
{
//3D global index
int3 index_3d = make_int3(
blockIdx.x*blockDim.x+threadIdx.x,
blockIdx.y*blockDim.y+threadIdx.y,
blockIdx.z*blockDim.z+threadIdx.z);
//Check valid indices
if (index_3d.x >= dimx || index_3d.y >= dimy || index_3d.z >= dimz)
return;
//
int index_1d = index_3d.z * dimy * dimx +
index_3d.y * dimx +
index_3d.x;
//
float tmp = index_1d * 0.001f; // Prevent optimization
for(int k=0; k<0; k++)
{
tmp = src[at(index_3d.x, index_3d.y, index_3d.z, dimx, dimy, dimz)];
tmp = tmp-0.5;
tmp = tmp+1;
tmp = tmp-0.5;
dst[at(index_3d.x, index_3d.y, index_3d.z, dimx, dimy, dimz)] = tmp;
}
dst[at(index_3d.x, index_3d.y, index_3d.z, dimx, dimy, dimz)]
= (src[at(index_3d.x+1, index_3d.y+0, index_3d.z+0, dimx, dimy, dimz)] +
src[at(index_3d.x-1, index_3d.y+0, index_3d.z+0, dimx, dimy, dimz)] +
src[at(index_3d.x+0, index_3d.y+1, index_3d.z+0, dimx, dimy, dimz)] +
src[at(index_3d.x+0, index_3d.y-1, index_3d.z+0, dimx, dimy, dimz)] +
src[at(index_3d.x+0, index_3d.y+0, index_3d.z+1, dimx, dimy, dimz)] +
src[at(index_3d.x+0, index_3d.y+0, index_3d.z-1, dimx, dimy, dimz)]) / 6.0f;
}
// -----------------------------------------------------------------------------------
void heatflow(float *src, float *dst, int dimx, int dimy, int dimz)
{
dim3 numBlocks((dimx/8 + ((dimx%8)?1:0)),
(dimy/8 + ((dimy%8)?1:0)),
(dimz/8 + ((dimz%8)?1:0)) );
dim3 numThreads(8, 8, 8);
hipLaunchKernelGGL(( __heatflow), dim3(numBlocks), dim3(numThreads), 0, 0, src, dst, dimx, dimy, dimz);
}
// -----------------------------------------------------------------------------------
int main (int argc, char *argv[])
{
//================================================================================
// To set the GPU using hipSetDevice, we must set before launching MPI_Init
// Determine the MPI local rank per node is doable either in OpenMPI or MVAPICH2
int localRank;
char *localRankStr = NULL;
//================================================================================
// Investigate the number of GPUs per node.
int deviceCount = 0;
localRankStr = getenv("OMPI_COMM_WORLD_LOCAL_RANK");
if (localRankStr != NULL)
{
localRank = atoi(localRankStr);
hipGetDeviceCount(&deviceCount);
// cudaCheckLastError(); //Don't put this line
// printf("There are %02d device(s) at local process %02d\n",
// deviceCount, localRank);
cout << "There are " << deviceCount
<< " device(s) at local process "
<< endl;
if(deviceCount>0)
{
hipSetDevice(localRank % deviceCount); cudaCheckLastError();
hipDeviceReset(); cudaCheckLastError();
// hipDeviceEnablePeerAccess (localRank % deviceCount, 0); cudaCheckLastError();
for(int d=0; d<deviceCount; d++)
{
if(d!=(localRank % deviceCount))
{
hipDeviceEnablePeerAccess (d, 0); cudaCheckLastError();
}
}
}
}
//================================================================================
// Information to control the MPI processes
// We have n processes in total, indexed from 0 to n-1.
// In the active configuration below every rank acts as a worker:
// the master is rank 0 (it also owns the full source/destination arrays),
// the head worker is rank 0 and the tail worker is rank (n-1).
// (The commented-out alternative keeps a dedicated master at rank n-1 with n-1 workers.)
int size, rank;
char name[MPI_MAX_PROCESSOR_NAME];
int length;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Get_processor_name(name, &length);
MPI_Status status;
MPI_Request request;
cout << "Hello World from rank " << rank
<< " out of " << size
<< " at " << name
<< endl;
//================================================================================
// int master = size-1;
// int worker;
// int numMasters = 1;
// int numWorkers = size-1;
// int head = 0;
// int tail = size-2;
int master = 0;
int worker;
int numMasters = 1;
int numWorkers = size;
int head = 0;
int tail = size-1;
//================================================================================
// Parsing the argument
const char* key =
"{ h |help | | print help message }"
"{ |dimx | 512 | Number of the columns }"
"{ |dimy | 512 | Number of the rows }"
"{ |dimz | 512 | Temporal resolution }"
"{ n |numLoops | 10 | Temporal resolution }";
CommandLineParser cmd(argc, argv, key);
// if(rank==master)
// if (argc == 1)
// {
// cout << "Usage: " << argv[0] << " [options]" << endl;
// cout << "Avaible options:" << endl;
// cmd.printParams();
// return 0;
// }
//================================================================================
const int dimx = cmd.get<int>("dimx", false); //default value has been provided
const int dimy = cmd.get<int>("dimy", false);
const int dimz = cmd.get<int>("dimz", false);
const int numLoops = cmd.get<int>("numLoops", false);
// if(rank==master) cmd.printParams();
// if(rank==master) cout << dimx << endl << dimy << endl << dimz << endl;
//================================================================================
//!!! Determine main problem size and data partition same as CUDA style
dim3 procDim;
dim3 knotDim;
dim3 haloDim;
haloDim.x = 0;
haloDim.y = 0;
haloDim.z = 1; // Pad 1
procDim.x = dimx;
procDim.y = dimy;
procDim.z = dimz/numWorkers; // We partition only along z
if(numWorkers==1)
procDim.z = dimz/numWorkers; // We partition only along z
else
{
if(rank==head) procDim.z = 1*haloDim.z + dimz/numWorkers + 0*haloDim.z; // We partition only along z
else if(rank==tail) procDim.z = 0*haloDim.z + dimz/numWorkers + 1*haloDim.z; // We partition only along z
else procDim.z = 1*haloDim.z + dimz/numWorkers + 1*haloDim.z; // We partition only along z
}
knotDim.x = dimx/procDim.x + ((dimx%procDim.x)?1:0);
knotDim.y = dimy/procDim.y + ((dimy%procDim.y)?1:0);
knotDim.z = dimz/procDim.z + ((dimz%procDim.z)?1:0);
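// knotDim counts how many procDim-sized sub-volumes are needed to tile the full domain (ceiling division).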
if(rank==head)
{
cout << procDim.x << endl << procDim.y << endl << procDim.z << endl;
cout << knotDim.x << endl << knotDim.y << endl << knotDim.z << endl;
}
//================================================================================
// Master node will handle source and destination data
float *h_src, *h_dst;
h_src = NULL;
h_dst = NULL;
int total = dimx * dimy * dimz;
int validSize = dimx * dimy * dimz / numWorkers; // Valid data range
int haloSize = dimx * dimy * haloDim.z;
MPI_Sync("Allocating total memory at master");
if(rank==master)
{
h_src = new float[total];
h_dst = new float[total];
for(int k=0; k<total; k++)
{
h_src[k] = (float)rand();
h_dst[k] = 0;
}
}
//================================================================================
MPI_Sync("Done");
//================================================================================
// Worker or compute node will handle partially the data
// Head: validsize+haloSize
// Middle: validsize+2*haloSize
// Tail: validsize+haloSize
int headSize = validSize + 1*haloSize;
int middleSize = validSize + 2*haloSize;
int tailSize = validSize + 1*haloSize;
int procSize = procDim.x*procDim.y*procDim.z;
float *p_src, *p_dst;
p_src = NULL;
p_dst = NULL;
//================================================================================
MPI_Sync("");
cout << "Allocating src memory at " << rank << endl;
MPI_Sync("");
//================================================================================
p_src = new float[procSize];
// if(numWorkers == 1)
// p_src = new float[validSize];
// else
// {
// if(rank==head) p_src = new float[headSize];
// else if (rank==tail) p_src = new float[tailSize];
// else p_src = new float[middleSize];
// }
//================================================================================
MPI_Sync("Done");
//================================================================================
MPI_Sync("");
cout << "Allocating dst memory at " << rank << endl;
MPI_Sync("");
//================================================================================
p_dst = new float[procSize];
// if(numWorkers == 1)
// p_dst = new float[validSize];
// else
// {
// if(rank==head) p_dst = new float[headSize];
// else if (rank==tail) p_dst = new float[tailSize];
// else p_dst = new float[middleSize];
// }
//================================================================================
MPI_Sync("");
cout << "Allocated dst memory at " << rank << endl;
MPI_Sync("Done");
//================================================================================
/// Start to distribute
// Scatter the data
MPI_Sync("Master is scattering the data");
if(rank==master) //Send
{
if(numWorkers==1)
MPI_Isend(h_src, validSize, MPI_FLOAT, head, 0, MPI_COMM_WORLD, &request);
else
{
// Send to head
MPI_Isend(h_src, headSize, MPI_FLOAT, head, 0, MPI_COMM_WORLD, &request);
// Send to tail
MPI_Isend(h_src + tail*validSize - haloSize, tailSize, MPI_FLOAT, tail, 0, MPI_COMM_WORLD, &request);
// Send to middle
for(int mid=head+1; mid<tail; mid++)
MPI_Isend(h_src + mid*validSize - haloSize, middleSize, MPI_FLOAT, mid, 0, MPI_COMM_WORLD, &request);
}
}
// Receive data
MPI_Recv(p_src, procSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &status);
// if(numWorkers==1)
// MPI_Recv(p_src, validSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &status);
// else
// {
// // Send to head
// if(rank==head) MPI_Recv(p_src, headSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &status);
// else if(rank==tail) MPI_Recv(p_src, tailSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &status);
// else MPI_Recv(p_src, middleSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &status);
// }
MPI_Sync("Done");
//================================================================================
// Processing here, assume processed, copy directly form src to dst
MPI_Sync("Processing the data");
// Common pattern
if(numWorkers==1)
; // Adjust the size
else
{
if(rank==head) ; // Adjust the size
else if(rank==tail) ; // Adjust the size
else ; // Adjust the size
}
// if(numWorkers==1)
// memcpy(p_dst, p_src, validSize*sizeof(float));
// else
// {
// // Send to head
// if(rank==head) memcpy(p_dst, p_src, headSize*sizeof(float));
// else if(rank==tail) memcpy(p_dst, p_src, tailSize*sizeof(float));
// else memcpy(p_dst, p_src, middleSize*sizeof(float));
// }
// Declare GPU memory
float *d_src;
hipMalloc((void**)&d_src, (procSize)*sizeof(float));
// if(numWorkers==1)
// hipMalloc((void**)&d_src, (validSize)*sizeof(float));
// else
// {
// if(rank==head) hipMalloc((void**)&d_src, (headSize)*sizeof(float));
// else if(rank==tail) hipMalloc((void**)&d_src, (tailSize)*sizeof(float));
// else hipMalloc((void**)&d_src, (middleSize)*sizeof(float));
// }
float *d_dst;
hipMalloc((void**)&d_dst, (procSize)*sizeof(float));
// if(numWorkers==1)
// hipMalloc((void**)&d_dst, (validSize)*sizeof(float));
// else
// {
// if(rank==head) hipMalloc((void**)&d_dst, (headSize)*sizeof(float));
// else if(rank==tail) hipMalloc((void**)&d_dst, (tailSize)*sizeof(float));
// else hipMalloc((void**)&d_dst, (middleSize)*sizeof(float));
// }
MPI_Sync("");
//================================================================================
// Copy to GPU memory
hipMemcpy(d_src, p_src, (procSize)*sizeof(float), hipMemcpyHostToDevice);
// if(numWorkers==1)
// hipMemcpy(d_src, p_src, (validSize)*sizeof(float), hipMemcpyHostToDevice);
// else
// {
// if(rank==head) hipMemcpy(d_src, p_src, (headSize)*sizeof(float), hipMemcpyHostToDevice);
// else if(rank==tail) hipMemcpy(d_src, p_src, (tailSize)*sizeof(float), hipMemcpyHostToDevice);
// else hipMemcpy(d_src, p_src, (middleSize)*sizeof(float), hipMemcpyHostToDevice);
// }
MPI_Sync("");
//================================================================================
// for(int loop=0; loop<numLoops; loop++)
// {
// hipDeviceSynchronize(); cudaCheckLastError();
// MPI_Sync("");
// // Launch the kernel
// warmup(d_src, d_dst, procDim.x, procDim.y, procDim.z);
// // if(numWorkers==1)
// // heatflow(d_src, d_dst, procDim.x, procDim.y, procDim.z);
// // else
// // {
// // if(rank==head) heatflow(d_src, d_dst, procDim.x, procDim.y, procDim.z+1*haloDim.z);
// // else if(rank==tail) heatflow(d_src, d_dst, procDim.x, procDim.y, 1*haloDim.z+procDim.z);
// // else heatflow(d_src, d_dst, procDim.x, procDim.y, 1*haloDim.z+procDim.z+1*haloDim.z);
// // }
// // Device synchronize
// hipDeviceSynchronize(); cudaCheckLastError();
// // Transfer the halo here
// // Copy to right, tail cannot perform
// // // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
// // --> |R| | (i,j-1) |S| | --> |R| | (i,j) |S| | --> |R| | (i,j+1) |S| | -->
// // // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
// if(numWorkers==1)
// ; // No need
// else
// {
// if(rank<tail) MPI_Isend(d_dst + procSize - 2*haloSize, haloSize, MPI_FLOAT, rank+1, 0, MPI_COMM_WORLD, &request);
// if(rank>head) MPI_Recv (d_dst, haloSize, MPI_FLOAT, rank-1, 0, MPI_COMM_WORLD, &status);
// }
// MPI_Sync("Transfer to right for warming up");
// // Copy to left, head cannot perform
// // // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
// //<-- |X|S| (i,j-1) | |R| <-- |X|S| (i,j) | |R| <-- |X|S| (i,j+1) | |R| <--
// // // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
// if(numWorkers==1)
// ; // No need
// else
// {
// if(rank>head) MPI_Isend(d_dst + 1*haloSize, haloSize, MPI_FLOAT, rank-1, 0, MPI_COMM_WORLD, &request);
// if(rank<tail) MPI_Recv (d_dst + procSize - 1*haloSize, haloSize, MPI_FLOAT, rank+1, 0, MPI_COMM_WORLD, &status);
// }
// MPI_Sync("Transfer to left for warming up");
// hipDeviceSynchronize(); cudaCheckLastError();
// MPI_Sync("");
// }
//================================================================================
hipDeviceSynchronize(); cudaCheckLastError();
MPI_Sync("");
//================================================================================
double start = MPI_Wtime();
// Launch the kernel
for(int loop=0; loop<numLoops; loop++)
{
hipDeviceSynchronize(); cudaCheckLastError();
MPI_Sync("");
// Launch the kernel
heatflow(d_src, d_dst, procDim.x, procDim.y, procDim.z);
// if(numWorkers==1)
// heatflow(d_src, d_dst, procDim.x, procDim.y, procDim.z);
// else
// {
// if(rank==head) heatflow(d_src, d_dst, procDim.x, procDim.y, procDim.z+1*haloDim.z);
// else if(rank==tail) heatflow(d_src, d_dst, procDim.x, procDim.y, 1*haloDim.z+procDim.z);
// else heatflow(d_src, d_dst, procDim.x, procDim.y, 1*haloDim.z+procDim.z+1*haloDim.z);
// }
// Device synchronize
hipDeviceSynchronize(); cudaCheckLastError();
MPI_Sync("Device Synchronization");
// Transfer the halo here
// Copy to right, tail cannot perform
// // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
// --> |R| | (i,j-1) |S| | --> |R| | (i,j) |S| | --> |R| | (i,j+1) |S| | -->
// // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
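		// Note: the buffers handed to MPI here are device pointers (d_dst), which
		// presumes a GPU-aware MPI build; with a host-only MPI these exchanges would
		// have to be staged through host memory. Each worker owns procSize floats,
		// so the slab sent to the right neighbour starts at procSize - 2*haloSize
		// and the halo received from the left neighbour lands at offset 0.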
if(numWorkers==1)
; // No need
else
{
if(rank<tail) MPI_Isend(d_dst + procSize - 2*haloSize, haloSize, MPI_FLOAT, rank+1, 0, MPI_COMM_WORLD, &request);
if(rank>head) MPI_Recv (d_dst, haloSize, MPI_FLOAT, rank-1, 0, MPI_COMM_WORLD, &status);
}
MPI_Sync("Transfer to right");
// Copy to left, head cannot perform
// // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
//<-- |X|S| (i,j-1) | |R| <-- |X|S| (i,j) | |R| <-- |X|S| (i,j+1) | |R| <--
// // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
if(numWorkers==1)
; // No need
else
{
if(rank>head) MPI_Isend(d_dst + 1*haloSize, haloSize, MPI_FLOAT, rank-1, 0, MPI_COMM_WORLD, &request);
if(rank<tail) MPI_Recv (d_dst + procSize - 1*haloSize, haloSize, MPI_FLOAT, rank+1, 0, MPI_COMM_WORLD, &status);
}
MPI_Sync("Transfer to left");
if(loop==(numLoops-1)) break;
std::swap(d_src, d_dst);
}
MPI_Sync("");
//================================================================================
double elapsed = MPI_Wtime() - start;
if(rank == master)
cout << "HeatFlow finish: " << endl
<< "dimx: " << dimx << endl
<< "dimy: " << dimy << endl
<< "dimz: " << dimz << endl
<< "numProcess(es): " << numWorkers << endl
<< "Execution time (s): " << elapsed << endl;
MPI_Sync("Done");
//================================================================================
// Copy to CPU memory
hipMemcpy(p_dst, d_dst, (procSize)*sizeof(float), hipMemcpyDeviceToHost);
// if(numWorkers==1)
// hipMemcpy(p_dst, d_dst, (validSize)*sizeof(float), hipMemcpyDeviceToHost);
// else
// {
// if(rank==head) hipMemcpy(p_dst, d_dst, (headSize)*sizeof(float), hipMemcpyDeviceToHost);
// else if(rank==tail) hipMemcpy(p_dst, d_dst, (tailSize)*sizeof(float), hipMemcpyDeviceToHost);
// else hipMemcpy(p_dst, d_dst, (middleSize)*sizeof(float), hipMemcpyDeviceToHost);
// }
//================================================================================
// Calculate the golden result
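	// The host reference applies the same 6-point averaging stencil, with the
	// at() macro clamping out-of-range indices (mirror / Neumann boundary).
	// Since both sides add the six neighbours in the same order, the exact
	// floating-point comparison below can be expected to pass.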
float *h_src_ref = NULL;
float *h_dst_ref = NULL;
if(rank==master)
{
h_src_ref = new float[total];
h_dst_ref = new float[total];
memcpy(h_src_ref, h_src, total*sizeof(float));
for(int loop=0; loop<numLoops; loop++)
{
for(int z=0; z<dimz; z++)
for(int y=0; y<dimy; y++)
for(int x=0; x<dimx; x++)
h_dst_ref[at(x, y, z, dimx, dimy, dimz)] = (h_src_ref[at(x+1, y+0, z+0, dimx, dimy, dimz)] +
h_src_ref[at(x-1, y+0, z+0, dimx, dimy, dimz)] +
h_src_ref[at(x+0, y+1, z+0, dimx, dimy, dimz)] +
h_src_ref[at(x+0, y-1, z+0, dimx, dimy, dimz)] +
h_src_ref[at(x+0, y+0, z+1, dimx, dimy, dimz)] +
h_src_ref[at(x+0, y+0, z-1, dimx, dimy, dimz)]) /6.0f;
if(loop==(numLoops-1)) break;
std::swap(h_src_ref, h_dst_ref);
}
}
//================================================================================
// Gathering the data
MPI_Sync("Master is gathering the data");
/// Send data
if(numWorkers==1)
MPI_Isend(p_dst, validSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &request);
else
{
		// Each worker returns only its valid block: head starts at offset 0, the others skip their front halo
if(rank==head) MPI_Isend(p_dst, validSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &request);
else if(rank==tail) MPI_Isend(p_dst + haloSize, validSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &request);
else MPI_Isend(p_dst + haloSize, validSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &request);
}
/// Receive data
if(rank==master)
{
if(numWorkers==1)
MPI_Recv(h_dst, validSize, MPI_FLOAT, head, 0, MPI_COMM_WORLD, &status);
else
{
			// Receive from head
MPI_Recv(h_dst, validSize, MPI_FLOAT, head, 0, MPI_COMM_WORLD, &status);
			// Receive from tail
MPI_Recv(h_dst + tail*validSize, validSize, MPI_FLOAT, tail, 0, MPI_COMM_WORLD, &status);
			// Receive from middle
for(int mid=head+1; mid<tail; mid++)
MPI_Recv(h_dst + mid*validSize, validSize, MPI_FLOAT, mid, 0, MPI_COMM_WORLD, &status);
}
}
MPI_Sync("Done");
//================================================================================
// check
MPI_Sync("Master is checking the correctness");
if(rank==master)
{
for(int k=0; k<total; k++)
{
if(h_dst_ref[k] != h_dst[k])
{
cout << "Do not match at " << k << endl;
goto cleanup;
}
}
cout << "Matched!!!" << endl;
		cleanup: ; // a label must be followed by a statement
}
MPI_Sync("Done");
//================================================================================
// Finalize to join all of the MPI processes and terminate the program
MPI_Finalize();
return 0;
}
|
98772800c082ebb6e97112776e14d02cca443589.cu
|
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <mpi.h>
#include <helper_math.h>
#include "cmdparser.hpp"
using namespace std;
// -----------------------------------------------------------------------------------
#define cudaCheckLastError() { \
cudaError_t error = cudaGetLastError(); \
int id; cudaGetDevice(&id); \
if(error != cudaSuccess) { \
printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \
__FILE__,__LINE__, cudaGetErrorString(error), id); \
exit(EXIT_FAILURE); \
} \
}
// -----------------------------------------------------------------------------------
#define MPI_Sync(message) { \
MPI_Barrier(MPI_COMM_WORLD); \
if(rank==master) cout << "----------------------------------------------------------"<< endl; \
if(rank==master) cout << message << endl; \
MPI_Barrier(MPI_COMM_WORLD); \
}
// -----------------------------------------------------------------------------------
/// Mirror effect, acts like Neumann Boundary Condition
#define at(x, y, z, dimx, dimy, dimz) (clamp(z, 0, dimz-1)*dimy*dimx \
+clamp(y, 0, dimy-1)*dimx \
+clamp(x, 0, dimx-1))
// -----------------------------------------------------------------------------------
__global__
void __warmup(float *src, float *dst, int dimx, int dimy, int dimz)
{
//3D global index
int3 index_3d = make_int3(
blockIdx.x*blockDim.x+threadIdx.x,
blockIdx.y*blockDim.y+threadIdx.y,
blockIdx.z*blockDim.z+threadIdx.z);
//Check valid indices
if (index_3d.x >= dimx || index_3d.y >= dimy || index_3d.z >= dimz)
return;
//
dst[at(index_3d.x, index_3d.y, index_3d.z, dimx, dimy, dimz)]
= src[at(index_3d.x, index_3d.y, index_3d.z, dimx, dimy, dimz)];
}
// -----------------------------------------------------------------------------------
void warmup(float *src, float *dst, int dimx, int dimy, int dimz)
{
dim3 numBlocks((dimx/8 + ((dimx%8)?1:0)),
(dimy/8 + ((dimy%8)?1:0)),
(dimz/8 + ((dimz%8)?1:0)) );
dim3 numThreads(8, 8, 8);
__warmup<<<numBlocks, numThreads>>>(src, dst, dimx, dimy, dimz);
}
// -----------------------------------------------------------------------------------
__global__
void __heatflow(float *src, float *dst, int dimx, int dimy, int dimz)
{
//3D global index
int3 index_3d = make_int3(
blockIdx.x*blockDim.x+threadIdx.x,
blockIdx.y*blockDim.y+threadIdx.y,
blockIdx.z*blockDim.z+threadIdx.z);
//Check valid indices
if (index_3d.x >= dimx || index_3d.y >= dimy || index_3d.z >= dimz)
return;
//
int index_1d = index_3d.z * dimy * dimx +
index_3d.y * dimx +
index_3d.x;
//
float tmp = index_1d * 0.001f; // Prevent optimization
for(int k=0; k<0; k++)
{
tmp = src[at(index_3d.x, index_3d.y, index_3d.z, dimx, dimy, dimz)];
tmp = tmp-0.5;
tmp = tmp+1;
tmp = tmp-0.5;
dst[at(index_3d.x, index_3d.y, index_3d.z, dimx, dimy, dimz)] = tmp;
}
dst[at(index_3d.x, index_3d.y, index_3d.z, dimx, dimy, dimz)]
= (src[at(index_3d.x+1, index_3d.y+0, index_3d.z+0, dimx, dimy, dimz)] +
src[at(index_3d.x-1, index_3d.y+0, index_3d.z+0, dimx, dimy, dimz)] +
src[at(index_3d.x+0, index_3d.y+1, index_3d.z+0, dimx, dimy, dimz)] +
src[at(index_3d.x+0, index_3d.y-1, index_3d.z+0, dimx, dimy, dimz)] +
src[at(index_3d.x+0, index_3d.y+0, index_3d.z+1, dimx, dimy, dimz)] +
src[at(index_3d.x+0, index_3d.y+0, index_3d.z-1, dimx, dimy, dimz)]) / 6.0f;
}
// -----------------------------------------------------------------------------------
void heatflow(float *src, float *dst, int dimx, int dimy, int dimz)
{
dim3 numBlocks((dimx/8 + ((dimx%8)?1:0)),
(dimy/8 + ((dimy%8)?1:0)),
(dimz/8 + ((dimz%8)?1:0)) );
dim3 numThreads(8, 8, 8);
__heatflow<<<numBlocks, numThreads>>>(src, dst, dimx, dimy, dimz);
}
// -----------------------------------------------------------------------------------
int main (int argc, char *argv[])
{
//================================================================================
	// To select the GPU with cudaSetDevice, we must do so before calling MPI_Init
	// Determining the MPI local rank per node is possible in either OpenMPI or MVAPICH2
int localRank;
char *localRankStr = NULL;
//================================================================================
// Investigate the number of GPUs per node.
int deviceCount = 0;
localRankStr = getenv("OMPI_COMM_WORLD_LOCAL_RANK");
if (localRankStr != NULL)
{
localRank = atoi(localRankStr);
cudaGetDeviceCount(&deviceCount);
// cudaCheckLastError(); //Don't put this line
// printf("There are %02d device(s) at local process %02d\n",
// deviceCount, localRank);
cout << "There are " << deviceCount
<< " device(s) at local process "
			 << localRank << endl;
if(deviceCount>0)
{
cudaSetDevice(localRank % deviceCount); cudaCheckLastError();
cudaDeviceReset(); cudaCheckLastError();
// cudaDeviceEnablePeerAccess (localRank % deviceCount, 0); cudaCheckLastError();
for(int d=0; d<deviceCount; d++)
{
if(d!=(localRank % deviceCount))
{
cudaDeviceEnablePeerAccess (d, 0); cudaCheckLastError();
}
}
}
}
//================================================================================
// Information to control the MPI process
// We have totally n processes, index from 0 to n-1
// master process is indexed at n-1, totally 1
// worker processes are indexed from 0 to n-2, totally (n-1)
// the head process is indexed at 0
// the tail process is indexed at (n-2)
int size, rank;
char name[MPI_MAX_PROCESSOR_NAME];
int length;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Get_processor_name(name, &length);
MPI_Status status;
MPI_Request request;
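	// Note: this single request handle is reused for every MPI_Isend below and
	// is never completed with MPI_Wait/MPI_Test; the code relies on the matching
	// blocking receives plus the barriers in MPI_Sync for ordering, which works
	// in practice but leaks the request objects.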
cout << "Hello World from rank " << rank
<< " out of " << size
<< " at " << name
<< endl;
//================================================================================
// int master = size-1;
// int worker;
// int numMasters = 1;
// int numWorkers = size-1;
// int head = 0;
// int tail = size-2;
int master = 0;
int worker;
int numMasters = 1;
int numWorkers = size;
int head = 0;
int tail = size-1;
//================================================================================
// Parsing the argument
const char* key =
"{ h |help | | print help message }"
"{ |dimx | 512 | Number of the columns }"
"{ |dimy | 512 | Number of the rows }"
"{ |dimz | 512 | Temporal resolution }"
"{ n |numLoops | 10 | Temporal resolution }";
CommandLineParser cmd(argc, argv, key);
// if(rank==master)
// if (argc == 1)
// {
// cout << "Usage: " << argv[0] << " [options]" << endl;
// cout << "Avaible options:" << endl;
// cmd.printParams();
// return 0;
// }
//================================================================================
	const int dimx = cmd.get<int>("dimx", false); // default value has been provided
const int dimy = cmd.get<int>("dimy", false);
const int dimz = cmd.get<int>("dimz", false);
const int numLoops = cmd.get<int>("numLoops", false);
// if(rank==master) cmd.printParams();
// if(rank==master) cout << dimx << endl << dimy << endl << dimz << endl;
//================================================================================
//!!! Determine main problem size and data partition same as CUDA style
dim3 procDim;
dim3 knotDim;
dim3 haloDim;
haloDim.x = 0;
haloDim.y = 0;
haloDim.z = 1; // Pad 1
procDim.x = dimx;
procDim.y = dimy;
procDim.z = dimz/numWorkers; // We partition only along z
if(numWorkers==1)
procDim.z = dimz/numWorkers; // We partition only along z
else
{
if(rank==head) procDim.z = 1*haloDim.z + dimz/numWorkers + 0*haloDim.z; // We partition only along z
else if(rank==tail) procDim.z = 0*haloDim.z + dimz/numWorkers + 1*haloDim.z; // We partition only along z
else procDim.z = 1*haloDim.z + dimz/numWorkers + 1*haloDim.z; // We partition only along z
}
	knotDim.x = dimx/procDim.x + ((dimx%procDim.x)?1:0); // parentheses needed: '+' binds tighter than '?:'
	knotDim.y = dimy/procDim.y + ((dimy%procDim.y)?1:0);
	knotDim.z = dimz/procDim.z + ((dimz%procDim.z)?1:0);
if(rank==head)
{
cout << procDim.x << endl << procDim.y << endl << procDim.z << endl;
cout << knotDim.x << endl << knotDim.y << endl << knotDim.z << endl;
}
//================================================================================
// Master node will handle source and destination data
float *h_src, *h_dst;
h_src = NULL;
h_dst = NULL;
int total = dimx * dimy * dimz;
int validSize = dimx * dimy * dimz / numWorkers; // Valid data range
int haloSize = dimx * dimy * haloDim.z;
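	// Note: the decomposition assumes dimz is divisible by numWorkers; any
	// remainder slices would never be scattered or gathered, and the exact
	// comparison against the full-volume reference at the end would then fail.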
MPI_Sync("Allocating total memory at master");
if(rank==master)
{
h_src = new float[total];
h_dst = new float[total];
for(int k=0; k<total; k++)
{
h_src[k] = (float)rand();
h_dst[k] = 0;
}
}
//================================================================================
MPI_Sync("Done");
//================================================================================
	// Each worker (compute) node handles only part of the data
// Head: validsize+haloSize
// Middle: validsize+2*haloSize
// Tail: validsize+haloSize
int headSize = validSize + 1*haloSize;
int middleSize = validSize + 2*haloSize;
int tailSize = validSize + 1*haloSize;
int procSize = procDim.x*procDim.y*procDim.z;
float *p_src, *p_dst;
p_src = NULL;
p_dst = NULL;
//================================================================================
MPI_Sync("");
cout << "Allocating src memory at " << rank << endl;
MPI_Sync("");
//================================================================================
p_src = new float[procSize];
// if(numWorkers == 1)
// p_src = new float[validSize];
// else
// {
// if(rank==head) p_src = new float[headSize];
// else if (rank==tail) p_src = new float[tailSize];
// else p_src = new float[middleSize];
// }
//================================================================================
MPI_Sync("Done");
//================================================================================
MPI_Sync("");
cout << "Allocating dst memory at " << rank << endl;
MPI_Sync("");
//================================================================================
p_dst = new float[procSize];
// if(numWorkers == 1)
// p_dst = new float[validSize];
// else
// {
// if(rank==head) p_dst = new float[headSize];
// else if (rank==tail) p_dst = new float[tailSize];
// else p_dst = new float[middleSize];
// }
//================================================================================
MPI_Sync("");
cout << "Allocated dst memory at " << rank << endl;
MPI_Sync("Done");
//================================================================================
/// Start to distribute
// Scatter the data
MPI_Sync("Master is scattering the data");
if(rank==master) //Send
{
if(numWorkers==1)
MPI_Isend(h_src, validSize, MPI_FLOAT, head, 0, MPI_COMM_WORLD, &request);
else
{
// Send to head
MPI_Isend(h_src, headSize, MPI_FLOAT, head, 0, MPI_COMM_WORLD, &request);
// Send to tail
MPI_Isend(h_src + tail*validSize - haloSize, tailSize, MPI_FLOAT, tail, 0, MPI_COMM_WORLD, &request);
// Send to middle
for(int mid=head+1; mid<tail; mid++)
MPI_Isend(h_src + mid*validSize - haloSize, middleSize, MPI_FLOAT, mid, 0, MPI_COMM_WORLD, &request);
}
}
// Receive data
MPI_Recv(p_src, procSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &status);
// if(numWorkers==1)
// MPI_Recv(p_src, validSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &status);
// else
// {
// // Send to head
// if(rank==head) MPI_Recv(p_src, headSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &status);
// else if(rank==tail) MPI_Recv(p_src, tailSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &status);
// else MPI_Recv(p_src, middleSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &status);
// }
MPI_Sync("Done");
//================================================================================
	// Processing here, assume processed, copy directly from src to dst
MPI_Sync("Processing the data");
// Common pattern
if(numWorkers==1)
; // Adjust the size
else
{
if(rank==head) ; // Adjust the size
else if(rank==tail) ; // Adjust the size
else ; // Adjust the size
}
// if(numWorkers==1)
// memcpy(p_dst, p_src, validSize*sizeof(float));
// else
// {
// // Send to head
// if(rank==head) memcpy(p_dst, p_src, headSize*sizeof(float));
// else if(rank==tail) memcpy(p_dst, p_src, tailSize*sizeof(float));
// else memcpy(p_dst, p_src, middleSize*sizeof(float));
// }
// Declare GPU memory
float *d_src;
cudaMalloc((void**)&d_src, (procSize)*sizeof(float));
// if(numWorkers==1)
// cudaMalloc((void**)&d_src, (validSize)*sizeof(float));
// else
// {
// if(rank==head) cudaMalloc((void**)&d_src, (headSize)*sizeof(float));
// else if(rank==tail) cudaMalloc((void**)&d_src, (tailSize)*sizeof(float));
// else cudaMalloc((void**)&d_src, (middleSize)*sizeof(float));
// }
float *d_dst;
cudaMalloc((void**)&d_dst, (procSize)*sizeof(float));
// if(numWorkers==1)
// cudaMalloc((void**)&d_dst, (validSize)*sizeof(float));
// else
// {
// if(rank==head) cudaMalloc((void**)&d_dst, (headSize)*sizeof(float));
// else if(rank==tail) cudaMalloc((void**)&d_dst, (tailSize)*sizeof(float));
// else cudaMalloc((void**)&d_dst, (middleSize)*sizeof(float));
// }
MPI_Sync("");
//================================================================================
// Copy to GPU memory
cudaMemcpy(d_src, p_src, (procSize)*sizeof(float), cudaMemcpyHostToDevice);
// if(numWorkers==1)
// cudaMemcpy(d_src, p_src, (validSize)*sizeof(float), cudaMemcpyHostToDevice);
// else
// {
// if(rank==head) cudaMemcpy(d_src, p_src, (headSize)*sizeof(float), cudaMemcpyHostToDevice);
// else if(rank==tail) cudaMemcpy(d_src, p_src, (tailSize)*sizeof(float), cudaMemcpyHostToDevice);
// else cudaMemcpy(d_src, p_src, (middleSize)*sizeof(float), cudaMemcpyHostToDevice);
// }
MPI_Sync("");
//================================================================================
// for(int loop=0; loop<numLoops; loop++)
// {
// cudaDeviceSynchronize(); cudaCheckLastError();
// MPI_Sync("");
// // Launch the kernel
// warmup(d_src, d_dst, procDim.x, procDim.y, procDim.z);
// // if(numWorkers==1)
// // heatflow(d_src, d_dst, procDim.x, procDim.y, procDim.z);
// // else
// // {
// // if(rank==head) heatflow(d_src, d_dst, procDim.x, procDim.y, procDim.z+1*haloDim.z);
// // else if(rank==tail) heatflow(d_src, d_dst, procDim.x, procDim.y, 1*haloDim.z+procDim.z);
// // else heatflow(d_src, d_dst, procDim.x, procDim.y, 1*haloDim.z+procDim.z+1*haloDim.z);
// // }
// // Device synchronize
// cudaDeviceSynchronize(); cudaCheckLastError();
// // Transfer the halo here
// // Copy to right, tail cannot perform
// // // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
// // --> |R| | (i,j-1) |S| | --> |R| | (i,j) |S| | --> |R| | (i,j+1) |S| | -->
// // // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
// if(numWorkers==1)
// ; // No need
// else
// {
// if(rank<tail) MPI_Isend(d_dst + procSize - 2*haloSize, haloSize, MPI_FLOAT, rank+1, 0, MPI_COMM_WORLD, &request);
// if(rank>head) MPI_Recv (d_dst, haloSize, MPI_FLOAT, rank-1, 0, MPI_COMM_WORLD, &status);
// }
// MPI_Sync("Transfer to right for warming up");
// // Copy to left, head cannot perform
// // // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
// //<-- |X|S| (i,j-1) | |R| <-- |X|S| (i,j) | |R| <-- |X|S| (i,j+1) | |R| <--
// // // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
// if(numWorkers==1)
// ; // No need
// else
// {
// if(rank>head) MPI_Isend(d_dst + 1*haloSize, haloSize, MPI_FLOAT, rank-1, 0, MPI_COMM_WORLD, &request);
// if(rank<tail) MPI_Recv (d_dst + procSize - 1*haloSize, haloSize, MPI_FLOAT, rank+1, 0, MPI_COMM_WORLD, &status);
// }
// MPI_Sync("Transfer to left for warming up");
// cudaDeviceSynchronize(); cudaCheckLastError();
// MPI_Sync("");
// }
//================================================================================
cudaDeviceSynchronize(); cudaCheckLastError();
MPI_Sync("");
//================================================================================
double start = MPI_Wtime();
// Launch the kernel
for(int loop=0; loop<numLoops; loop++)
{
cudaDeviceSynchronize(); cudaCheckLastError();
MPI_Sync("");
// Launch the kernel
heatflow(d_src, d_dst, procDim.x, procDim.y, procDim.z);
// if(numWorkers==1)
// heatflow(d_src, d_dst, procDim.x, procDim.y, procDim.z);
// else
// {
// if(rank==head) heatflow(d_src, d_dst, procDim.x, procDim.y, procDim.z+1*haloDim.z);
// else if(rank==tail) heatflow(d_src, d_dst, procDim.x, procDim.y, 1*haloDim.z+procDim.z);
// else heatflow(d_src, d_dst, procDim.x, procDim.y, 1*haloDim.z+procDim.z+1*haloDim.z);
// }
// Device synchronize
cudaDeviceSynchronize(); cudaCheckLastError();
MPI_Sync("Device Synchronization");
// Transfer the halo here
// Copy to right, tail cannot perform
// // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
// --> |R| | (i,j-1) |S| | --> |R| | (i,j) |S| | --> |R| | (i,j+1) |S| | -->
// // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
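		// Note: the buffers handed to MPI here are device pointers (d_dst), which
		// presumes a CUDA-aware MPI build; with a host-only MPI these exchanges would
		// have to be staged through host memory. Each worker owns procSize floats,
		// so the slab sent to the right neighbour starts at procSize - 2*haloSize
		// and the halo received from the left neighbour lands at offset 0.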
if(numWorkers==1)
; // No need
else
{
if(rank<tail) MPI_Isend(d_dst + procSize - 2*haloSize, haloSize, MPI_FLOAT, rank+1, 0, MPI_COMM_WORLD, &request);
if(rank>head) MPI_Recv (d_dst, haloSize, MPI_FLOAT, rank-1, 0, MPI_COMM_WORLD, &status);
}
MPI_Sync("Transfer to right");
// Copy to left, head cannot perform
// // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
//<-- |X|S| (i,j-1) | |R| <-- |X|S| (i,j) | |R| <-- |X|S| (i,j+1) | |R| <--
// // +-+-+---------+-+-+ +-+-+---------+-+-+ +-+-+---------+-+-+
if(numWorkers==1)
; // No need
else
{
if(rank>head) MPI_Isend(d_dst + 1*haloSize, haloSize, MPI_FLOAT, rank-1, 0, MPI_COMM_WORLD, &request);
if(rank<tail) MPI_Recv (d_dst + procSize - 1*haloSize, haloSize, MPI_FLOAT, rank+1, 0, MPI_COMM_WORLD, &status);
}
MPI_Sync("Transfer to left");
if(loop==(numLoops-1)) break;
std::swap(d_src, d_dst);
}
MPI_Sync("");
//================================================================================
double elapsed = MPI_Wtime() - start;
if(rank == master)
cout << "HeatFlow finish: " << endl
<< "dimx: " << dimx << endl
<< "dimy: " << dimy << endl
<< "dimz: " << dimz << endl
<< "numProcess(es): " << numWorkers << endl
<< "Execution time (s): " << elapsed << endl;
MPI_Sync("Done");
//================================================================================
// Copy to CPU memory
cudaMemcpy(p_dst, d_dst, (procSize)*sizeof(float), cudaMemcpyDeviceToHost);
// if(numWorkers==1)
// cudaMemcpy(p_dst, d_dst, (validSize)*sizeof(float), cudaMemcpyDeviceToHost);
// else
// {
// if(rank==head) cudaMemcpy(p_dst, d_dst, (headSize)*sizeof(float), cudaMemcpyDeviceToHost);
// else if(rank==tail) cudaMemcpy(p_dst, d_dst, (tailSize)*sizeof(float), cudaMemcpyDeviceToHost);
// else cudaMemcpy(p_dst, d_dst, (middleSize)*sizeof(float), cudaMemcpyDeviceToHost);
// }
//================================================================================
// Calculate the golden result
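	// The host reference applies the same 6-point averaging stencil, with the
	// at() macro clamping out-of-range indices (mirror / Neumann boundary).
	// Since both sides add the six neighbours in the same order, the exact
	// floating-point comparison below can be expected to pass.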
float *h_src_ref = NULL;
float *h_dst_ref = NULL;
if(rank==master)
{
h_src_ref = new float[total];
h_dst_ref = new float[total];
memcpy(h_src_ref, h_src, total*sizeof(float));
for(int loop=0; loop<numLoops; loop++)
{
for(int z=0; z<dimz; z++)
for(int y=0; y<dimy; y++)
for(int x=0; x<dimx; x++)
h_dst_ref[at(x, y, z, dimx, dimy, dimz)] = (h_src_ref[at(x+1, y+0, z+0, dimx, dimy, dimz)] +
h_src_ref[at(x-1, y+0, z+0, dimx, dimy, dimz)] +
h_src_ref[at(x+0, y+1, z+0, dimx, dimy, dimz)] +
h_src_ref[at(x+0, y-1, z+0, dimx, dimy, dimz)] +
h_src_ref[at(x+0, y+0, z+1, dimx, dimy, dimz)] +
h_src_ref[at(x+0, y+0, z-1, dimx, dimy, dimz)]) /6.0f;
if(loop==(numLoops-1)) break;
std::swap(h_src_ref, h_dst_ref);
}
}
//================================================================================
// Gathering the data
MPI_Sync("Master is gathering the data");
/// Send data
if(numWorkers==1)
MPI_Isend(p_dst, validSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &request);
else
{
		// Each worker returns only its valid block: head starts at offset 0, the others skip their front halo
if(rank==head) MPI_Isend(p_dst, validSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &request);
else if(rank==tail) MPI_Isend(p_dst + haloSize, validSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &request);
else MPI_Isend(p_dst + haloSize, validSize, MPI_FLOAT, master, 0, MPI_COMM_WORLD, &request);
}
/// Receive data
if(rank==master)
{
if(numWorkers==1)
MPI_Recv(h_dst, validSize, MPI_FLOAT, head, 0, MPI_COMM_WORLD, &status);
else
{
			// Receive from head
MPI_Recv(h_dst, validSize, MPI_FLOAT, head, 0, MPI_COMM_WORLD, &status);
			// Receive from tail
MPI_Recv(h_dst + tail*validSize, validSize, MPI_FLOAT, tail, 0, MPI_COMM_WORLD, &status);
			// Receive from middle
for(int mid=head+1; mid<tail; mid++)
MPI_Recv(h_dst + mid*validSize, validSize, MPI_FLOAT, mid, 0, MPI_COMM_WORLD, &status);
}
}
MPI_Sync("Done");
//================================================================================
// check
MPI_Sync("Master is checking the correctness");
if(rank==master)
{
for(int k=0; k<total; k++)
{
if(h_dst_ref[k] != h_dst[k])
{
cout << "Do not match at " << k << endl;
goto cleanup;
}
}
cout << "Matched!!!" << endl;
		cleanup: ; // a label must be followed by a statement
}
MPI_Sync("Done");
//================================================================================
// Finalize to join all of the MPI processes and terminate the program
MPI_Finalize();
return 0;
}
|
22608f8fb7eecd89d673e4ef4a201f3a3701f7e7.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <traversal/bfs_ref.h>
#include <utilities/test_utilities.hpp>
#include <raft/error.hpp>
#include <raft/handle.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <thrust/device_vector.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <algorithms.hpp>
#include <graph.hpp>
#include <fstream>
#include <queue>
#include <stack>
#include <utility>
#ifndef TEST_EPSILON
#define TEST_EPSILON 0.0001
#endif
// NOTE: Defines under which values the difference should be discarded when
// considering values are close to zero
// i.e: Do we consider that the difference between 1.3e-9 and 8.e-12 is
// significant
#ifndef TEST_ZERO_THRESHOLD
#define TEST_ZERO_THRESHOLD 1e-10
#endif
// ============================================================================
// C++ Reference Implementation
// ============================================================================
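// Maps a (source, destination) pair back to its CSR edge index by linearly
// scanning the source vertex's adjacency list (std::find over its CSR row);
// returns -1 if the edge is absent.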
template <typename VT, typename ET, typename WT, typename result_t>
ET get_edge_index_from_source_and_destination(VT source_vertex,
VT destination_vertex,
VT const *indices,
ET const *offsets)
{
ET index = -1;
ET first_edge_idx = offsets[source_vertex];
ET last_edge_idx = offsets[source_vertex + 1];
auto index_it = std::find(indices + first_edge_idx, indices + last_edge_idx, destination_vertex);
if (index_it != (indices + last_edge_idx)) { index = std::distance(indices, index_it); }
return index;
}
template <typename VT, typename ET, typename WT, typename result_t>
void ref_accumulation(result_t *result,
VT const *indices,
ET const *offsets,
VT const number_of_vertices,
std::stack<VT> &S,
std::vector<std::vector<VT>> &pred,
std::vector<double> &sigmas,
std::vector<double> &deltas,
VT source)
{
for (VT v = 0; v < number_of_vertices; ++v) { deltas[v] = 0; }
while (!S.empty()) {
VT w = S.top();
S.pop();
for (VT v : pred[w]) {
ET edge_idx =
get_edge_index_from_source_and_destination<VT, ET, WT, result_t>(v, w, indices, offsets);
double coefficient = (sigmas[v] / sigmas[w]) * (1.0 + deltas[w]);
deltas[v] += coefficient;
result[edge_idx] += coefficient;
}
}
}
// Algorithm 1 of (Brandes, 2001), adapted here to accumulate shortest-path edge betweenness
template <typename VT, typename ET, typename WT, typename result_t>
void reference_edge_betweenness_centrality_impl(VT *indices,
ET *offsets,
VT const number_of_vertices,
result_t *result,
VT const *sources,
VT const number_of_sources)
{
std::queue<VT> Q;
std::stack<VT> S;
// NOTE: dist is of type VT not WT
std::vector<VT> dist(number_of_vertices);
std::vector<std::vector<VT>> pred(number_of_vertices);
std::vector<double> sigmas(number_of_vertices);
std::vector<double> deltas(number_of_vertices);
std::vector<VT> neighbors;
if (sources) {
for (VT source_idx = 0; source_idx < number_of_sources; ++source_idx) {
VT s = sources[source_idx];
// Step 1: Single-source shortest-paths problem
// a. Initialization
ref_bfs<VT, ET>(indices, offsets, number_of_vertices, Q, S, dist, pred, sigmas, s);
// Step 2: Accumulation
// Back propagation of dependencies
ref_accumulation<VT, ET, WT, result_t>(
result, indices, offsets, number_of_vertices, S, pred, sigmas, deltas, s);
}
} else {
for (VT s = 0; s < number_of_vertices; ++s) {
// Step 1: Single-source shortest-paths problem
// a. Initialization
ref_bfs<VT, ET>(indices, offsets, number_of_vertices, Q, S, dist, pred, sigmas, s);
// Step 2: Accumulation
// Back propagation of dependencies
ref_accumulation<VT, ET, WT, result_t>(
result, indices, offsets, number_of_vertices, S, pred, sigmas, deltas, s);
}
}
}
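// Rescaling follows the betweenness-centrality conventions: normalized scores
// are divided by n*(n-1); unnormalized scores on undirected graphs are halved,
// since each path would otherwise be counted once per direction.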
template <typename VT, typename ET, typename WT, typename result_t>
void reference_rescale(result_t *result,
bool directed,
bool normalize,
VT const number_of_vertices,
ET const number_of_edges)
{
result_t rescale_factor = static_cast<result_t>(1);
result_t casted_number_of_vertices = static_cast<result_t>(number_of_vertices);
if (normalize) {
if (number_of_vertices > 1) {
rescale_factor /= ((casted_number_of_vertices) * (casted_number_of_vertices - 1));
}
} else {
if (!directed) { rescale_factor /= static_cast<result_t>(2); }
}
for (auto idx = 0; idx < number_of_edges; ++idx) { result[idx] *= rescale_factor; }
}
template <typename VT, typename ET, typename WT, typename result_t>
void reference_edge_betweenness_centrality(cugraph::GraphCSRView<VT, ET, WT> const &graph,
result_t *result,
bool normalize,
VT const number_of_sources,
VT const *sources)
{
VT number_of_vertices = graph.number_of_vertices;
ET number_of_edges = graph.number_of_edges;
thrust::host_vector<VT> h_indices(number_of_edges);
thrust::host_vector<ET> h_offsets(number_of_vertices + 1);
thrust::device_ptr<VT> d_indices((VT *)&graph.indices[0]);
thrust::device_ptr<ET> d_offsets((ET *)&graph.offsets[0]);
thrust::copy(d_indices, d_indices + number_of_edges, h_indices.begin());
thrust::copy(d_offsets, d_offsets + (number_of_vertices + 1), h_offsets.begin());
hipDeviceSynchronize();
reference_edge_betweenness_centrality_impl<VT, ET, WT, result_t>(
&h_indices[0], &h_offsets[0], number_of_vertices, result, sources, number_of_sources);
reference_rescale<VT, ET, WT, result_t>(
result, graph.prop.directed, normalize, number_of_vertices, number_of_edges);
}
// =============================================================================
// Utility functions
// =============================================================================
// Compare while allowing relative error of epsilon
// zero_threshold indicates when we should drop comparison for small numbers
template <typename T, typename precision_t>
bool compare_close(const T &a, const T &b, const precision_t epsilon, precision_t zero_threshold)
{
  return ((zero_threshold > a && zero_threshold > b)) ||
         ((a >= b * (1.0 - epsilon)) && (a <= b * (1.0 + epsilon)));
}
// =============================================================================
// Test Suite
// =============================================================================
// Defines Betweenness Centrality UseCase
// SSSP's test suite code uses type of Graph parameter that could be used
// (MTX / RMAT)
typedef struct EdgeBC_Usecase_t {
std::string config_; // Path to graph file
std::string file_path_; // Complete path to graph using dataset_root_dir
  int number_of_sources_;  // Number of sources used as traversal starting points
EdgeBC_Usecase_t(const std::string &config, int number_of_sources)
: config_(config), number_of_sources_(number_of_sources)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
// FIXME: Use platform independent stuff from c++14/17 on compiler update
const std::string &rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((config_ != "") && (config_[0] != '/')) {
file_path_ = rapidsDatasetRootDir + "/" + config_;
} else {
file_path_ = config_;
}
};
} EdgeBC_Usecase;
class Tests_EdgeBC : public ::testing::TestWithParam<EdgeBC_Usecase> {
raft::handle_t handle_;
public:
Tests_EdgeBC() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
// FIXME: Should normalize be part of the configuration instead?
// VT vertex identifier data type
// ET edge identifier data type
// WT edge weight data type
// result_t result data type
// normalize should the result be normalized
template <typename VT, typename ET, typename WT, typename result_t, bool normalize>
void run_current_test(const EdgeBC_Usecase &configuration)
{
// Step 1: Construction of the graph based on configuration
bool is_directed = false;
auto csr =
cugraph::test::generate_graph_csr_from_mm<VT, ET, WT>(is_directed, configuration.file_path_);
hipDeviceSynchronize();
cugraph::GraphCSRView<VT, ET, WT> G = csr->view();
G.prop.directed = is_directed;
CUDA_TRY(hipGetLastError());
std::vector<result_t> result(G.number_of_edges, 0);
std::vector<result_t> expected(G.number_of_edges, 0);
// Step 2: Generation of sources based on configuration
// if number_of_sources_ is 0 then sources must be nullptr
// Otherwise we only use the first k values
ASSERT_TRUE(configuration.number_of_sources_ >= 0 &&
configuration.number_of_sources_ <= G.number_of_vertices)
<< "Number number of sources should be >= 0 and"
<< " less than the number of vertices in the graph";
std::vector<VT> sources(configuration.number_of_sources_);
thrust::sequence(thrust::host, sources.begin(), sources.end(), 0);
VT *sources_ptr = nullptr;
if (configuration.number_of_sources_ > 0) { sources_ptr = sources.data(); }
reference_edge_betweenness_centrality(
G, expected.data(), normalize, configuration.number_of_sources_, sources_ptr);
sources_ptr = nullptr;
if (configuration.number_of_sources_ > 0) { sources_ptr = sources.data(); }
thrust::device_vector<result_t> d_result(G.number_of_edges);
cugraph::edge_betweenness_centrality(handle_,
G,
d_result.data().get(),
normalize,
static_cast<WT *>(nullptr),
configuration.number_of_sources_,
sources_ptr);
CUDA_TRY(hipMemcpy(result.data(),
d_result.data().get(),
sizeof(result_t) * G.number_of_edges,
hipMemcpyDeviceToHost));
for (int i = 0; i < G.number_of_edges; ++i)
EXPECT_TRUE(compare_close(result[i], expected[i], TEST_EPSILON, TEST_ZERO_THRESHOLD))
<< "[MISMATCH] vaid = " << i << ", cugraph = " << result[i]
<< " expected = " << expected[i];
}
};
// ============================================================================
// Tests
// ============================================================================
// Verify Un-Normalized results
TEST_P(Tests_EdgeBC, CheckFP32_NO_NORMALIZE)
{
run_current_test<int, int, float, float, false>(GetParam());
}
TEST_P(Tests_EdgeBC, CheckFP64_NO_NORMALIZE)
{
run_current_test<int, int, double, double, false>(GetParam());
}
// Verify Normalized results
TEST_P(Tests_EdgeBC, CheckFP32_NORMALIZE)
{
run_current_test<int, int, float, float, true>(GetParam());
}
TEST_P(Tests_EdgeBC, CheckFP64_NORMALIZE)
{
run_current_test<int, int, double, double, true>(GetParam());
}
// FIXME: There is an InvalidValue on a Memcopy only on tests/datasets/dblp.mtx
INSTANTIATE_TEST_CASE_P(simple_test,
Tests_EdgeBC,
::testing::Values(EdgeBC_Usecase("test/datasets/karate.mtx", 0),
EdgeBC_Usecase("test/datasets/netscience.mtx", 0),
EdgeBC_Usecase("test/datasets/netscience.mtx", 4),
EdgeBC_Usecase("test/datasets/wiki2003.mtx", 4),
EdgeBC_Usecase("test/datasets/wiki-Talk.mtx", 4)));
int main(int argc, char **argv)
{
testing::InitGoogleTest(&argc, argv);
auto resource = std::make_unique<rmm::mr::cuda_memory_resource>();
rmm::mr::set_default_resource(resource.get());
int rc = RUN_ALL_TESTS();
return rc;
}
|
22608f8fb7eecd89d673e4ef4a201f3a3701f7e7.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <traversal/bfs_ref.h>
#include <utilities/test_utilities.hpp>
#include <raft/error.hpp>
#include <raft/handle.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <thrust/device_vector.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <algorithms.hpp>
#include <graph.hpp>
#include <fstream>
#include <queue>
#include <stack>
#include <utility>
#ifndef TEST_EPSILON
#define TEST_EPSILON 0.0001
#endif
// NOTE: Defines under which values the difference should be discarded when
// considering values are close to zero
// i.e: Do we consider that the difference between 1.3e-9 and 8.e-12 is
// significant
#ifndef TEST_ZERO_THRESHOLD
#define TEST_ZERO_THRESHOLD 1e-10
#endif
// ============================================================================
// C++ Reference Implementation
// ============================================================================
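// Maps a (source, destination) pair back to its CSR edge index by linearly
// scanning the source vertex's adjacency list (std::find over its CSR row);
// returns -1 if the edge is absent.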
template <typename VT, typename ET, typename WT, typename result_t>
ET get_edge_index_from_source_and_destination(VT source_vertex,
VT destination_vertex,
VT const *indices,
ET const *offsets)
{
ET index = -1;
ET first_edge_idx = offsets[source_vertex];
ET last_edge_idx = offsets[source_vertex + 1];
auto index_it = std::find(indices + first_edge_idx, indices + last_edge_idx, destination_vertex);
if (index_it != (indices + last_edge_idx)) { index = std::distance(indices, index_it); }
return index;
}
template <typename VT, typename ET, typename WT, typename result_t>
void ref_accumulation(result_t *result,
VT const *indices,
ET const *offsets,
VT const number_of_vertices,
std::stack<VT> &S,
std::vector<std::vector<VT>> &pred,
std::vector<double> &sigmas,
std::vector<double> &deltas,
VT source)
{
for (VT v = 0; v < number_of_vertices; ++v) { deltas[v] = 0; }
while (!S.empty()) {
VT w = S.top();
S.pop();
for (VT v : pred[w]) {
ET edge_idx =
get_edge_index_from_source_and_destination<VT, ET, WT, result_t>(v, w, indices, offsets);
double coefficient = (sigmas[v] / sigmas[w]) * (1.0 + deltas[w]);
deltas[v] += coefficient;
result[edge_idx] += coefficient;
}
}
}
// Algorithm 1 of (Brandes, 2001), adapted here to accumulate shortest-path edge betweenness
template <typename VT, typename ET, typename WT, typename result_t>
void reference_edge_betweenness_centrality_impl(VT *indices,
ET *offsets,
VT const number_of_vertices,
result_t *result,
VT const *sources,
VT const number_of_sources)
{
std::queue<VT> Q;
std::stack<VT> S;
// NOTE: dist is of type VT not WT
std::vector<VT> dist(number_of_vertices);
std::vector<std::vector<VT>> pred(number_of_vertices);
std::vector<double> sigmas(number_of_vertices);
std::vector<double> deltas(number_of_vertices);
std::vector<VT> neighbors;
if (sources) {
for (VT source_idx = 0; source_idx < number_of_sources; ++source_idx) {
VT s = sources[source_idx];
// Step 1: Single-source shortest-paths problem
// a. Initialization
ref_bfs<VT, ET>(indices, offsets, number_of_vertices, Q, S, dist, pred, sigmas, s);
// Step 2: Accumulation
// Back propagation of dependencies
ref_accumulation<VT, ET, WT, result_t>(
result, indices, offsets, number_of_vertices, S, pred, sigmas, deltas, s);
}
} else {
for (VT s = 0; s < number_of_vertices; ++s) {
// Step 1: Single-source shortest-paths problem
// a. Initialization
ref_bfs<VT, ET>(indices, offsets, number_of_vertices, Q, S, dist, pred, sigmas, s);
// Step 2: Accumulation
// Back propagation of dependencies
ref_accumulation<VT, ET, WT, result_t>(
result, indices, offsets, number_of_vertices, S, pred, sigmas, deltas, s);
}
}
}
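// Rescaling follows the betweenness-centrality conventions: normalized scores
// are divided by n*(n-1); unnormalized scores on undirected graphs are halved,
// since each path would otherwise be counted once per direction.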
template <typename VT, typename ET, typename WT, typename result_t>
void reference_rescale(result_t *result,
bool directed,
bool normalize,
VT const number_of_vertices,
ET const number_of_edges)
{
result_t rescale_factor = static_cast<result_t>(1);
result_t casted_number_of_vertices = static_cast<result_t>(number_of_vertices);
if (normalize) {
if (number_of_vertices > 1) {
rescale_factor /= ((casted_number_of_vertices) * (casted_number_of_vertices - 1));
}
} else {
if (!directed) { rescale_factor /= static_cast<result_t>(2); }
}
for (auto idx = 0; idx < number_of_edges; ++idx) { result[idx] *= rescale_factor; }
}
template <typename VT, typename ET, typename WT, typename result_t>
void reference_edge_betweenness_centrality(cugraph::GraphCSRView<VT, ET, WT> const &graph,
result_t *result,
bool normalize,
VT const number_of_sources,
VT const *sources)
{
VT number_of_vertices = graph.number_of_vertices;
ET number_of_edges = graph.number_of_edges;
thrust::host_vector<VT> h_indices(number_of_edges);
thrust::host_vector<ET> h_offsets(number_of_vertices + 1);
thrust::device_ptr<VT> d_indices((VT *)&graph.indices[0]);
thrust::device_ptr<ET> d_offsets((ET *)&graph.offsets[0]);
thrust::copy(d_indices, d_indices + number_of_edges, h_indices.begin());
thrust::copy(d_offsets, d_offsets + (number_of_vertices + 1), h_offsets.begin());
cudaDeviceSynchronize();
reference_edge_betweenness_centrality_impl<VT, ET, WT, result_t>(
&h_indices[0], &h_offsets[0], number_of_vertices, result, sources, number_of_sources);
reference_rescale<VT, ET, WT, result_t>(
result, graph.prop.directed, normalize, number_of_vertices, number_of_edges);
}
// =============================================================================
// Utility functions
// =============================================================================
// Compare while allowing relative error of epsilon
// zero_threshold indicates when we should drop comparison for small numbers
template <typename T, typename precision_t>
bool compare_close(const T &a, const T &b, const precision_t epsilon, precision_t zero_threshold)
{
  return ((zero_threshold > a && zero_threshold > b)) ||
         ((a >= b * (1.0 - epsilon)) && (a <= b * (1.0 + epsilon)));
}
// =============================================================================
// Test Suite
// =============================================================================
// Defines Betweenness Centrality UseCase
// SSSP's test suite code uses type of Graph parameter that could be used
// (MTX / RMAT)
typedef struct EdgeBC_Usecase_t {
std::string config_; // Path to graph file
std::string file_path_; // Complete path to graph using dataset_root_dir
  int number_of_sources_;  // Number of sources used as traversal starting points
EdgeBC_Usecase_t(const std::string &config, int number_of_sources)
: config_(config), number_of_sources_(number_of_sources)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
// FIXME: Use platform independent stuff from c++14/17 on compiler update
const std::string &rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((config_ != "") && (config_[0] != '/')) {
file_path_ = rapidsDatasetRootDir + "/" + config_;
} else {
file_path_ = config_;
}
};
} EdgeBC_Usecase;
class Tests_EdgeBC : public ::testing::TestWithParam<EdgeBC_Usecase> {
raft::handle_t handle_;
public:
Tests_EdgeBC() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
// FIXME: Should normalize be part of the configuration instead?
// VT vertex identifier data type
// ET edge identifier data type
// WT edge weight data type
// result_t result data type
// normalize should the result be normalized
template <typename VT, typename ET, typename WT, typename result_t, bool normalize>
void run_current_test(const EdgeBC_Usecase &configuration)
{
// Step 1: Construction of the graph based on configuration
bool is_directed = false;
auto csr =
cugraph::test::generate_graph_csr_from_mm<VT, ET, WT>(is_directed, configuration.file_path_);
cudaDeviceSynchronize();
cugraph::GraphCSRView<VT, ET, WT> G = csr->view();
G.prop.directed = is_directed;
CUDA_TRY(cudaGetLastError());
std::vector<result_t> result(G.number_of_edges, 0);
std::vector<result_t> expected(G.number_of_edges, 0);
// Step 2: Generation of sources based on configuration
// if number_of_sources_ is 0 then sources must be nullptr
// Otherwise we only use the first k values
ASSERT_TRUE(configuration.number_of_sources_ >= 0 &&
configuration.number_of_sources_ <= G.number_of_vertices)
<< "Number number of sources should be >= 0 and"
<< " less than the number of vertices in the graph";
std::vector<VT> sources(configuration.number_of_sources_);
thrust::sequence(thrust::host, sources.begin(), sources.end(), 0);
VT *sources_ptr = nullptr;
if (configuration.number_of_sources_ > 0) { sources_ptr = sources.data(); }
reference_edge_betweenness_centrality(
G, expected.data(), normalize, configuration.number_of_sources_, sources_ptr);
sources_ptr = nullptr;
if (configuration.number_of_sources_ > 0) { sources_ptr = sources.data(); }
thrust::device_vector<result_t> d_result(G.number_of_edges);
cugraph::edge_betweenness_centrality(handle_,
G,
d_result.data().get(),
normalize,
static_cast<WT *>(nullptr),
configuration.number_of_sources_,
sources_ptr);
CUDA_TRY(cudaMemcpy(result.data(),
d_result.data().get(),
sizeof(result_t) * G.number_of_edges,
cudaMemcpyDeviceToHost));
for (int i = 0; i < G.number_of_edges; ++i)
EXPECT_TRUE(compare_close(result[i], expected[i], TEST_EPSILON, TEST_ZERO_THRESHOLD))
<< "[MISMATCH] vaid = " << i << ", cugraph = " << result[i]
<< " expected = " << expected[i];
}
};
// ============================================================================
// Tests
// ============================================================================
// Verify Un-Normalized results
TEST_P(Tests_EdgeBC, CheckFP32_NO_NORMALIZE)
{
run_current_test<int, int, float, float, false>(GetParam());
}
TEST_P(Tests_EdgeBC, CheckFP64_NO_NORMALIZE)
{
run_current_test<int, int, double, double, false>(GetParam());
}
// Verify Normalized results
TEST_P(Tests_EdgeBC, CheckFP32_NORMALIZE)
{
run_current_test<int, int, float, float, true>(GetParam());
}
TEST_P(Tests_EdgeBC, CheckFP64_NORMALIZE)
{
run_current_test<int, int, double, double, true>(GetParam());
}
// FIXME: There is an InvalidValue on a Memcopy only on tests/datasets/dblp.mtx
INSTANTIATE_TEST_CASE_P(simple_test,
Tests_EdgeBC,
::testing::Values(EdgeBC_Usecase("test/datasets/karate.mtx", 0),
EdgeBC_Usecase("test/datasets/netscience.mtx", 0),
EdgeBC_Usecase("test/datasets/netscience.mtx", 4),
EdgeBC_Usecase("test/datasets/wiki2003.mtx", 4),
EdgeBC_Usecase("test/datasets/wiki-Talk.mtx", 4)));
int main(int argc, char **argv)
{
testing::InitGoogleTest(&argc, argv);
auto resource = std::make_unique<rmm::mr::cuda_memory_resource>();
rmm::mr::set_default_resource(resource.get());
int rc = RUN_ALL_TESTS();
return rc;
}
|
e2b70d3dc746a95bf423bb86b300769c7287f1ec.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mlfe/operators/impl/cuda/kernel/maxpool2d.h"
#include "mlfe/operators/utils.h"
#include "mlfe/device_context/cuda_context.h"
#include <third_party/cub/hipcub/hipcub.hpp>
namespace mlfe{
namespace cuda_kernel{
template <typename T> __global__
void maxpool2d_nhwc_kernel(
const int B,
const int IC,
const int IH,
const int IW,
const int OH,
const int OW,
const int ksize,
const int stride,
const T* x_ptr,
T* y_ptr
)
{
CUDA_1D_KERNEL_LOOP(n, B){
for(int ph = 0; ph < OH; ++ph){
for(int pw = 0; pw < OW; ++pw){
for(int c = 0; c < IC; ++c){
int hstart = ph * stride;
int wstart = pw * stride;
int hend = min(hstart + ksize, IH);
int wend = min(wstart + ksize, IW);
const int pool_index = ph * OW * IC + pw * IC + c;
T max_val = -FLT_MAX;
for(int h = hstart; h < hend; ++h){
for(int w = wstart; w < wend; ++w){
T cur_val = x_ptr[h * IW * IC + w * IC + c];
if(cur_val > max_val){
y_ptr[pool_index] = cur_val;
max_val = cur_val;
}
}
}
}
}
}
x_ptr += IH * IW * IC;
y_ptr += OH * OW * IC;
}
}
template <>
void maxpool2d_nhwc<float>(
const int B,
const int IC,
const int IH,
const int IW,
const int OH,
const int OW,
const int ksize,
const int stride,
const float* x_ptr,
float* y_ptr)
{
hipLaunchKernelGGL(( maxpool2d_nhwc_kernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(B)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, B, IC, IH, IW, OH, OW, ksize, stride, x_ptr, y_ptr);
}
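// Backward pass: each pooled output writes its upstream gradient into the input
// position that won the max in the forward pass. dx_ptr is assigned rather than
// accumulated, so dx presumably has to be zero-initialised by the caller, and
// overlapping windows (stride < ksize) would overwrite instead of summing.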
template <typename T> __global__
void maxpool2d_grad_nhwc_kernel(
const int B,
const int IC,
const int IH,
const int IW,
const int OH,
const int OW,
const int ksize,
const int stride,
const T* x_ptr,
const T* y_ptr,
const T* dy_ptr,
T* dx_ptr
)
{
CUDA_1D_KERNEL_LOOP(n, B){
for(int ph = 0; ph < OH; ++ph){
for(int pw = 0; pw < OW; ++pw){
for(int c = 0; c < IC; ++c){
int hstart = ph * stride;
int wstart = pw * stride;
int hend = min(hstart + ksize, IH);
int wend = min(wstart + ksize, IW);
const int pool_index = ph * OW * IC + pw * IC + c;
int max_idx = -1;
T max_val = -FLT_MAX;
for(int h = hstart; h < hend; ++h){
for(int w = wstart; w < wend; ++w){
const int index = h * IW * IC + w * IC + c;
if(x_ptr[index] > max_val){
max_val = x_ptr[index];
max_idx = index;
}
}
}
dx_ptr[max_idx] = dy_ptr[pool_index];
}
}
}
x_ptr += IH * IW * IC;
dy_ptr += OH * OW * IC;
dx_ptr += IH * IW * IC;
}
}
template <>
void maxpool2d_grad_nhwc<float>(
const int B,
const int IC,
const int IH,
const int IW,
const int OH,
const int OW,
const int ksize,
const int stride,
const float* x_ptr,
const float* y_ptr,
const float* dy_ptr,
float* dx_ptr)
{
hipLaunchKernelGGL(( maxpool2d_grad_nhwc_kernel<float>), dim3(CUDA_CONTEXT_GET_BLOCKS(B)),
dim3(CUDA_CONTEXT_NUM_THREADS), 0, 0, B, IC, IH, IW, OH, OW, ksize, stride, x_ptr, y_ptr, dy_ptr, dx_ptr);
}
} // namespace cuda_kernel
} // namespace mlfe
|
e2b70d3dc746a95bf423bb86b300769c7287f1ec.cu
|
#include "mlfe/operators/impl/cuda/kernel/maxpool2d.h"
#include "mlfe/operators/utils.h"
#include "mlfe/device_context/cuda_context.h"
#include <third_party/cub/cub/block/block_reduce.cuh>
namespace mlfe{
namespace cuda_kernel{
template <typename T> __global__
void maxpool2d_nhwc_kernel(
const int B,
const int IC,
const int IH,
const int IW,
const int OH,
const int OW,
const int ksize,
const int stride,
const T* x_ptr,
T* y_ptr
)
{
CUDA_1D_KERNEL_LOOP(n, B){
for(int ph = 0; ph < OH; ++ph){
for(int pw = 0; pw < OW; ++pw){
for(int c = 0; c < IC; ++c){
int hstart = ph * stride;
int wstart = pw * stride;
int hend = min(hstart + ksize, IH);
int wend = min(wstart + ksize, IW);
const int pool_index = ph * OW * IC + pw * IC + c;
T max_val = -FLT_MAX;
for(int h = hstart; h < hend; ++h){
for(int w = wstart; w < wend; ++w){
T cur_val = x_ptr[h * IW * IC + w * IC + c];
if(cur_val > max_val){
y_ptr[pool_index] = cur_val;
max_val = cur_val;
}
}
}
}
}
}
x_ptr += IH * IW * IC;
y_ptr += OH * OW * IC;
}
}
template <>
void maxpool2d_nhwc<float>(
const int B,
const int IC,
const int IH,
const int IW,
const int OH,
const int OW,
const int ksize,
const int stride,
const float* x_ptr,
float* y_ptr)
{
maxpool2d_nhwc_kernel<float><<<CUDA_CONTEXT_GET_BLOCKS(B),
CUDA_CONTEXT_NUM_THREADS>>>(B, IC, IH, IW, OH, OW, ksize, stride, x_ptr, y_ptr);
}
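// Backward pass: each pooled output writes its upstream gradient into the input
// position that won the max in the forward pass. dx_ptr is assigned rather than
// accumulated, so dx presumably has to be zero-initialised by the caller, and
// overlapping windows (stride < ksize) would overwrite instead of summing.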
template <typename T> __global__
void maxpool2d_grad_nhwc_kernel(
const int B,
const int IC,
const int IH,
const int IW,
const int OH,
const int OW,
const int ksize,
const int stride,
const T* x_ptr,
const T* y_ptr,
const T* dy_ptr,
T* dx_ptr
)
{
CUDA_1D_KERNEL_LOOP(n, B){
for(int ph = 0; ph < OH; ++ph){
for(int pw = 0; pw < OW; ++pw){
for(int c = 0; c < IC; ++c){
int hstart = ph * stride;
int wstart = pw * stride;
int hend = min(hstart + ksize, IH);
int wend = min(wstart + ksize, IW);
const int pool_index = ph * OW * IC + pw * IC + c;
int max_idx = -1;
T max_val = -FLT_MAX;
for(int h = hstart; h < hend; ++h){
for(int w = wstart; w < wend; ++w){
const int index = h * IW * IC + w * IC + c;
if(x_ptr[index] > max_val){
max_val = x_ptr[index];
max_idx = index;
}
}
}
dx_ptr[max_idx] = dy_ptr[pool_index];
}
}
}
x_ptr += IH * IW * IC;
dy_ptr += OH * OW * IC;
dx_ptr += IH * IW * IC;
}
}
template <>
void maxpool2d_grad_nhwc<float>(
const int B,
const int IC,
const int IH,
const int IW,
const int OH,
const int OW,
const int ksize,
const int stride,
const float* x_ptr,
const float* y_ptr,
const float* dy_ptr,
float* dx_ptr)
{
maxpool2d_grad_nhwc_kernel<float><<<CUDA_CONTEXT_GET_BLOCKS(B),
CUDA_CONTEXT_NUM_THREADS>>>(B, IC, IH, IW, OH, OW, ksize, stride, x_ptr, y_ptr, dy_ptr, dx_ptr);
}
} // namespace cuda_kernel
} // namespace mlfe
|
fec271f56f087f28ff6f9f0df72fcdbd19614f94.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Eigen/Dense>
#include <tdp/data/image.h>
#include <tdp/eigen/dense.h>
#include <tdp/calibration/planeEstimation.h>
#include <tdp/camera/camera.h>
#include <tdp/nvidia/helper_cuda.h>
namespace tdp {
__host__ __device__
float HuberCost(float x, float alpha) {
float absx = fabs(x);
return absx <= alpha ? 0.5*x*x : alpha*(absx - 0.5*alpha);
}
__host__ __device__
float HuberCostDeriv(float x, float alpha) {
// Derivative of the Huber cost: the quadratic-region derivative x, clamped to [-alpha, alpha].
return (x < -alpha ? -alpha : (x > alpha ? alpha : x));
}
__global__
void KernelPlaneEstimationHuberDeriv(
Image<float> d,
Camera<float> cam,
Vector3fda nd,
float alpha,
Image<float> f,
Image<Vector3fda> deriv
) {
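// Per-pixel Huber residual of a plane fit: unproject each valid depth pixel to
// a 3-D point p and evaluate the cost and derivative of the residual
// nd.dot(p) + 1. Pixels with NaN depth (di != di) are marked invalid by
// writing NaN into both outputs.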
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
if (idx < d.w_ && idy < d.h_) {
float di = d(idx,idy);
if (di==di) {
Vector3fda p = cam.Unproject(idx,idy,di);
float xi = nd.dot(p)+1;
f(idx,idy) = HuberCost(xi,alpha);
deriv(idx,idy) = HuberCostDeriv(xi,alpha)*p;
} else {
f(idx,idy) = 0./0.;
deriv(idx,idy)(0) = 0./0.;
deriv(idx,idy)(1) = 0./0.;
deriv(idx,idy)(2) = 0./0.;
}
}
}
void PlaneEstimationHuberDeriv(
const Image<float>& d,
const Camera<float>& cam,
const Vector3fda& nd,
float alpha,
Image<float>& f,
Image<Vector3fda>& deriv) {
dim3 threads, blocks;
ComputeKernelParamsForImage(blocks,threads,d,32,32);
hipLaunchKernelGGL(( KernelPlaneEstimationHuberDeriv), dim3(blocks),dim3(threads), 0, 0, d,cam,nd,alpha,f,deriv);
checkCudaErrors(hipDeviceSynchronize());
}
}
|
fec271f56f087f28ff6f9f0df72fcdbd19614f94.cu
|
#include <Eigen/Dense>
#include <tdp/data/image.h>
#include <tdp/eigen/dense.h>
#include <tdp/calibration/planeEstimation.h>
#include <tdp/camera/camera.h>
#include <tdp/nvidia/helper_cuda.h>
namespace tdp {
__host__ __device__
float HuberCost(float x, float alpha) {
float absx = fabs(x);
return absx <= alpha ? 0.5*x*x : alpha*(absx - 0.5*alpha);
}
__host__ __device__
float HuberCostDeriv(float x, float alpha) {
// Derivative of the Huber cost: the quadratic-region derivative x, clamped to [-alpha, alpha].
return (x < -alpha ? -alpha : (x > alpha ? alpha : x));
}
__global__
void KernelPlaneEstimationHuberDeriv(
Image<float> d,
Camera<float> cam,
Vector3fda nd,
float alpha,
Image<float> f,
Image<Vector3fda> deriv
) {
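// Per-pixel Huber residual of a plane fit: unproject each valid depth pixel to
// a 3-D point p and evaluate the cost and derivative of the residual
// nd.dot(p) + 1. Pixels with NaN depth (di != di) are marked invalid by
// writing NaN into both outputs.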
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
if (idx < d.w_ && idy < d.h_) {
float di = d(idx,idy);
if (di==di) {
Vector3fda p = cam.Unproject(idx,idy,di);
float xi = nd.dot(p)+1;
f(idx,idy) = HuberCost(xi,alpha);
deriv(idx,idy) = HuberCostDeriv(xi,alpha)*p;
} else {
f(idx,idy) = 0./0.;
deriv(idx,idy)(0) = 0./0.;
deriv(idx,idy)(1) = 0./0.;
deriv(idx,idy)(2) = 0./0.;
}
}
}
void PlaneEstimationHuberDeriv(
const Image<float>& d,
const Camera<float>& cam,
const Vector3fda& nd,
float alpha,
Image<float>& f,
Image<Vector3fda>& deriv) {
dim3 threads, blocks;
ComputeKernelParamsForImage(blocks,threads,d,32,32);
KernelPlaneEstimationHuberDeriv<<<blocks,threads>>>(d,cam,nd,alpha,f,deriv);
checkCudaErrors(cudaDeviceSynchronize());
}
}
|