hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
7b17c2813aa84c837b9380c21ebfdd6c32d92ce1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "../common/check.h"
#define N 256
__constant__ int const_a[N];
__constant__ int const_b[N];
/**
* CUDA kernel function that adds the two __constant__ input vectors element-wise and stores the result in dev_c.
*/
__global__ void add_vector(int *dev_c)
{
int tx = threadIdx.x;
dev_c[tx] = const_a[tx] + const_b[tx];
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void)
{
int *host_a, *host_b, *host_c;
int *dev_c;
// Declaration of the vectors on the HOST
host_a = (int*)malloc(N * sizeof(int));
host_b = (int*)malloc(N * sizeof(int));
host_c = (int*)malloc(N * sizeof(int));
// Initialization of the vectors
for (int i=0; i<N; i++)
{
host_a[i] = i;
host_b[i] = i*i;
}
// Allocation of the vectors on the DEVICE
hipMalloc( (void**)&dev_c, N*sizeof(int));
// Copy the data to the GPU
hipMemcpyToSymbol(const_a, host_a, N*sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(const_b, host_b, N*sizeof(int), 0, hipMemcpyHostToDevice);
// kernel launch
dim3 blockPerGrid (1, 1, 1);
dim3 ThreadPerBlock (N, 1, 1);
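// Note on the hipify conversion: the CUDA triple-chevron launch
//   add_vector<<<blockPerGrid, ThreadPerBlock>>>(dev_c);
// is rewritten as hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream, args...),
// with 0 shared-memory bytes and the default (NULL) stream filled in below.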
hipLaunchKernelGGL(( add_vector), dim3(blockPerGrid), dim3(ThreadPerBlock), 0, 0, dev_c);
// Copy the data back from the GPU
hipMemcpy(host_c, dev_c, N*sizeof(int), hipMemcpyDeviceToHost);
// Print the results
for (int i=0; i<N; i++)
{
if (i%32 == 0) printf("\n");
printf("%5d ", host_c[i]);
}
// Free CPU memory
free(host_a);
free(host_b);
free(host_c);
// Free GPU memory
hipFree(dev_c);
// No free of the __constant__ memory !!
return 0;
}
| 7b17c2813aa84c837b9380c21ebfdd6c32d92ce1.cu | #include <stdio.h>
#include <stdlib.h>
#include "../common/check.h"
#define N 256
__constant__ int const_a[N];
__constant__ int const_b[N];
/**
* CUDA kernel function that adds the two __constant__ input vectors element-wise and stores the result in dev_c.
*/
__global__ void add_vector(int *dev_c)
{
int tx = threadIdx.x;
dev_c[tx] = const_a[tx] + const_b[tx];
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void)
{
int *host_a, *host_b, *host_c;
int *dev_c;
// Declaration of the vectors on the HOST
host_a = (int*)malloc(N * sizeof(int));
host_b = (int*)malloc(N * sizeof(int));
host_c = (int*)malloc(N * sizeof(int));
// Initialization of the vectors
for (int i=0; i<N; i++)
{
host_a[i] = i;
host_b[i] = i*i;
}
// Allocation of the vectors on the DEVICE
cudaMalloc( (void**)&dev_c, N*sizeof(int));
// Copy the data to the GPU
cudaMemcpyToSymbol(const_a, host_a, N*sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(const_b, host_b, N*sizeof(int), 0, cudaMemcpyHostToDevice);
// kernel launch
dim3 blockPerGrid (1, 1, 1);
dim3 ThreadPerBlock (N, 1, 1);
add_vector<<<blockPerGrid, ThreadPerBlock>>>(dev_c);
// Copy the data back from the GPU
cudaMemcpy(host_c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
// Print the results
for (int i=0; i<N; i++)
{
if (i%32 == 0) printf("\n");
printf("%5d ", host_c[i]);
}
// Free CPU memory
free(host_a);
free(host_b);
free(host_c);
// Free GPU memory
cudaFree(dev_c);
// No free of the __constant__ memory !!
return 0;
}
|
3ac692c5a665212fbbf6a79794495253a9a157db.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Eyal Rozenberg <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cudf_test_utils.cuh"
#include <nvstrings/NVCategory.h>
#include <nvstrings/NVStrings.h>
#include <utilities/type_dispatcher.hpp>
namespace {
namespace detail {
// When streaming char-like types, the standard library streams tend to treat
// them as characters rather than numbers, e.g. you would get an 'a' instead of 97.
// The following function(s) ensure we "promote" such values to integers before
// they're streamed
template <typename T>
const T& promote_for_streaming(const T& x) { return x; }
//int promote_for_streaming(const char& x) { return x; }
//int promote_for_streaming(const unsigned char& x) { return x; }
int promote_for_streaming(const signed char& x) { return x; }
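// Example of the behaviour being worked around (illustrative): for
//   signed char c = 97;
// `std::cout << c` prints the glyph 'a', while
// `std::cout << promote_for_streaming(c)` picks the int overload above and prints 97.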
} // namespace detail
struct column_printer {
template<typename Element>
void operator()(gdf_column const* the_column, unsigned min_printing_width)
{
gdf_size_type num_rows { the_column->size };
Element const* column_data { static_cast<Element const*>(the_column->data) };
std::vector<Element> host_side_data(num_rows);
hipMemcpy(host_side_data.data(), column_data, num_rows * sizeof(Element), hipMemcpyDeviceToHost);
gdf_size_type const num_masks { gdf_valid_allocation_size(num_rows) };
std::vector<gdf_valid_type> h_mask(num_masks, ~gdf_valid_type { 0 });
if (nullptr != the_column->valid) {
hipMemcpy(h_mask.data(), the_column->valid, num_masks * sizeof(gdf_valid_type), hipMemcpyDeviceToHost);
}
for (gdf_size_type i = 0; i < num_rows; ++i) {
std::cout << std::setw(min_printing_width);
if (gdf_is_valid(h_mask.data(), i)) {
std::cout << detail::promote_for_streaming(host_side_data[i]);
}
else {
std::cout << null_representative;
}
std::cout << ' ';
}
std::cout << std::endl;
if(the_column->dtype == GDF_STRING_CATEGORY){
std::cout<<"Data on category:\n";
size_t length = 1;
if(the_column->dtype_info.category != nullptr){
size_t keys_size = static_cast<NVCategory *>(the_column->dtype_info.category)->keys_size();
if(keys_size>0){
char ** data = new char *[keys_size];
for(size_t i=0; i<keys_size; i++){
data[i]=new char[length+1];
}
static_cast<NVCategory *>(the_column->dtype_info.category)->get_keys()->to_host(data, 0, keys_size);
for(size_t i=0; i<keys_size; i++){
data[i][length]=0;
}
for(size_t i=0; i<keys_size; i++){
std::cout<<"("<<data[i]<<"|"<<i<<")\t";
}
std::cout<<std::endl;
}
}
}
}
};
/**---------------------------------------------------------------------------*
* @brief Functor for comparing if two elements between two gdf_columns are
* equal.
*
*---------------------------------------------------------------------------**/
template <typename T, bool has_nulls>
struct elements_equal {
gdf_column lhs_col;
gdf_column rhs_col;
bool nulls_are_equivalent;
using bit_mask_t = bit_mask::bit_mask_t;
/**---------------------------------------------------------------------------*
* @brief Constructs functor for comparing elements between two gdf_column's
*
* @param lhs The left column for comparison
* @param rhs The right column for comparison
* @param nulls_are_equal Desired behavior for whether or not nulls are
* treated as equal to other nulls. Defaults to true.
*---------------------------------------------------------------------------**/
__host__ __device__ elements_equal(gdf_column lhs, gdf_column rhs,
bool nulls_are_equal = true)
: lhs_col{lhs}, rhs_col{rhs}, nulls_are_equivalent{nulls_are_equal} {}
__device__ bool operator()(gdf_index_type row) {
bool const lhs_is_valid{gdf_is_valid(lhs_col.valid, row)};
bool const rhs_is_valid{gdf_is_valid(rhs_col.valid, row)};
if (lhs_is_valid and rhs_is_valid) {
return static_cast<T const*>(lhs_col.data)[row] ==
static_cast<T const*>(rhs_col.data)[row];
}
// If one value is valid but the other is not
if (lhs_is_valid != rhs_is_valid) {
return false;
}
return nulls_are_equivalent;
}
};
} // namespace anonymous
/**
* ---------------------------------------------------------------------------*
* @brief Compare two gdf_columns on all fields, including pairwise comparison
* of data and valid arrays
*
* @tparam T The type of columns to compare
* @param left The left column
* @param right The right column
* @return bool Whether or not the columns are equal
* ---------------------------------------------------------------------------**/
template <typename T>
bool gdf_equal_columns(gdf_column const& left, gdf_column const& right)
{
if (left.size != right.size) return false;
if (left.dtype != right.dtype) return false;
if (left.null_count != right.null_count) return false;
if (left.dtype_info.time_unit != right.dtype_info.time_unit) return false;
if ((left.col_name == nullptr) != (right.col_name == nullptr))
return false; // if one is null but not both
if (left.col_name != nullptr && std::strcmp(left.col_name, right.col_name) != 0)
return false;
if ((left.data == nullptr) != (right.data == nullptr))
return false; // if one is null but not both
if ((left.valid == nullptr) != (right.valid == nullptr))
return false; // if one is null but not both
if (left.data == nullptr)
return true; // logically, both are null
// both are non-null...
bool const has_nulls {(left.valid != nullptr) && (left.null_count > 0)};
bool equal_data = (has_nulls) ?
thrust::all_of(rmm::exec_policy()->on(0),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(left.size),
elements_equal<T, true>{left, right}) :
thrust::all_of(rmm::exec_policy()->on(0),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(left.size),
elements_equal<T, false>{left, right});
CHECK_STREAM(0);
return equal_data;
}
namespace {
struct columns_equal
{
template <typename T>
bool operator()(gdf_column const& left, gdf_column const& right) {
return gdf_equal_columns<T>(left, right);
}
};
}; // namespace anonymous
// Type-erased version of gdf_equal_columns
bool gdf_equal_columns(gdf_column const& left, gdf_column const& right)
{
return cudf::type_dispatcher(left.dtype, columns_equal{}, left, right);
}
void print_gdf_column(gdf_column const * the_column, unsigned min_printing_width)
{
cudf::type_dispatcher(the_column->dtype, column_printer{},
the_column, min_printing_width);
}
void print_valid_data(const gdf_valid_type *validity_mask,
const size_t num_rows)
{
hipError_t error;
hipPointerAttribute_t attrib;
hipPointerGetAttributes(&attrib, validity_mask);
error = hipGetLastError();
std::vector<gdf_valid_type> h_mask(gdf_valid_allocation_size(num_rows));
if (error != hipErrorInvalidValue && isDeviceType(attrib))
hipMemcpy(h_mask.data(), validity_mask, gdf_valid_allocation_size(num_rows),
hipMemcpyDeviceToHost);
else
memcpy(h_mask.data(), validity_mask, gdf_valid_allocation_size(num_rows));
std::transform(
h_mask.begin(), h_mask.begin() + gdf_num_bitmask_elements(num_rows),
std::ostream_iterator<std::string>(std::cout, " "), [](gdf_valid_type x) {
auto bits = std::bitset<GDF_VALID_BITSIZE>(x).to_string('@');
return std::string(bits.rbegin(), bits.rend());
});
std::cout << std::endl;
}
gdf_size_type count_valid_bits_host(
std::vector<gdf_valid_type> const& masks, gdf_size_type const num_rows)
{
if ((0 == num_rows) || (0 == masks.size())) {
return 0;
}
gdf_size_type count{0};
// Count the valid bits for all masks except the last one
for (gdf_size_type i = 0; i < (gdf_num_bitmask_elements(num_rows) - 1); ++i) {
gdf_valid_type current_mask = masks[i];
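// Brian Kernighan's trick: `mask &= (mask - 1)` clears the lowest set bit, so the
// loop below runs once per set (valid) bit; e.g. 0b0110 takes two iterations.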
while (current_mask > 0) {
current_mask &= (current_mask - 1);
count++;
}
}
// Only count the bits in the last mask that correspond to rows
int num_rows_last_mask = num_rows % GDF_VALID_BITSIZE;
if (num_rows_last_mask == 0) {
num_rows_last_mask = GDF_VALID_BITSIZE;
}
// Mask off only the bits that correspond to rows
gdf_valid_type const rows_mask = ( gdf_valid_type{1} << num_rows_last_mask ) - 1;
gdf_valid_type last_mask = masks[gdf_num_bitmask_elements(num_rows) - 1] & rows_mask;
while (last_mask > 0) {
last_mask &= (last_mask - 1);
count++;
}
return count;
}
| 3ac692c5a665212fbbf6a79794495253a9a157db.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Eyal Rozenberg <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cudf_test_utils.cuh"
#include <nvstrings/NVCategory.h>
#include <nvstrings/NVStrings.h>
#include <utilities/type_dispatcher.hpp>
namespace {
namespace detail {
// When streaming char-like types, the standard library streams tend to treat
// them as characters rather than numbers, e.g. you would get an 'a' instead of 97.
// The following function(s) ensure we "promote" such values to integers before
// they're streamed
template <typename T>
const T& promote_for_streaming(const T& x) { return x; }
//int promote_for_streaming(const char& x) { return x; }
//int promote_for_streaming(const unsigned char& x) { return x; }
int promote_for_streaming(const signed char& x) { return x; }
} // namespace detail
struct column_printer {
template<typename Element>
void operator()(gdf_column const* the_column, unsigned min_printing_width)
{
gdf_size_type num_rows { the_column->size };
Element const* column_data { static_cast<Element const*>(the_column->data) };
std::vector<Element> host_side_data(num_rows);
cudaMemcpy(host_side_data.data(), column_data, num_rows * sizeof(Element), cudaMemcpyDeviceToHost);
gdf_size_type const num_masks { gdf_valid_allocation_size(num_rows) };
std::vector<gdf_valid_type> h_mask(num_masks, ~gdf_valid_type { 0 });
if (nullptr != the_column->valid) {
cudaMemcpy(h_mask.data(), the_column->valid, num_masks * sizeof(gdf_valid_type), cudaMemcpyDeviceToHost);
}
for (gdf_size_type i = 0; i < num_rows; ++i) {
std::cout << std::setw(min_printing_width);
if (gdf_is_valid(h_mask.data(), i)) {
std::cout << detail::promote_for_streaming(host_side_data[i]);
}
else {
std::cout << null_representative;
}
std::cout << ' ';
}
std::cout << std::endl;
if(the_column->dtype == GDF_STRING_CATEGORY){
std::cout<<"Data on category:\n";
size_t length = 1;
if(the_column->dtype_info.category != nullptr){
size_t keys_size = static_cast<NVCategory *>(the_column->dtype_info.category)->keys_size();
if(keys_size>0){
char ** data = new char *[keys_size];
for(size_t i=0; i<keys_size; i++){
data[i]=new char[length+1];
}
static_cast<NVCategory *>(the_column->dtype_info.category)->get_keys()->to_host(data, 0, keys_size);
for(size_t i=0; i<keys_size; i++){
data[i][length]=0;
}
for(size_t i=0; i<keys_size; i++){
std::cout<<"("<<data[i]<<"|"<<i<<")\t";
}
std::cout<<std::endl;
}
}
}
}
};
/**---------------------------------------------------------------------------*
* @brief Functor for comparing if two elements between two gdf_columns are
* equal.
*
*---------------------------------------------------------------------------**/
template <typename T, bool has_nulls>
struct elements_equal {
gdf_column lhs_col;
gdf_column rhs_col;
bool nulls_are_equivalent;
using bit_mask_t = bit_mask::bit_mask_t;
/**---------------------------------------------------------------------------*
* @brief Constructs functor for comparing elements between two gdf_column's
*
* @param lhs The left column for comparison
* @param rhs The right column for comparison
* @param nulls_are_equal Desired behavior for whether or not nulls are
* treated as equal to other nulls. Defaults to true.
*---------------------------------------------------------------------------**/
__host__ __device__ elements_equal(gdf_column lhs, gdf_column rhs,
bool nulls_are_equal = true)
: lhs_col{lhs}, rhs_col{rhs}, nulls_are_equivalent{nulls_are_equal} {}
__device__ bool operator()(gdf_index_type row) {
bool const lhs_is_valid{gdf_is_valid(lhs_col.valid, row)};
bool const rhs_is_valid{gdf_is_valid(rhs_col.valid, row)};
if (lhs_is_valid and rhs_is_valid) {
return static_cast<T const*>(lhs_col.data)[row] ==
static_cast<T const*>(rhs_col.data)[row];
}
// If one value is valid but the other is not
if (lhs_is_valid != rhs_is_valid) {
return false;
}
return nulls_are_equivalent;
}
};
} // namespace anonymous
/**
* ---------------------------------------------------------------------------*
* @brief Compare two gdf_columns on all fields, including pairwise comparison
* of data and valid arrays
*
* @tparam T The type of columns to compare
* @param left The left column
* @param right The right column
* @return bool Whether or not the columns are equal
* ---------------------------------------------------------------------------**/
template <typename T>
bool gdf_equal_columns(gdf_column const& left, gdf_column const& right)
{
if (left.size != right.size) return false;
if (left.dtype != right.dtype) return false;
if (left.null_count != right.null_count) return false;
if (left.dtype_info.time_unit != right.dtype_info.time_unit) return false;
if ((left.col_name == nullptr) != (right.col_name == nullptr))
return false; // if one is null but not both
if (left.col_name != nullptr && std::strcmp(left.col_name, right.col_name) != 0)
return false;
if ((left.data == nullptr) != (right.data == nullptr))
return false; // if one is null but not both
if ((left.valid == nullptr) != (right.valid == nullptr))
return false; // if one is null but not both
if (left.data == nullptr)
return true; // logically, both are null
// both are non-null...
bool const has_nulls {(left.valid != nullptr) && (left.null_count > 0)};
bool equal_data = (has_nulls) ?
thrust::all_of(rmm::exec_policy()->on(0),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(left.size),
elements_equal<T, true>{left, right}) :
thrust::all_of(rmm::exec_policy()->on(0),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(left.size),
elements_equal<T, false>{left, right});
CHECK_STREAM(0);
return equal_data;
}
namespace {
struct columns_equal
{
template <typename T>
bool operator()(gdf_column const& left, gdf_column const& right) {
return gdf_equal_columns<T>(left, right);
}
};
}; // namespace anonymous
// Type-erased version of gdf_equal_columns
bool gdf_equal_columns(gdf_column const& left, gdf_column const& right)
{
return cudf::type_dispatcher(left.dtype, columns_equal{}, left, right);
}
void print_gdf_column(gdf_column const * the_column, unsigned min_printing_width)
{
cudf::type_dispatcher(the_column->dtype, column_printer{},
the_column, min_printing_width);
}
void print_valid_data(const gdf_valid_type *validity_mask,
const size_t num_rows)
{
cudaError_t error;
cudaPointerAttributes attrib;
cudaPointerGetAttributes(&attrib, validity_mask);
error = cudaGetLastError();
std::vector<gdf_valid_type> h_mask(gdf_valid_allocation_size(num_rows));
if (error != cudaErrorInvalidValue && isDeviceType(attrib))
cudaMemcpy(h_mask.data(), validity_mask, gdf_valid_allocation_size(num_rows),
cudaMemcpyDeviceToHost);
else
memcpy(h_mask.data(), validity_mask, gdf_valid_allocation_size(num_rows));
std::transform(
h_mask.begin(), h_mask.begin() + gdf_num_bitmask_elements(num_rows),
std::ostream_iterator<std::string>(std::cout, " "), [](gdf_valid_type x) {
auto bits = std::bitset<GDF_VALID_BITSIZE>(x).to_string('@');
return std::string(bits.rbegin(), bits.rend());
});
std::cout << std::endl;
}
gdf_size_type count_valid_bits_host(
std::vector<gdf_valid_type> const& masks, gdf_size_type const num_rows)
{
if ((0 == num_rows) || (0 == masks.size())) {
return 0;
}
gdf_size_type count{0};
// Count the valid bits for all masks except the last one
for (gdf_size_type i = 0; i < (gdf_num_bitmask_elements(num_rows) - 1); ++i) {
gdf_valid_type current_mask = masks[i];
while (current_mask > 0) {
current_mask &= (current_mask - 1);
count++;
}
}
// Only count the bits in the last mask that correspond to rows
int num_rows_last_mask = num_rows % GDF_VALID_BITSIZE;
if (num_rows_last_mask == 0) {
num_rows_last_mask = GDF_VALID_BITSIZE;
}
// Mask off only the bits that correspond to rows
gdf_valid_type const rows_mask = ( gdf_valid_type{1} << num_rows_last_mask ) - 1;
gdf_valid_type last_mask = masks[gdf_num_bitmask_elements(num_rows) - 1] & rows_mask;
while (last_mask > 0) {
last_mask &= (last_mask - 1);
count++;
}
return count;
}
|
475bb405ad5bbc5b6fe78f968b75f8689163307a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "GetMatrixA.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
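// The benchmark sweeps every (matrix size, thread-block shape) pair: argv[1] bounds how
// many rows of matrices_ are used, and all 20 shapes in blocks_ are timed for each one.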
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *OCTData = NULL;
hipMalloc(&OCTData, XSIZE*YSIZE);
float *MatrixA = NULL;
hipMalloc(&MatrixA, XSIZE*YSIZE);
int NumPolynomial = 1;
int OneDataSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
GetMatrixA), dim3(gridBlock),dim3(threadBlock), 0, 0, OCTData,MatrixA,NumPolynomial,OneDataSize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
GetMatrixA), dim3(gridBlock),dim3(threadBlock), 0, 0, OCTData,MatrixA,NumPolynomial,OneDataSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
GetMatrixA), dim3(gridBlock),dim3(threadBlock), 0, 0, OCTData,MatrixA,NumPolynomial,OneDataSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 475bb405ad5bbc5b6fe78f968b75f8689163307a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "GetMatrixA.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *OCTData = NULL;
cudaMalloc(&OCTData, XSIZE*YSIZE);
float *MatrixA = NULL;
cudaMalloc(&MatrixA, XSIZE*YSIZE);
int NumPolynomial = 1;
int OneDataSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
GetMatrixA<<<gridBlock,threadBlock>>>(OCTData,MatrixA,NumPolynomial,OneDataSize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
GetMatrixA<<<gridBlock,threadBlock>>>(OCTData,MatrixA,NumPolynomial,OneDataSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
GetMatrixA<<<gridBlock,threadBlock>>>(OCTData,MatrixA,NumPolynomial,OneDataSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
40a2db1fd1b0823530456954924fe06b0d4458ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "d3d/common.h"
#include "d3d/box/iou.h"
#include "d3d/box/utils.cuh"
using namespace std;
using namespace torch;
using namespace dgal;
template <typename scalar_t>
__global__ void pdist2dr_forward_kernel(
const _CudaAccessor(2) points_,
const _CudaAccessor(2) boxes_,
_CudaAccessor(2) distance_,
_CudaAccessorT(uint8_t, 2) iedge_
) {
using BoxType = Quad2<scalar_t>;
const int nm = blockIdx.x * blockDim.x + threadIdx.x;
const auto N = boxes_.size(0);
const auto M = points_.size(0);
if (nm < N*M)
{
const int i = nm % N;
const int j = nm / N;
BoxType b = _BoxUtilCuda<scalar_t, BoxType>::make_box(boxes_[i]);
Point2<scalar_t> p {.x=points_[j][0], .y=points_[j][1]};
distance_[i][j] = distance(b, p, iedge_[i][j]);
}
}
tuple<Tensor, Tensor> pdist2dr_forward_cuda(
const Tensor points, const Tensor boxes
) {
Tensor distance = torch::empty({boxes.size(0), points.size(0)}, points.options());
Tensor iedge = torch::empty({boxes.size(0), points.size(0)}, torch::dtype(torch::kByte).device(points.device()));
const int total_ops = boxes.size(0) * points.size(0);
const int threads = THREADS_COUNT;
const int blocks = divup(total_ops, threads);
AT_DISPATCH_FLOATING_TYPES(points.scalar_type(), "pdist2dr_forward", [&] {
hipLaunchKernelGGL(( pdist2dr_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
points._cuda_accessor(2),
boxes._cuda_accessor(2),
distance._cuda_accessor(2),
iedge._cuda_accessor_t(uint8_t, 2));
});
return make_tuple(distance, iedge);
}
template <typename scalar_t>
__global__ void pdist2dr_backward_kernel(
const _CudaAccessor(2) points_,
const _CudaAccessor(2) boxes_,
const _CudaAccessor(2) grad_,
_CudaAccessor(2) grad_boxes_,
_CudaAccessor(2) grad_points_,
_CudaAccessorT(uint8_t, 2) iedge_
) {
using BoxType = Quad2<scalar_t>;
const int nm = blockIdx.x * blockDim.x + threadIdx.x;
const auto N = boxes_.size(0);
const auto M = points_.size(0);
if (nm < N*M)
{
const int i = nm % N;
const int j = nm / N;
BoxType b = _BoxUtilCuda<scalar_t, BoxType>::make_box(boxes_[i]);
BoxType grad_b; grad_b.zero();
Point2<scalar_t> p {.x=points_[j][0], .y=points_[j][1]};
Point2<scalar_t> grad_p;
distance_grad(b, p, grad_[i][j], grad_b, grad_p, iedge_[i][j]);
_BoxUtilCuda<scalar_t, BoxType>::make_box_grad(boxes_[i], grad_b, grad_boxes_[i]);
grad_points_[j][0] += grad_p.x;
grad_points_[j][1] += grad_p.y;
}
}
tuple<Tensor, Tensor> pdist2dr_backward_cuda(
const Tensor points, const Tensor boxes, const Tensor grad,
const Tensor iedge
) {
Tensor grad_boxes = torch::zeros_like(boxes);
Tensor grad_points = torch::zeros_like(points);
const int total_ops = boxes.size(0) * points.size(0);
const int threads = THREADS_COUNT;
const int blocks = divup(total_ops, threads);
AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "pdist2dr_backward", [&] {
hipLaunchKernelGGL(( pdist2dr_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
points._cuda_accessor(2),
boxes._cuda_accessor(2),
grad._cuda_accessor(2),
grad_boxes._cuda_accessor(2),
grad_points._cuda_accessor(2),
iedge._cuda_accessor_t(uint8_t, 2));
});
return make_tuple(grad_boxes, grad_points);
}
| 40a2db1fd1b0823530456954924fe06b0d4458ab.cu | #include "d3d/common.h"
#include "d3d/box/iou.h"
#include "d3d/box/utils.cuh"
using namespace std;
using namespace torch;
using namespace dgal;
template <typename scalar_t>
__global__ void pdist2dr_forward_kernel(
const _CudaAccessor(2) points_,
const _CudaAccessor(2) boxes_,
_CudaAccessor(2) distance_,
_CudaAccessorT(uint8_t, 2) iedge_
) {
using BoxType = Quad2<scalar_t>;
const int nm = blockIdx.x * blockDim.x + threadIdx.x;
const auto N = boxes_.size(0);
const auto M = points_.size(0);
if (nm < N*M)
{
const int i = nm % N;
const int j = nm / N;
BoxType b = _BoxUtilCuda<scalar_t, BoxType>::make_box(boxes_[i]);
Point2<scalar_t> p {.x=points_[j][0], .y=points_[j][1]};
distance_[i][j] = distance(b, p, iedge_[i][j]);
}
}
tuple<Tensor, Tensor> pdist2dr_forward_cuda(
const Tensor points, const Tensor boxes
) {
Tensor distance = torch::empty({boxes.size(0), points.size(0)}, points.options());
Tensor iedge = torch::empty({boxes.size(0), points.size(0)}, torch::dtype(torch::kByte).device(points.device()));
const int total_ops = boxes.size(0) * points.size(0);
const int threads = THREADS_COUNT;
const int blocks = divup(total_ops, threads);
AT_DISPATCH_FLOATING_TYPES(points.scalar_type(), "pdist2dr_forward", [&] {
pdist2dr_forward_kernel<scalar_t><<<blocks, threads>>>(
points._cuda_accessor(2),
boxes._cuda_accessor(2),
distance._cuda_accessor(2),
iedge._cuda_accessor_t(uint8_t, 2));
});
return make_tuple(distance, iedge);
}
template <typename scalar_t>
__global__ void pdist2dr_backward_kernel(
const _CudaAccessor(2) points_,
const _CudaAccessor(2) boxes_,
const _CudaAccessor(2) grad_,
_CudaAccessor(2) grad_boxes_,
_CudaAccessor(2) grad_points_,
_CudaAccessorT(uint8_t, 2) iedge_
) {
using BoxType = Quad2<scalar_t>;
const int nm = blockIdx.x * blockDim.x + threadIdx.x;
const auto N = boxes_.size(0);
const auto M = points_.size(0);
if (nm < N*M)
{
const int i = nm % N;
const int j = nm / N;
BoxType b = _BoxUtilCuda<scalar_t, BoxType>::make_box(boxes_[i]);
BoxType grad_b; grad_b.zero();
Point2<scalar_t> p {.x=points_[j][0], .y=points_[j][1]};
Point2<scalar_t> grad_p;
distance_grad(b, p, grad_[i][j], grad_b, grad_p, iedge_[i][j]);
_BoxUtilCuda<scalar_t, BoxType>::make_box_grad(boxes_[i], grad_b, grad_boxes_[i]);
grad_points_[j][0] += grad_p.x;
grad_points_[j][1] += grad_p.y;
}
}
tuple<Tensor, Tensor> pdist2dr_backward_cuda(
const Tensor points, const Tensor boxes, const Tensor grad,
const Tensor iedge
) {
Tensor grad_boxes = torch::zeros_like(boxes);
Tensor grad_points = torch::zeros_like(points);
const int total_ops = boxes.size(0) * points.size(0);
const int threads = THREADS_COUNT;
const int blocks = divup(total_ops, threads);
AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "pdist2dr_backward", [&] {
pdist2dr_backward_kernel<scalar_t><<<blocks, threads>>>(
points._cuda_accessor(2),
boxes._cuda_accessor(2),
grad._cuda_accessor(2),
grad_boxes._cuda_accessor(2),
grad_points._cuda_accessor(2),
iedge._cuda_accessor_t(uint8_t, 2));
});
return make_tuple(grad_boxes, grad_points);
}
|
2cb1f322a57df6e4336e254d7b257f9e24fafcf6.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <hip/hip_runtime.h>
#define nx 680
#define ny 134
#define nz 450
#include "mysecond.c"
inline __host__ __device__ int indexTo1D(int x, int y, int z){
return x + y*nx + z*nx*ny;
}
void rtm8_cpu(float* vsq, float* current_s, float* current_r, float* next_s, float* next_r, float* image, float* a, size_t N)
{
#ifdef _OPENMP
#pragma omp parallel for collapse(3)
#endif
for (int z = 4; z < nz - 4; z++) {
for (int y = 4; y < ny - 4; y++) {
for (int x = 4; x < nx - 4; x++) {
float div =
a[0] * current_s[indexTo1D(x,y,z)] +
a[1] * (current_s[indexTo1D(x+1,y,z)] + current_s[indexTo1D(x-1,y,z)] +
current_s[indexTo1D(x,y+1,z)] + current_s[indexTo1D(x,y-1,z)] +
current_s[indexTo1D(x,y,z+1)] + current_s[indexTo1D(x,y,z-1)]) +
a[2] * (current_s[indexTo1D(x+2,y,z)] + current_s[indexTo1D(x-2,y,z)] +
current_s[indexTo1D(x,y+2,z)] + current_s[indexTo1D(x,y-2,z)] +
current_s[indexTo1D(x,y,z+2)] + current_s[indexTo1D(x,y,z-2)]) +
a[3] * (current_s[indexTo1D(x+3,y,z)] + current_s[indexTo1D(x-3,y,z)] +
current_s[indexTo1D(x,y+3,z)] + current_s[indexTo1D(x,y-3,z)] +
current_s[indexTo1D(x,y,z+3)] + current_s[indexTo1D(x,y,z-3)]) +
a[4] * (current_s[indexTo1D(x+4,y,z)] + current_s[indexTo1D(x-4,y,z)] +
current_s[indexTo1D(x,y+4,z)] + current_s[indexTo1D(x,y-4,z)] +
current_s[indexTo1D(x,y,z+4)] + current_s[indexTo1D(x,y,z-4)]);
next_s[indexTo1D(x,y,z)] = 2*current_s[indexTo1D(x,y,z)] - next_s[indexTo1D(x,y,z)]
+ vsq[indexTo1D(x,y,z)]*div;
div =
a[0] * current_r[indexTo1D(x,y,z)] +
a[1] * (current_r[indexTo1D(x+1,y,z)] + current_r[indexTo1D(x-1,y,z)] +
current_r[indexTo1D(x,y+1,z)] + current_r[indexTo1D(x,y-1,z)] +
current_r[indexTo1D(x,y,z+1)] + current_r[indexTo1D(x,y,z-1)]) +
a[2] * (current_r[indexTo1D(x+2,y,z)] + current_r[indexTo1D(x-2,y,z)] +
current_r[indexTo1D(x,y+2,z)] + current_r[indexTo1D(x,y-2,z)] +
current_r[indexTo1D(x,y,z+2)] + current_r[indexTo1D(x,y,z-2)]) +
a[3] * (current_r[indexTo1D(x+3,y,z)] + current_r[indexTo1D(x-3,y,z)] +
current_r[indexTo1D(x,y+3,z)] + current_r[indexTo1D(x,y-3,z)] +
current_r[indexTo1D(x,y,z+3)] + current_r[indexTo1D(x,y,z-3)]) +
a[4] * (current_r[indexTo1D(x+4,y,z)] + current_r[indexTo1D(x-4,y,z)] +
current_r[indexTo1D(x,y+4,z)] + current_r[indexTo1D(x,y-4,z)] +
current_r[indexTo1D(x,y,z+4)] + current_r[indexTo1D(x,y,z-4)]);
next_r[indexTo1D(x,y,z)] = 2 * current_r[indexTo1D(x,y,z)]
- next_r[indexTo1D(x,y,z)] + vsq[indexTo1D(x,y,z)] * div;
image[indexTo1D(x,y,z)] = next_s[indexTo1D(x,y,z)] * next_r[indexTo1D(x,y,z)];
}
}
}
}
__global__
void rtm8(
const float*__restrict__ vsq,
const float*__restrict__ current_s,
const float*__restrict__ current_r,
float*__restrict__ next_s,
float*__restrict__ next_r,
float*__restrict__ image,
const float*__restrict__ a,
size_t N)
{
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned z = blockIdx.z * blockDim.z + threadIdx.z;
float div;
if ((4 <= x && x < (nx - 4) ) && (4 <= y && y < (ny - 4)) && (4 <= z && z < (nz - 4))){
div =
a[0] * current_s[indexTo1D(x,y,z)] +
a[1] * (current_s[indexTo1D(x+1,y,z)] + current_s[indexTo1D(x-1,y,z)] +
current_s[indexTo1D(x,y+1,z)] + current_s[indexTo1D(x,y-1,z)] +
current_s[indexTo1D(x,y,z+1)] + current_s[indexTo1D(x,y,z-1)]) +
a[2] * (current_s[indexTo1D(x+2,y,z)] + current_s[indexTo1D(x-2,y,z)] +
current_s[indexTo1D(x,y+2,z)] + current_s[indexTo1D(x,y-2,z)] +
current_s[indexTo1D(x,y,z+2)] + current_s[indexTo1D(x,y,z-2)]) +
a[3] * (current_s[indexTo1D(x+3,y,z)] + current_s[indexTo1D(x-3,y,z)] +
current_s[indexTo1D(x,y+3,z)] + current_s[indexTo1D(x,y-3,z)] +
current_s[indexTo1D(x,y,z+3)] + current_s[indexTo1D(x,y,z-3)]) +
a[4] * (current_s[indexTo1D(x+4,y,z)] + current_s[indexTo1D(x-4,y,z)] +
current_s[indexTo1D(x,y+4,z)] + current_s[indexTo1D(x,y-4,z)] +
current_s[indexTo1D(x,y,z+4)] + current_s[indexTo1D(x,y,z-4)]);
next_s[indexTo1D(x,y,z)] = 2*current_s[indexTo1D(x,y,z)] - next_s[indexTo1D(x,y,z)]
+ vsq[indexTo1D(x,y,z)]*div;
div =
a[0] * current_r[indexTo1D(x,y,z)] +
a[1] * (current_r[indexTo1D(x+1,y,z)] + current_r[indexTo1D(x-1,y,z)] +
current_r[indexTo1D(x,y+1,z)] + current_r[indexTo1D(x,y-1,z)] +
current_r[indexTo1D(x,y,z+1)] + current_r[indexTo1D(x,y,z-1)]) +
a[2] * (current_r[indexTo1D(x+2,y,z)] + current_r[indexTo1D(x-2,y,z)] +
current_r[indexTo1D(x,y+2,z)] + current_r[indexTo1D(x,y-2,z)] +
current_r[indexTo1D(x,y,z+2)] + current_r[indexTo1D(x,y,z-2)]) +
a[3] * (current_r[indexTo1D(x+3,y,z)] + current_r[indexTo1D(x-3,y,z)] +
current_r[indexTo1D(x,y+3,z)] + current_r[indexTo1D(x,y-3,z)] +
current_r[indexTo1D(x,y,z+3)] + current_r[indexTo1D(x,y,z-3)]) +
a[4] * (current_r[indexTo1D(x+4,y,z)] + current_r[indexTo1D(x-4,y,z)] +
current_r[indexTo1D(x,y+4,z)] + current_r[indexTo1D(x,y-4,z)] +
current_r[indexTo1D(x,y,z+4)] + current_r[indexTo1D(x,y,z-4)]);
next_r[indexTo1D(x,y,z)] = 2 * current_r[indexTo1D(x,y,z)]
- next_r[indexTo1D(x,y,z)] + vsq[indexTo1D(x,y,z)] * div;
image[indexTo1D(x,y,z)] = next_s[indexTo1D(x,y,z)] * next_r[indexTo1D(x,y,z)];
}
}
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
const int ArraySize = nx * ny * nz;
float* next_s = (float*)malloc(ArraySize * sizeof(float));
float* current_s = (float*)malloc(ArraySize * sizeof(float));
float* next_r = (float*)malloc(ArraySize * sizeof(float));
float* current_r = (float*)malloc(ArraySize * sizeof(float));
float* vsq = (float*)malloc(ArraySize * sizeof(float));
float* image_gpu = (float*)malloc(ArraySize * sizeof(float));
float* image_cpu = (float*)malloc(ArraySize * sizeof(float));
float a[5];
double pts, t0, t1, dt, flops, pt_rate, flop_rate, speedup, memory;
memory = ArraySize*sizeof(float)*6;
pts = (double)repeat*(nx-8)*(ny-8)*(nz-8);
flops = 67*pts;
printf("memory (MB) = %lf\n", memory/1e6);
printf("pts (billions) = %lf\n", pts/1e9);
printf("Tflops = %lf\n", flops/1e12);
// Initialization of matrix
a[0] = -1./560.;
a[1] = 8./315;
a[2] = -0.2;
a[3] = 1.6;
a[4] = -1435./504.;
for (int z = 0; z < nz; z++) {
for (int y = 0; y < ny; y++) {
for (int x = 0; x < nx; x++) {
vsq[indexTo1D(x,y,z)] = 1.0;
next_s[indexTo1D(x,y,z)] = 0;
current_s[indexTo1D(x,y,z)] = 1.0;
next_r[indexTo1D(x,y,z)] = 0;
current_r[indexTo1D(x,y,z)] = 1.0;
image_gpu[indexTo1D(x,y,z)] = image_cpu[indexTo1D(x,y,z)] = 0.5;
}
}
}
//allocate and copy matrix to device
float* vsq_d;
float* next_s_d;
float* current_s_d;
float* next_r_d;
float* current_r_d;
float* image_d;
float* a_d;
hipMalloc(&vsq_d, ArraySize * sizeof(float));
hipMalloc(&next_s_d, ArraySize * sizeof(float));
hipMalloc(&current_s_d, ArraySize * sizeof(float));
hipMalloc(&next_r_d, ArraySize * sizeof(float));
hipMalloc(&current_r_d, ArraySize * sizeof(float));
hipMalloc(&image_d, ArraySize * sizeof(float));
hipMalloc(&a_d, 5 * sizeof(float));
hipMemcpy(vsq_d, vsq, ArraySize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(next_s_d, next_s, ArraySize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(current_s_d, current_s, ArraySize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(next_r_d, next_r, ArraySize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(current_r_d, current_r, ArraySize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(image_d, image_gpu, ArraySize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(a_d, a, 5 * sizeof(float), hipMemcpyHostToDevice);
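// Round the x/y extents up to multiples of the 16x16 thread block so the grid tiles
// the full volume; the grid keeps one layer of blocks per z-plane.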
int groupSize = 16;
int nx_pad = (nx + groupSize - 1) / groupSize ;
int ny_pad = (ny + groupSize - 1) / groupSize ;
int nz_pad = nz;
dim3 grids (nx_pad, ny_pad, nz_pad);
dim3 blocks (groupSize, groupSize, 1);
hipDeviceSynchronize();
t0 = mysecond();
// Launch the kernel repeatedly
for (int t = 0; t < repeat; t++) {
hipLaunchKernelGGL(( rtm8) , dim3(grids), dim3(blocks), 0, 0, vsq_d, current_s_d, next_s_d, current_r_d,
next_r_d, image_d, a_d, ArraySize);
}
hipDeviceSynchronize();
t1 = mysecond();
dt = t1 - t0;
//copy back image value
hipMemcpy(image_gpu, image_d, ArraySize * sizeof(float), hipMemcpyDeviceToHost);
// CPU execution
t0 = mysecond();
for (int t = 0; t < repeat; t++) {
rtm8_cpu(vsq, current_s, next_s, current_r, next_r, image_cpu, a, ArraySize);
}
t1 = mysecond();
// verification
bool ok = true;
for (int i = 0; i < ArraySize; i++) {
if (fabsf(image_cpu[i] - image_gpu[i]) > 0.1) {
printf("@index %d host: %f device %f\n", i, image_cpu[i], image_gpu[i]);
ok = false;
break;
}
}
printf("%s\n", ok ? "PASS" : "FAIL");
pt_rate = pts/dt;
flop_rate = flops/dt;
speedup = (t1 - t0) / dt;
printf("dt = %lf\n", dt);
printf("pt_rate (millions/sec) = %lf\n", pt_rate/1e6);
printf("flop_rate (Gflops) = %lf\n", flop_rate/1e9);
printf("speedup over cpu = %lf\n", speedup);
printf("average kernel execution time = %lf (s)\n", dt / repeat);
//release arrays
free(vsq);
free(next_s);
free(current_s);
free(next_r);
free(current_r);
free(image_cpu);
free(image_gpu);
hipFree(vsq_d);
hipFree(next_s_d);
hipFree(current_s_d);
hipFree(next_r_d);
hipFree(current_r_d);
hipFree(image_d);
hipFree(a_d);
return 0;
}
| 2cb1f322a57df6e4336e254d7b257f9e24fafcf6.cu | #include <iostream>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <hip/hip_runtime.h>
#define nx 680
#define ny 134
#define nz 450
#include "mysecond.c"
inline __host__ __device__ int indexTo1D(int x, int y, int z){
return x + y*nx + z*nx*ny;
}
void rtm8_cpu(float* vsq, float* current_s, float* current_r, float* next_s, float* next_r, float* image, float* a, size_t N)
{
#ifdef _OPENMP
#pragma omp parallel for collapse(3)
#endif
for (int z = 4; z < nz - 4; z++) {
for (int y = 4; y < ny - 4; y++) {
for (int x = 4; x < nx - 4; x++) {
float div =
a[0] * current_s[indexTo1D(x,y,z)] +
a[1] * (current_s[indexTo1D(x+1,y,z)] + current_s[indexTo1D(x-1,y,z)] +
current_s[indexTo1D(x,y+1,z)] + current_s[indexTo1D(x,y-1,z)] +
current_s[indexTo1D(x,y,z+1)] + current_s[indexTo1D(x,y,z-1)]) +
a[2] * (current_s[indexTo1D(x+2,y,z)] + current_s[indexTo1D(x-2,y,z)] +
current_s[indexTo1D(x,y+2,z)] + current_s[indexTo1D(x,y-2,z)] +
current_s[indexTo1D(x,y,z+2)] + current_s[indexTo1D(x,y,z-2)]) +
a[3] * (current_s[indexTo1D(x+3,y,z)] + current_s[indexTo1D(x-3,y,z)] +
current_s[indexTo1D(x,y+3,z)] + current_s[indexTo1D(x,y-3,z)] +
current_s[indexTo1D(x,y,z+3)] + current_s[indexTo1D(x,y,z-3)]) +
a[4] * (current_s[indexTo1D(x+4,y,z)] + current_s[indexTo1D(x-4,y,z)] +
current_s[indexTo1D(x,y+4,z)] + current_s[indexTo1D(x,y-4,z)] +
current_s[indexTo1D(x,y,z+4)] + current_s[indexTo1D(x,y,z-4)]);
next_s[indexTo1D(x,y,z)] = 2*current_s[indexTo1D(x,y,z)] - next_s[indexTo1D(x,y,z)]
+ vsq[indexTo1D(x,y,z)]*div;
div =
a[0] * current_r[indexTo1D(x,y,z)] +
a[1] * (current_r[indexTo1D(x+1,y,z)] + current_r[indexTo1D(x-1,y,z)] +
current_r[indexTo1D(x,y+1,z)] + current_r[indexTo1D(x,y-1,z)] +
current_r[indexTo1D(x,y,z+1)] + current_r[indexTo1D(x,y,z-1)]) +
a[2] * (current_r[indexTo1D(x+2,y,z)] + current_r[indexTo1D(x-2,y,z)] +
current_r[indexTo1D(x,y+2,z)] + current_r[indexTo1D(x,y-2,z)] +
current_r[indexTo1D(x,y,z+2)] + current_r[indexTo1D(x,y,z-2)]) +
a[3] * (current_r[indexTo1D(x+3,y,z)] + current_r[indexTo1D(x-3,y,z)] +
current_r[indexTo1D(x,y+3,z)] + current_r[indexTo1D(x,y-3,z)] +
current_r[indexTo1D(x,y,z+3)] + current_r[indexTo1D(x,y,z-3)]) +
a[4] * (current_r[indexTo1D(x+4,y,z)] + current_r[indexTo1D(x-4,y,z)] +
current_r[indexTo1D(x,y+4,z)] + current_r[indexTo1D(x,y-4,z)] +
current_r[indexTo1D(x,y,z+4)] + current_r[indexTo1D(x,y,z-4)]);
next_r[indexTo1D(x,y,z)] = 2 * current_r[indexTo1D(x,y,z)]
- next_r[indexTo1D(x,y,z)] + vsq[indexTo1D(x,y,z)] * div;
image[indexTo1D(x,y,z)] = next_s[indexTo1D(x,y,z)] * next_r[indexTo1D(x,y,z)];
}
}
}
}
__global__
void rtm8(
const float*__restrict__ vsq,
const float*__restrict__ current_s,
const float*__restrict__ current_r,
float*__restrict__ next_s,
float*__restrict__ next_r,
float*__restrict__ image,
const float*__restrict__ a,
size_t N)
{
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned z = blockIdx.z * blockDim.z + threadIdx.z;
float div;
if ((4 <= x && x < (nx - 4) ) && (4 <= y && y < (ny - 4)) && (4 <= z && z < (nz - 4))){
div =
a[0] * current_s[indexTo1D(x,y,z)] +
a[1] * (current_s[indexTo1D(x+1,y,z)] + current_s[indexTo1D(x-1,y,z)] +
current_s[indexTo1D(x,y+1,z)] + current_s[indexTo1D(x,y-1,z)] +
current_s[indexTo1D(x,y,z+1)] + current_s[indexTo1D(x,y,z-1)]) +
a[2] * (current_s[indexTo1D(x+2,y,z)] + current_s[indexTo1D(x-2,y,z)] +
current_s[indexTo1D(x,y+2,z)] + current_s[indexTo1D(x,y-2,z)] +
current_s[indexTo1D(x,y,z+2)] + current_s[indexTo1D(x,y,z-2)]) +
a[3] * (current_s[indexTo1D(x+3,y,z)] + current_s[indexTo1D(x-3,y,z)] +
current_s[indexTo1D(x,y+3,z)] + current_s[indexTo1D(x,y-3,z)] +
current_s[indexTo1D(x,y,z+3)] + current_s[indexTo1D(x,y,z-3)]) +
a[4] * (current_s[indexTo1D(x+4,y,z)] + current_s[indexTo1D(x-4,y,z)] +
current_s[indexTo1D(x,y+4,z)] + current_s[indexTo1D(x,y-4,z)] +
current_s[indexTo1D(x,y,z+4)] + current_s[indexTo1D(x,y,z-4)]);
next_s[indexTo1D(x,y,z)] = 2*current_s[indexTo1D(x,y,z)] - next_s[indexTo1D(x,y,z)]
+ vsq[indexTo1D(x,y,z)]*div;
div =
a[0] * current_r[indexTo1D(x,y,z)] +
a[1] * (current_r[indexTo1D(x+1,y,z)] + current_r[indexTo1D(x-1,y,z)] +
current_r[indexTo1D(x,y+1,z)] + current_r[indexTo1D(x,y-1,z)] +
current_r[indexTo1D(x,y,z+1)] + current_r[indexTo1D(x,y,z-1)]) +
a[2] * (current_r[indexTo1D(x+2,y,z)] + current_r[indexTo1D(x-2,y,z)] +
current_r[indexTo1D(x,y+2,z)] + current_r[indexTo1D(x,y-2,z)] +
current_r[indexTo1D(x,y,z+2)] + current_r[indexTo1D(x,y,z-2)]) +
a[3] * (current_r[indexTo1D(x+3,y,z)] + current_r[indexTo1D(x-3,y,z)] +
current_r[indexTo1D(x,y+3,z)] + current_r[indexTo1D(x,y-3,z)] +
current_r[indexTo1D(x,y,z+3)] + current_r[indexTo1D(x,y,z-3)]) +
a[4] * (current_r[indexTo1D(x+4,y,z)] + current_r[indexTo1D(x-4,y,z)] +
current_r[indexTo1D(x,y+4,z)] + current_r[indexTo1D(x,y-4,z)] +
current_r[indexTo1D(x,y,z+4)] + current_r[indexTo1D(x,y,z-4)]);
next_r[indexTo1D(x,y,z)] = 2 * current_r[indexTo1D(x,y,z)]
- next_r[indexTo1D(x,y,z)] + vsq[indexTo1D(x,y,z)] * div;
image[indexTo1D(x,y,z)] = next_s[indexTo1D(x,y,z)] * next_r[indexTo1D(x,y,z)];
}
}
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
const int ArraySize = nx * ny * nz;
float* next_s = (float*)malloc(ArraySize * sizeof(float));
float* current_s = (float*)malloc(ArraySize * sizeof(float));
float* next_r = (float*)malloc(ArraySize * sizeof(float));
float* current_r = (float*)malloc(ArraySize * sizeof(float));
float* vsq = (float*)malloc(ArraySize * sizeof(float));
float* image_gpu = (float*)malloc(ArraySize * sizeof(float));
float* image_cpu = (float*)malloc(ArraySize * sizeof(float));
float a[5];
double pts, t0, t1, dt, flops, pt_rate, flop_rate, speedup, memory;
memory = ArraySize*sizeof(float)*6;
pts = (double)repeat*(nx-8)*(ny-8)*(nz-8);
flops = 67*pts;
printf("memory (MB) = %lf\n", memory/1e6);
printf("pts (billions) = %lf\n", pts/1e9);
printf("Tflops = %lf\n", flops/1e12);
// Initialization of matrix
a[0] = -1./560.;
a[1] = 8./315;
a[2] = -0.2;
a[3] = 1.6;
a[4] = -1435./504.;
for (int z = 0; z < nz; z++) {
for (int y = 0; y < ny; y++) {
for (int x = 0; x < nx; x++) {
vsq[indexTo1D(x,y,z)] = 1.0;
next_s[indexTo1D(x,y,z)] = 0;
current_s[indexTo1D(x,y,z)] = 1.0;
next_r[indexTo1D(x,y,z)] = 0;
current_r[indexTo1D(x,y,z)] = 1.0;
image_gpu[indexTo1D(x,y,z)] = image_cpu[indexTo1D(x,y,z)] = 0.5;
}
}
}
//allocate and copy matrix to device
float* vsq_d;
float* next_s_d;
float* current_s_d;
float* next_r_d;
float* current_r_d;
float* image_d;
float* a_d;
hipMalloc(&vsq_d, ArraySize * sizeof(float));
hipMalloc(&next_s_d, ArraySize * sizeof(float));
hipMalloc(&current_s_d, ArraySize * sizeof(float));
hipMalloc(&next_r_d, ArraySize * sizeof(float));
hipMalloc(&current_r_d, ArraySize * sizeof(float));
hipMalloc(&image_d, ArraySize * sizeof(float));
hipMalloc(&a_d, 5 * sizeof(float));
hipMemcpy(vsq_d, vsq, ArraySize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(next_s_d, next_s, ArraySize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(current_s_d, current_s, ArraySize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(next_r_d, next_r, ArraySize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(current_r_d, current_r, ArraySize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(image_d, image_gpu, ArraySize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(a_d, a, 5 * sizeof(float), hipMemcpyHostToDevice);
int groupSize = 16;
int nx_pad = (nx + groupSize - 1) / groupSize ;
int ny_pad = (ny + groupSize - 1) / groupSize ;
int nz_pad = nz;
dim3 grids (nx_pad, ny_pad, nz_pad);
dim3 blocks (groupSize, groupSize, 1);
hipDeviceSynchronize();
t0 = mysecond();
// Launch the kernel repeatedly
for (int t = 0; t < repeat; t++) {
rtm8 <<<grids, blocks>>> (vsq_d, current_s_d, next_s_d, current_r_d,
next_r_d, image_d, a_d, ArraySize);
}
hipDeviceSynchronize();
t1 = mysecond();
dt = t1 - t0;
//copy back image value
hipMemcpy(image_gpu, image_d, ArraySize * sizeof(float), hipMemcpyDeviceToHost);
// CPU execution
t0 = mysecond();
for (int t = 0; t < repeat; t++) {
rtm8_cpu(vsq, current_s, next_s, current_r, next_r, image_cpu, a, ArraySize);
}
t1 = mysecond();
// verification
bool ok = true;
for (int i = 0; i < ArraySize; i++) {
if (fabsf(image_cpu[i] - image_gpu[i]) > 0.1) {
printf("@index %d host: %f device %f\n", i, image_cpu[i], image_gpu[i]);
ok = false;
break;
}
}
printf("%s\n", ok ? "PASS" : "FAIL");
pt_rate = pts/dt;
flop_rate = flops/dt;
speedup = (t1 - t0) / dt;
printf("dt = %lf\n", dt);
printf("pt_rate (millions/sec) = %lf\n", pt_rate/1e6);
printf("flop_rate (Gflops) = %lf\n", flop_rate/1e9);
printf("speedup over cpu = %lf\n", speedup);
printf("average kernel execution time = %lf (s)\n", dt / repeat);
//release arrays
free(vsq);
free(next_s);
free(current_s);
free(next_r);
free(current_r);
free(image_cpu);
free(image_gpu);
hipFree(vsq_d);
hipFree(next_s_d);
hipFree(current_s_d);
hipFree(next_r_d);
hipFree(current_r_d);
hipFree(image_d);
hipFree(a_d);
return 0;
}
|
4fb6fbee42e9266c689d29b53e7508ef924f5af9.hip | // !!! This is a file automatically generated by hipify!!!
/*
This file contains routines for Parallel vector operations.
*/
#define PETSC_SKIP_SPINLOCK
#include <petscconf.h>
#include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/
#include <petsc/private/cudavecimpl.h>
/*MC
VECCUDA - VECCUDA = "cuda" - A VECSEQCUDA on a single-process communicator, and VECMPICUDA otherwise.
Options Database Keys:
. -vec_type cuda - sets the vector type to VECCUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECSEQCUDA, VECMPICUDA, VECSTANDARD, VecType, VecCreateMPI(), VecSetPinnedMemoryMin()
M*/
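/*
   Illustrative sketch of selecting this type (x and N below are placeholder names):
     ierr = VecCreate(PETSC_COMM_WORLD,&x);CHKERRQ(ierr);
     ierr = VecSetSizes(x,PETSC_DECIDE,N);CHKERRQ(ierr);
     ierr = VecSetType(x,VECCUDA);CHKERRQ(ierr);
   or, equivalently, call VecSetFromOptions() on the vector and pass -vec_type cuda at run time.
*/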
PetscErrorCode VecDestroy_MPICUDA(Vec v)
{
Vec_MPI *vecmpi = (Vec_MPI*)v->data;
Vec_CUDA *veccuda;
PetscErrorCode ierr;
hipError_t err;
PetscFunctionBegin;
if (v->spptr) {
veccuda = (Vec_CUDA*)v->spptr;
if (veccuda->GPUarray_allocated) {
err = hipFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
veccuda->GPUarray_allocated = NULL;
}
if (veccuda->stream) {
err = hipStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
}
if (v->pinned_memory) {
ierr = PetscMallocSetCUDAHost();CHKERRQ(ierr);
ierr = PetscFree(vecmpi->array_allocated);CHKERRQ(ierr);
ierr = PetscMallocResetCUDAHost();CHKERRQ(ierr);
v->pinned_memory = PETSC_FALSE;
}
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_MPI(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z)
{
PetscReal sum,work = 0.0;
PetscErrorCode ierr;
PetscFunctionBegin;
if (type == NORM_2 || type == NORM_FROBENIUS) {
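/* ||x||_2 = sqrt( sum over ranks of ||x_local||_2^2 ): square the local norm,
   reduce with MPIU_SUM, then take the square root. */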
ierr = VecNorm_SeqCUDA(xin,NORM_2,&work);CHKERRQ(ierr);
work *= work;
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = PetscSqrtReal(sum);
} else if (type == NORM_1) {
/* Find the local part */
ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else if (type == NORM_INFINITY) {
/* Find the local max */
ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else if (type == NORM_1_AND_2) {
PetscReal temp[2];
ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr);
temp[1] = temp[1]*temp[1];
ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
z[1] = PetscSqrtReal(z[1]);
}
PetscFunctionReturn(0);
}
PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z)
{
PetscScalar awork[128],*work = awork;
PetscErrorCode ierr;
PetscFunctionBegin;
if (nv > 128) {
ierr = PetscMalloc1(nv,&work);CHKERRQ(ierr);
}
ierr = VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
if (nv > 128) {
ierr = PetscFree(work);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA
Options Database Keys:
. -vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI(), VecSetPinnedMemoryMin()
M*/
PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v)
{
PetscErrorCode ierr;
Vec_MPI *vw,*w = (Vec_MPI*)win->data;
PetscScalar *array;
PetscFunctionBegin;
ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr);
ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*v,PETSC_TRUE,w->nghost,0);CHKERRQ(ierr);
vw = (Vec_MPI*)(*v)->data;
ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
/* save local representation of the parallel vector (and scatter) if it exists */
if (w->localrep) {
ierr = VecGetArray(*v,&array);CHKERRQ(ierr);
ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr);
ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr);
vw->localupdate = w->localupdate;
if (vw->localupdate) {
ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr);
}
}
/* New vector should inherit stashing property of parent */
(*v)->stash.donotstash = win->stash.donotstash;
(*v)->stash.ignorenegidx = win->stash.ignorenegidx;
/* change type_name appropriately */
ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr);
ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr);
ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr);
(*v)->map->bs = PetscAbs(win->map->bs);
(*v)->bstash.bs = win->bstash.bs;
PetscFunctionReturn(0);
}
PetscErrorCode VecDotNorm2_MPICUDA(Vec s,Vec t,PetscScalar *dp,PetscScalar *nm)
{
PetscErrorCode ierr;
PetscScalar work[2],sum[2];
PetscFunctionBegin;
ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRMPI(ierr);
*dp = sum[0];
*nm = sum[1];
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA(Vec vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr);
ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr);
vv->offloadmask = PETSC_OFFLOAD_BOTH;
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_CUDA(Vec v)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRMPI(ierr);
if (size == 1) {
ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr);
} else {
ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*@C
VecCreateMPICUDAWithArray - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
- array - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
If the user-provided array is NULL, then VecCUDAPlaceArray() can be used
at a later stage to SET the array for storing the vector values.
PETSc does NOT free the array when the vector is destroyed via VecDestroy().
The user should not free the array until the vector is destroyed.
Level: intermediate
.seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()
@*/
PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr);
ierr = VecCreate(comm,vv);CHKERRQ(ierr);
ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr);
ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
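/*
   Usage sketch (illustrative only; the device buffer d_buf, the local length
   nlocal and the surrounding error handling are assumptions, not part of the
   routine above):

     Vec         X;
     PetscScalar *d_buf;     device memory holding nlocal entries, managed by the caller

     ierr = VecCreateMPICUDAWithArray(PETSC_COMM_WORLD,1,nlocal,PETSC_DECIDE,d_buf,&X);CHKERRQ(ierr);
     ierr = VecSet(X,1.0);CHKERRQ(ierr);       operates directly on d_buf
     ierr = VecDestroy(&X);CHKERRQ(ierr);      d_buf is NOT freed here
*/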
/*@C
VecCreateMPICUDAWithArrays - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
. cpuarray - the user provided CPU array to store the vector values
- gpuarray - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
If both cpuarray and gpuarray are provided, the caller must ensure that
the provided arrays have identical values.
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
PETSc does NOT free the provided arrays when the vector is destroyed via
VecDestroy(). The user should not free the array until the vector is
destroyed.
Level: intermediate
.seealso: VecCreateSeqCUDAWithArrays(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecCUDAPlaceArray(), VecPlaceArray(),
VecCUDAAllocateCheckHost()
@*/
PetscErrorCode VecCreateMPICUDAWithArrays(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar cpuarray[],const PetscScalar gpuarray[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCreateMPICUDAWithArray(comm,bs,n,N,gpuarray,vv);CHKERRQ(ierr);
if (cpuarray && gpuarray) {
Vec_MPI *s = (Vec_MPI*)((*vv)->data);
s->array = (PetscScalar*)cpuarray;
(*vv)->offloadmask = PETSC_OFFLOAD_BOTH;
} else if (cpuarray) {
Vec_MPI *s = (Vec_MPI*)((*vv)->data);
s->array = (PetscScalar*)cpuarray;
(*vv)->offloadmask = PETSC_OFFLOAD_CPU;
} else if (gpuarray) {
(*vv)->offloadmask = PETSC_OFFLOAD_GPU;
} else {
(*vv)->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecMax_MPICUDA(Vec xin,PetscInt *idx,PetscReal *z)
{
PetscErrorCode ierr;
PetscReal work;
PetscFunctionBegin;
ierr = VecMax_SeqCUDA(xin,idx,&work);CHKERRQ(ierr);
if (!idx) {
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else {
struct { PetscReal v; PetscInt i; } in,out;
in.v = work;
in.i = *idx + xin->map->rstart;
ierr = MPIU_Allreduce(&in,&out,1,MPIU_REAL_INT,MPIU_MAXLOC,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = out.v;
*idx = out.i;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecMin_MPICUDA(Vec xin,PetscInt *idx,PetscReal *z)
{
PetscErrorCode ierr;
PetscReal work;
PetscFunctionBegin;
ierr = VecMin_SeqCUDA(xin,idx,&work);CHKERRQ(ierr);
if (!idx) {
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MIN,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else {
struct { PetscReal v; PetscInt i; } in,out;
in.v = work;
in.i = *idx + xin->map->rstart;
ierr = MPIU_Allreduce(&in,&out,1,MPIU_REAL_INT,MPIU_MINLOC,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = out.v;
*idx = out.i;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecBindToCPU_MPICUDA(Vec V,PetscBool pin)
{
PetscErrorCode ierr;
PetscFunctionBegin;
V->boundtocpu = pin;
if (pin) {
ierr = VecCUDACopyFromGPU(V);CHKERRQ(ierr);
V->offloadmask = PETSC_OFFLOAD_CPU; /* since the CPU code will likely change values in the vector */
V->ops->dotnorm2 = NULL;
V->ops->waxpy = VecWAXPY_Seq;
V->ops->dot = VecDot_MPI;
V->ops->mdot = VecMDot_MPI;
V->ops->tdot = VecTDot_MPI;
V->ops->norm = VecNorm_MPI;
V->ops->scale = VecScale_Seq;
V->ops->copy = VecCopy_Seq;
V->ops->set = VecSet_Seq;
V->ops->swap = VecSwap_Seq;
V->ops->axpy = VecAXPY_Seq;
V->ops->axpby = VecAXPBY_Seq;
V->ops->maxpy = VecMAXPY_Seq;
V->ops->aypx = VecAYPX_Seq;
V->ops->axpbypcz = VecAXPBYPCZ_Seq;
V->ops->pointwisemult = VecPointwiseMult_Seq;
V->ops->setrandom = VecSetRandom_Seq;
V->ops->placearray = VecPlaceArray_Seq;
V->ops->replacearray = VecReplaceArray_SeqCUDA;
V->ops->resetarray = VecResetArray_Seq;
V->ops->dot_local = VecDot_Seq;
V->ops->tdot_local = VecTDot_Seq;
V->ops->norm_local = VecNorm_Seq;
V->ops->mdot_local = VecMDot_Seq;
V->ops->pointwisedivide = VecPointwiseDivide_Seq;
V->ops->getlocalvector = NULL;
V->ops->restorelocalvector = NULL;
V->ops->getlocalvectorread = NULL;
V->ops->restorelocalvectorread = NULL;
V->ops->getarraywrite = NULL;
V->ops->max = VecMax_MPI;
V->ops->min = VecMin_MPI;
V->ops->reciprocal = VecReciprocal_Default;
V->ops->sum = NULL;
V->ops->shift = NULL;
/* default random number generator */
ierr = PetscFree(V->defaultrandtype);CHKERRQ(ierr);
ierr = PetscStrallocpy(PETSCRANDER48,&V->defaultrandtype);CHKERRQ(ierr);
} else {
V->ops->dotnorm2 = VecDotNorm2_MPICUDA;
V->ops->waxpy = VecWAXPY_SeqCUDA;
V->ops->duplicate = VecDuplicate_MPICUDA;
V->ops->dot = VecDot_MPICUDA;
V->ops->mdot = VecMDot_MPICUDA;
V->ops->tdot = VecTDot_MPICUDA;
V->ops->norm = VecNorm_MPICUDA;
V->ops->scale = VecScale_SeqCUDA;
V->ops->copy = VecCopy_SeqCUDA;
V->ops->set = VecSet_SeqCUDA;
V->ops->swap = VecSwap_SeqCUDA;
V->ops->axpy = VecAXPY_SeqCUDA;
V->ops->axpby = VecAXPBY_SeqCUDA;
V->ops->maxpy = VecMAXPY_SeqCUDA;
V->ops->aypx = VecAYPX_SeqCUDA;
V->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA;
V->ops->pointwisemult = VecPointwiseMult_SeqCUDA;
V->ops->setrandom = VecSetRandom_SeqCUDA;
V->ops->placearray = VecPlaceArray_SeqCUDA;
V->ops->replacearray = VecReplaceArray_SeqCUDA;
V->ops->resetarray = VecResetArray_SeqCUDA;
V->ops->dot_local = VecDot_SeqCUDA;
V->ops->tdot_local = VecTDot_SeqCUDA;
V->ops->norm_local = VecNorm_SeqCUDA;
V->ops->mdot_local = VecMDot_SeqCUDA;
V->ops->destroy = VecDestroy_MPICUDA;
V->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA;
V->ops->getlocalvector = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA;
V->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA;
V->ops->getarraywrite = VecGetArrayWrite_SeqCUDA;
V->ops->getarray = VecGetArray_SeqCUDA;
V->ops->restorearray = VecRestoreArray_SeqCUDA;
V->ops->getarrayandmemtype = VecGetArrayAndMemType_SeqCUDA;
V->ops->restorearrayandmemtype = VecRestoreArrayAndMemType_SeqCUDA;
V->ops->max = VecMax_MPICUDA;
V->ops->min = VecMin_MPICUDA;
V->ops->reciprocal = VecReciprocal_SeqCUDA;
V->ops->sum = VecSum_SeqCUDA;
V->ops->shift = VecShift_SeqCUDA;
/* default random number generator */
ierr = PetscFree(V->defaultrandtype);CHKERRQ(ierr);
ierr = PetscStrallocpy(PETSCCURAND,&V->defaultrandtype);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
{
PetscErrorCode ierr;
Vec_CUDA *veccuda;
PetscFunctionBegin;
ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr);
ierr = VecBindToCPU_MPICUDA(vv,PETSC_FALSE);CHKERRQ(ierr);
vv->ops->bindtocpu = VecBindToCPU_MPICUDA;
/* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */
if (alloc && !array) {
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr);
vv->offloadmask = PETSC_OFFLOAD_BOTH;
}
if (array) {
if (!vv->spptr) {
PetscReal pinned_memory_min;
PetscBool flag;
/* Cannot use PetscNew() here because spptr is void* */
ierr = PetscCalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)vv->spptr;
vv->minimum_bytes_pinned_memory = 0;
/* Need to parse command line for minimum size to use for pinned memory allocations on host here.
Note: This same code duplicated in VecCreate_SeqCUDA_Private() and VecCUDAAllocateCheck(). Is there a good way to avoid this? */
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)vv),((PetscObject)vv)->prefix,"VECCUDA Options","Vec");CHKERRQ(ierr);
pinned_memory_min = vv->minimum_bytes_pinned_memory;
ierr = PetscOptionsReal("-vec_pinned_memory_min","Minimum size (in bytes) for an allocation to use pinned memory on host","VecSetPinnedMemoryMin",pinned_memory_min,&pinned_memory_min,&flag);CHKERRQ(ierr);
if (flag) vv->minimum_bytes_pinned_memory = pinned_memory_min;
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
veccuda = (Vec_CUDA*)vv->spptr;
veccuda->GPUarray = (PetscScalar*)array;
vv->offloadmask = PETSC_OFFLOAD_GPU;
}
PetscFunctionReturn(0);
}
| 4fb6fbee42e9266c689d29b53e7508ef924f5af9.cu |
/*
This file contains routines for Parallel vector operations.
*/
#define PETSC_SKIP_SPINLOCK
#include <petscconf.h>
#include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/
#include <petsc/private/cudavecimpl.h>
/*MC
VECCUDA - VECCUDA = "cuda" - A VECSEQCUDA on a single-process communicator, and VECMPICUDA otherwise.
Options Database Keys:
. -vec_type cuda - sets the vector type to VECCUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECSEQCUDA, VECMPICUDA, VECSTANDARD, VecType, VecCreateMPI(), VecSetPinnedMemoryMin()
M*/
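/*
   Example of selecting this type at runtime (sketch; the global length N and the
   error handling are assumed):

     Vec v;

     ierr = VecCreate(PETSC_COMM_WORLD,&v);CHKERRQ(ierr);
     ierr = VecSetSizes(v,PETSC_DECIDE,N);CHKERRQ(ierr);
     ierr = VecSetFromOptions(v);CHKERRQ(ierr);    with -vec_type cuda this becomes VECSEQCUDA or VECMPICUDA
     ierr = VecDestroy(&v);CHKERRQ(ierr);
*/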
PetscErrorCode VecDestroy_MPICUDA(Vec v)
{
Vec_MPI *vecmpi = (Vec_MPI*)v->data;
Vec_CUDA *veccuda;
PetscErrorCode ierr;
cudaError_t err;
PetscFunctionBegin;
if (v->spptr) {
veccuda = (Vec_CUDA*)v->spptr;
if (veccuda->GPUarray_allocated) {
err = cudaFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
veccuda->GPUarray_allocated = NULL;
}
if (veccuda->stream) {
err = cudaStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
}
if (v->pinned_memory) {
ierr = PetscMallocSetCUDAHost();CHKERRQ(ierr);
ierr = PetscFree(vecmpi->array_allocated);CHKERRQ(ierr);
ierr = PetscMallocResetCUDAHost();CHKERRQ(ierr);
v->pinned_memory = PETSC_FALSE;
}
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_MPI(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z)
{
PetscReal sum,work = 0.0;
PetscErrorCode ierr;
PetscFunctionBegin;
if (type == NORM_2 || type == NORM_FROBENIUS) {
ierr = VecNorm_SeqCUDA(xin,NORM_2,&work);CHKERRQ(ierr);
work *= work;
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = PetscSqrtReal(sum);
} else if (type == NORM_1) {
/* Find the local part */
ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else if (type == NORM_INFINITY) {
/* Find the local max */
ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else if (type == NORM_1_AND_2) {
PetscReal temp[2];
ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr);
temp[1] = temp[1]*temp[1];
ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
z[1] = PetscSqrtReal(z[1]);
}
PetscFunctionReturn(0);
}
PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z)
{
PetscScalar awork[128],*work = awork;
PetscErrorCode ierr;
PetscFunctionBegin;
if (nv > 128) {
ierr = PetscMalloc1(nv,&work);CHKERRQ(ierr);
}
ierr = VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
if (nv > 128) {
ierr = PetscFree(work);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA
Options Database Keys:
. -vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI(), VecSetPinnedMemoryMin()
M*/
PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v)
{
PetscErrorCode ierr;
Vec_MPI *vw,*w = (Vec_MPI*)win->data;
PetscScalar *array;
PetscFunctionBegin;
ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr);
ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*v,PETSC_TRUE,w->nghost,0);CHKERRQ(ierr);
vw = (Vec_MPI*)(*v)->data;
ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
/* save local representation of the parallel vector (and scatter) if it exists */
if (w->localrep) {
ierr = VecGetArray(*v,&array);CHKERRQ(ierr);
ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr);
ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr);
vw->localupdate = w->localupdate;
if (vw->localupdate) {
ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr);
}
}
/* New vector should inherit stashing property of parent */
(*v)->stash.donotstash = win->stash.donotstash;
(*v)->stash.ignorenegidx = win->stash.ignorenegidx;
/* change type_name appropriately */
ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr);
ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr);
ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr);
(*v)->map->bs = PetscAbs(win->map->bs);
(*v)->bstash.bs = win->bstash.bs;
PetscFunctionReturn(0);
}
PetscErrorCode VecDotNorm2_MPICUDA(Vec s,Vec t,PetscScalar *dp,PetscScalar *nm)
{
PetscErrorCode ierr;
PetscScalar work[2],sum[2];
PetscFunctionBegin;
ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRMPI(ierr);
*dp = sum[0];
*nm = sum[1];
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA(Vec vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr);
ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr);
vv->offloadmask = PETSC_OFFLOAD_BOTH;
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_CUDA(Vec v)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRMPI(ierr);
if (size == 1) {
ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr);
} else {
ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*@C
VecCreateMPICUDAWithArray - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
- array - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
If the user-provided array is NULL, then VecCUDAPlaceArray() can be used
at a later stage to SET the array for storing the vector values.
PETSc does NOT free the array when the vector is destroyed via VecDestroy().
The user should not free the array until the vector is destroyed.
Level: intermediate
.seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()
@*/
PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr);
ierr = VecCreate(comm,vv);CHKERRQ(ierr);
ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr);
ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@C
VecCreateMPICUDAWithArrays - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
. cpuarray - the user provided CPU array to store the vector values
- gpuarray - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
If both cpuarray and gpuarray are provided, the caller must ensure that
the provided arrays have identical values.
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
PETSc does NOT free the provided arrays when the vector is destroyed via
VecDestroy(). The user should not free the array until the vector is
destroyed.
Level: intermediate
.seealso: VecCreateSeqCUDAWithArrays(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecCUDAPlaceArray(), VecPlaceArray(),
VecCUDAAllocateCheckHost()
@*/
PetscErrorCode VecCreateMPICUDAWithArrays(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar cpuarray[],const PetscScalar gpuarray[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecCreateMPICUDAWithArray(comm,bs,n,N,gpuarray,vv);CHKERRQ(ierr);
if (cpuarray && gpuarray) {
Vec_MPI *s = (Vec_MPI*)((*vv)->data);
s->array = (PetscScalar*)cpuarray;
(*vv)->offloadmask = PETSC_OFFLOAD_BOTH;
} else if (cpuarray) {
Vec_MPI *s = (Vec_MPI*)((*vv)->data);
s->array = (PetscScalar*)cpuarray;
(*vv)->offloadmask = PETSC_OFFLOAD_CPU;
} else if (gpuarray) {
(*vv)->offloadmask = PETSC_OFFLOAD_GPU;
} else {
(*vv)->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
}
PetscFunctionReturn(0);
}
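/*
   Usage sketch (illustrative only; h_buf, d_buf and nlocal are assumed caller-provided
   host and device buffers of nlocal entries that already hold identical values, as
   required by the Notes above):

     Vec X;

     ierr = VecCreateMPICUDAWithArrays(PETSC_COMM_WORLD,1,nlocal,PETSC_DECIDE,h_buf,d_buf,&X);CHKERRQ(ierr);
     ... use X on either the host or the device side ...
     ierr = VecDestroy(&X);CHKERRQ(ierr);      neither h_buf nor d_buf is freed here
*/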
PetscErrorCode VecMax_MPICUDA(Vec xin,PetscInt *idx,PetscReal *z)
{
PetscErrorCode ierr;
PetscReal work;
PetscFunctionBegin;
ierr = VecMax_SeqCUDA(xin,idx,&work);CHKERRQ(ierr);
if (!idx) {
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else {
struct { PetscReal v; PetscInt i; } in,out;
in.v = work;
in.i = *idx + xin->map->rstart;
ierr = MPIU_Allreduce(&in,&out,1,MPIU_REAL_INT,MPIU_MAXLOC,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = out.v;
*idx = out.i;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecMin_MPICUDA(Vec xin,PetscInt *idx,PetscReal *z)
{
PetscErrorCode ierr;
PetscReal work;
PetscFunctionBegin;
ierr = VecMin_SeqCUDA(xin,idx,&work);CHKERRQ(ierr);
if (!idx) {
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MIN,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
} else {
struct { PetscReal v; PetscInt i; } in,out;
in.v = work;
in.i = *idx + xin->map->rstart;
ierr = MPIU_Allreduce(&in,&out,1,MPIU_REAL_INT,MPIU_MINLOC,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr);
*z = out.v;
*idx = out.i;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecBindToCPU_MPICUDA(Vec V,PetscBool pin)
{
PetscErrorCode ierr;
PetscFunctionBegin;
V->boundtocpu = pin;
if (pin) {
ierr = VecCUDACopyFromGPU(V);CHKERRQ(ierr);
V->offloadmask = PETSC_OFFLOAD_CPU; /* since the CPU code will likely change values in the vector */
V->ops->dotnorm2 = NULL;
V->ops->waxpy = VecWAXPY_Seq;
V->ops->dot = VecDot_MPI;
V->ops->mdot = VecMDot_MPI;
V->ops->tdot = VecTDot_MPI;
V->ops->norm = VecNorm_MPI;
V->ops->scale = VecScale_Seq;
V->ops->copy = VecCopy_Seq;
V->ops->set = VecSet_Seq;
V->ops->swap = VecSwap_Seq;
V->ops->axpy = VecAXPY_Seq;
V->ops->axpby = VecAXPBY_Seq;
V->ops->maxpy = VecMAXPY_Seq;
V->ops->aypx = VecAYPX_Seq;
V->ops->axpbypcz = VecAXPBYPCZ_Seq;
V->ops->pointwisemult = VecPointwiseMult_Seq;
V->ops->setrandom = VecSetRandom_Seq;
V->ops->placearray = VecPlaceArray_Seq;
V->ops->replacearray = VecReplaceArray_SeqCUDA;
V->ops->resetarray = VecResetArray_Seq;
V->ops->dot_local = VecDot_Seq;
V->ops->tdot_local = VecTDot_Seq;
V->ops->norm_local = VecNorm_Seq;
V->ops->mdot_local = VecMDot_Seq;
V->ops->pointwisedivide = VecPointwiseDivide_Seq;
V->ops->getlocalvector = NULL;
V->ops->restorelocalvector = NULL;
V->ops->getlocalvectorread = NULL;
V->ops->restorelocalvectorread = NULL;
V->ops->getarraywrite = NULL;
V->ops->max = VecMax_MPI;
V->ops->min = VecMin_MPI;
V->ops->reciprocal = VecReciprocal_Default;
V->ops->sum = NULL;
V->ops->shift = NULL;
/* default random number generator */
ierr = PetscFree(V->defaultrandtype);CHKERRQ(ierr);
ierr = PetscStrallocpy(PETSCRANDER48,&V->defaultrandtype);CHKERRQ(ierr);
} else {
V->ops->dotnorm2 = VecDotNorm2_MPICUDA;
V->ops->waxpy = VecWAXPY_SeqCUDA;
V->ops->duplicate = VecDuplicate_MPICUDA;
V->ops->dot = VecDot_MPICUDA;
V->ops->mdot = VecMDot_MPICUDA;
V->ops->tdot = VecTDot_MPICUDA;
V->ops->norm = VecNorm_MPICUDA;
V->ops->scale = VecScale_SeqCUDA;
V->ops->copy = VecCopy_SeqCUDA;
V->ops->set = VecSet_SeqCUDA;
V->ops->swap = VecSwap_SeqCUDA;
V->ops->axpy = VecAXPY_SeqCUDA;
V->ops->axpby = VecAXPBY_SeqCUDA;
V->ops->maxpy = VecMAXPY_SeqCUDA;
V->ops->aypx = VecAYPX_SeqCUDA;
V->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA;
V->ops->pointwisemult = VecPointwiseMult_SeqCUDA;
V->ops->setrandom = VecSetRandom_SeqCUDA;
V->ops->placearray = VecPlaceArray_SeqCUDA;
V->ops->replacearray = VecReplaceArray_SeqCUDA;
V->ops->resetarray = VecResetArray_SeqCUDA;
V->ops->dot_local = VecDot_SeqCUDA;
V->ops->tdot_local = VecTDot_SeqCUDA;
V->ops->norm_local = VecNorm_SeqCUDA;
V->ops->mdot_local = VecMDot_SeqCUDA;
V->ops->destroy = VecDestroy_MPICUDA;
V->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA;
V->ops->getlocalvector = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA;
V->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA;
V->ops->getarraywrite = VecGetArrayWrite_SeqCUDA;
V->ops->getarray = VecGetArray_SeqCUDA;
V->ops->restorearray = VecRestoreArray_SeqCUDA;
V->ops->getarrayandmemtype = VecGetArrayAndMemType_SeqCUDA;
V->ops->restorearrayandmemtype = VecRestoreArrayAndMemType_SeqCUDA;
V->ops->max = VecMax_MPICUDA;
V->ops->min = VecMin_MPICUDA;
V->ops->reciprocal = VecReciprocal_SeqCUDA;
V->ops->sum = VecSum_SeqCUDA;
V->ops->shift = VecShift_SeqCUDA;
/* default random number generator */
ierr = PetscFree(V->defaultrandtype);CHKERRQ(ierr);
ierr = PetscStrallocpy(PETSCCURAND,&V->defaultrandtype);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
{
PetscErrorCode ierr;
Vec_CUDA *veccuda;
PetscFunctionBegin;
ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr);
ierr = VecBindToCPU_MPICUDA(vv,PETSC_FALSE);CHKERRQ(ierr);
vv->ops->bindtocpu = VecBindToCPU_MPICUDA;
/* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */
if (alloc && !array) {
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr);
vv->offloadmask = PETSC_OFFLOAD_BOTH;
}
if (array) {
if (!vv->spptr) {
PetscReal pinned_memory_min;
PetscBool flag;
/* Cannot use PetscNew() here because spptr is void* */
ierr = PetscCalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)vv->spptr;
vv->minimum_bytes_pinned_memory = 0;
/* Need to parse command line for minimum size to use for pinned memory allocations on host here.
Note: This same code duplicated in VecCreate_SeqCUDA_Private() and VecCUDAAllocateCheck(). Is there a good way to avoid this? */
ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)vv),((PetscObject)vv)->prefix,"VECCUDA Options","Vec");CHKERRQ(ierr);
pinned_memory_min = vv->minimum_bytes_pinned_memory;
ierr = PetscOptionsReal("-vec_pinned_memory_min","Minimum size (in bytes) for an allocation to use pinned memory on host","VecSetPinnedMemoryMin",pinned_memory_min,&pinned_memory_min,&flag);CHKERRQ(ierr);
if (flag) vv->minimum_bytes_pinned_memory = pinned_memory_min;
ierr = PetscOptionsEnd();CHKERRQ(ierr);
}
veccuda = (Vec_CUDA*)vv->spptr;
veccuda->GPUarray = (PetscScalar*)array;
vv->offloadmask = PETSC_OFFLOAD_GPU;
}
PetscFunctionReturn(0);
}
|
b32eba03c37270ec5ba6a62f59b4efd48abb2dd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <transform.h>
__device__ float op(float d1,float *params) {
return fabsf(d1);
}
extern "C"
__global__ void abs_strided_float(int n,int idx,float *dy,int incy,float *params,float *result) {
transform(n,idx,dy,incy,params,result);
}
| b32eba03c37270ec5ba6a62f59b4efd48abb2dd0.cu | #include <transform.h>
__device__ float op(float d1,float *params) {
return fabsf(d1);
}
extern "C"
__global__ void abs_strided_float(int n,int idx,float *dy,int incy,float *params,float *result) {
transform(n,idx,dy,incy,params,result);
}
|
3e9cf0ed8b34473283f8bb4a4f369bea43356a09.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "main.h"
// phiX changes from -dp3*N*d/h to dp3*N*d/h which should be from -\pi to +\pi
//#define NX 6000
#define NX 3000
#define dPHIx 0.0010471975511966
#define TWO_dPHIx (2*dPHIx)
#define NY 700
#define dPHIy 0.000585398163397448 // also changes from -\pi to +\pi
#define TWO_dPHIy (2*dPHIy)
#define dT 0.00001
#define COLUMN_COUNT (2*(NX)+1)
#define ROW_COUNT (2*(NY)+1)
// supperlattice period
#define d 0.1
// width of the miniband
#define Delta_nu 1.0
__device__ ffloat Edc, omega, E_omega;
#define SIN(x) sin(x)
#define COS(x) cos(x)
#define eE(t) (e*(E_omega*COS(omega*(t))+Edc))
#define host_eE(t) (e*(host_E_omega*COS(host_omega*(t))+host_E_dc))
#define phiX(m) (dPHIx*(m-NX))
#define phiY(n) (dPHIy*(n-NY))
#define Offset(m,n) ((m)*ROW_COUNT+(n))
// maxwell distribution function
ffloat f0(ffloat, ffloat);
ffloat I0, f0_alpha, f0_a;
__global__ void solve(ffloat *f_current, ffloat *f_next, ffloat *f_0, ffloat A, ffloat B, ffloat C);
//__global__ void solve(ffloat *, ffloat *, ffloat *, ffloat *, ffloat *, ffloat, ffloat, ffloat, ffloat);
int main(int argc, char *argv[]) {
int display = atoi(argv[1]);
ffloat host_E_dc = strtod(argv[2], NULL);
ffloat host_E_omega = strtod(argv[3], NULL);
ffloat host_omega = strtod(argv[4], NULL);
ffloat host_B = strtod(argv[5], NULL);
printf("# T=%0.20f, E_dc=%0.20f B=%0.20f\n", T, host_E_dc, host_B); sync();
I0=gsl_sf_bessel_I0(Delta_nu/(2*k*T));
f0_a=1/(2*PI*I0);
f0_alpha=Delta_nu/(2*k*T);
ffloat *host_f[3];
for( int i = 0; i < 3; i++ ) {
host_f[i] = (ffloat *)calloc(COLUMN_COUNT * ROW_COUNT, sizeof(ffloat));
}
for( int m=0; m <= 2*NX; m++ ) { // column
for( int n=0; n < 2*NY; n++ ) { // row
// fill in f[current] with default maxwell distribution of momentum
host_f[0][m*ROW_COUNT+n]=f0(phiX(m), phiY(n));
host_f[2][m*ROW_COUNT+n]=f0(phiX(m), phiY(n));
}
}
if( display == 1 ) {
// show initial distribution
for( int m=0; m <= 2*NX; m += 3 ) { // column
for( int n=0; n < 2*NY; n += 3 ) { // row
printf("%0.20f %0.20f %0.20f\n", phiX(m), phiY(n), host_f[0][m*ROW_COUNT+n]);
}
}
return 1;
}
// allocate memory on device
ffloat *f[3];
int vsize = COLUMN_COUNT * sizeof(ffloat) * ROW_COUNT;
for( int i = 0; i < 3; i++ ) {
HANDLE_ERROR(hipMalloc((void **)&(f[i]), vsize));
HANDLE_ERROR(hipMemcpy(f[i], host_f[i], vsize, hipMemcpyHostToDevice));
}
dim3 grid(2*NX+1, 2*NY+1);
int current = 0; int next = 1;
ffloat A = host_B * d * d * Delta_nu * mE * dT / (4 * h * h * dPHIy);
ffloat C = host_B * dT / ( 2 * dPHIx);
ffloat t=0;
ffloat tt=0;
char fname[1024];
for( long i = 0; t < 5; i++ ) {
ffloat B = host_eE(t) * dT / (2 * dPHIx);
if( i % 1000000 == 0 ) {
printf("# t=%0.20f\n", t); sync();
}
hipLaunchKernelGGL(( solve), dim3(grid),dim3(1), 0, 0, f[current], f[next], f[2], A, B, C);
if( current == 0 ) { current = 1; next = 0; } else { current = 0; next = 1; }
t += dT;
tt += dT;
/*
if( tt > 0.1 ) {
tt = 0;
sprintf(fname, "/home/priimak/projects/2dssl/data/f_E_dc=%f_E_omega=%f_omega=%f_B=%f_T=%f_t=%f.data",
host_E_dc, host_E_omega, host_omega, host_B, T, t);
FILE *fout=fopen((const char *)fname, "w");
fprintf(fout, "# T=%0.20f, E_dc=%0.20f B=%0.20f\n", T, host_E_dc, host_B); sync();
fprintf(fout, "\n# t=%0.20f\n", t);
HANDLE_ERROR(hipMemcpy(host_f[0], f[current], vsize, hipMemcpyDeviceToHost));
// show resulting distribution
for( int m=0; m <= 2*NX; m += 3 ) { // column
for( int n=0; n < 2*NY; n += 3 ) { // row
//if( host_f[0][Offset(m,n)] > 0.01 ) {
fprintf(fout, "%0.20f %0.20f %0.20f\n", phiX(m), phiY(n), host_f[0][Offset(m,n)]);
//}
}
}
fclose(fout);
}
*/
}
//hipDeviceSynchronize();
if( display == 2 ) {
sprintf(fname, "/home/priimak/projects/2dssl/data/f_E_dc=%f_E_omega=%f_omega=%f_B=%f_T=%f_t=%f.data",
host_E_dc, host_E_omega, host_omega, host_B, T, t);
FILE *fout=fopen((const char *)fname, "w");
fprintf(fout, "# T=%0.20f, E_dc=%0.20f B=%0.20f\n", T, host_E_dc, host_B); sync();
fprintf(fout, "\n# t=%0.20f\n", t);
HANDLE_ERROR(hipMemcpy(host_f[0], f[current], vsize, hipMemcpyDeviceToHost));
// show resulting distribution
printf("\n# t=%0.20f\n", t);
for( int m=0; m <= 2*NX; m += 3 ) { // column
for( int n=0; n < 2*NY; n += 3 ) { // row
//if( host_f[0][Offset(m,n)] > 0.01 ) {
fprintf(fout, "%0.20f %0.20f %0.20f\n", phiX(m), phiY(n), host_f[0][Offset(m,n)]);
//}
}
}
fclose(fout);
}
return 0;
}
__global__ void solve(ffloat *f_current, ffloat *f_next, ffloat *f_0, ffloat A, ffloat B, ffloat C)
{
int m = blockIdx.x; // column X along E field
int n = blockIdx.y; // row Y along B field
ffloat f_current_m_n_minus_1 = n == 0 ? 0 : f_current[Offset(m,n-1)];
ffloat f_current_m_n_plus_1 = n == (2*NY) ? 0 : f_current[Offset(m,n+1)];
ffloat f_current_m_plus_1_n = m == (2*NX) ? f_current[Offset(0,n)] : f_current[Offset(m+1,n)];
ffloat f_current_m_minus_1_n = m == 0 ? f_current[Offset(2*NX,n)] : f_current[Offset(m-1,n)];
f_next[Offset(m,n)] = (f_current_m_plus_1_n+f_current_m_minus_1_n+f_current_m_n_plus_1+f_current_m_n_minus_1)*(1-dT)/4
+ dT*f_0[Offset(m,n)] + A * sin(phiX(m))*(f_current_m_n_plus_1 - f_current_m_n_minus_1)
- (B + C * phiY(n))*(f_current_m_plus_1_n - f_current_m_minus_1_n);
} // end of solve(...)
ffloat f0(ffloat phiX, ffloat phiY) {
return f0_a*exp(f0_alpha*COS(phiX)-(h*h/(2*mE*d*d*k*T))*phiY*phiY);
}
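/* For reference, f0 above evaluates the equilibrium distribution
     f0(phiX,phiY) = 1/(2*PI*I0) * exp( (Delta_nu/(2*k*T))*cos(phiX) - (h*h/(2*mE*d*d*k*T))*phiY*phiY )
   with I0 = gsl_sf_bessel_I0(Delta_nu/(2*k*T)) precomputed in main(); the constants
   k, T, h and mE are assumed to be defined in main.h. */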
| 3e9cf0ed8b34473283f8bb4a4f369bea43356a09.cu | #include "main.h"
// phiX changes from -dp3*N*d/h to dp3*N*d/h which should be from -\pi to +\pi
//#define NX 6000
#define NX 3000
#define dPHIx 0.0010471975511966
#define TWO_dPHIx (2*dPHIx)
#define NY 700
#define dPHIy 0.000585398163397448 // also changes from -\pi to +\pi
#define TWO_dPHIy (2*dPHIy)
#define dT 0.00001
#define COLUMN_COUNT (2*(NX)+1)
#define ROW_COUNT (2*(NY)+1)
// supperlattice period
#define d 0.1
// width of the miniband
#define Delta_nu 1.0
__device__ ffloat Edc, omega, E_omega;
#define SIN(x) sin(x)
#define COS(x) cos(x)
#define eE(t) (e*(E_omega*COS(omega*(t))+Edc))
#define host_eE(t) (e*(host_E_omega*COS(host_omega*(t))+host_E_dc))
#define phiX(m) (dPHIx*(m-NX))
#define phiY(n) (dPHIy*(n-NY))
#define Offset(m,n) ((m)*ROW_COUNT+(n))
// maxwell distribution function
ffloat f0(ffloat, ffloat);
ffloat I0, f0_alpha, f0_a;
__global__ void solve(ffloat *f_current, ffloat *f_next, ffloat *f_0, ffloat A, ffloat B, ffloat C);
//__global__ void solve(ffloat *, ffloat *, ffloat *, ffloat *, ffloat *, ffloat, ffloat, ffloat, ffloat);
int main(int argc, char *argv[]) {
int display = atoi(argv[1]);
ffloat host_E_dc = strtod(argv[2], NULL);
ffloat host_E_omega = strtod(argv[3], NULL);
ffloat host_omega = strtod(argv[4], NULL);
ffloat host_B = strtod(argv[5], NULL);
printf("# T=%0.20f, E_dc=%0.20f B=%0.20f\n", T, host_E_dc, host_B); sync();
I0=gsl_sf_bessel_I0(Delta_nu/(2*k*T));
f0_a=1/(2*PI*I0);
f0_alpha=Delta_nu/(2*k*T);
ffloat *host_f[3];
for( int i = 0; i < 3; i++ ) {
host_f[i] = (ffloat *)calloc(COLUMN_COUNT * ROW_COUNT, sizeof(ffloat));
}
for( int m=0; m <= 2*NX; m++ ) { // column
for( int n=0; n < 2*NY; n++ ) { // row
// fill in f[current] with default maxwell distribution of momentum
host_f[0][m*ROW_COUNT+n]=f0(phiX(m), phiY(n));
host_f[2][m*ROW_COUNT+n]=f0(phiX(m), phiY(n));
}
}
if( display == 1 ) {
// show initial distribution
for( int m=0; m <= 2*NX; m += 3 ) { // column
for( int n=0; n < 2*NY; n += 3 ) { // row
printf("%0.20f %0.20f %0.20f\n", phiX(m), phiY(n), host_f[0][m*ROW_COUNT+n]);
}
}
return 1;
}
// allocate memory on device
ffloat *f[3];
int vsize = COLUMN_COUNT * sizeof(ffloat) * ROW_COUNT;
for( int i = 0; i < 3; i++ ) {
HANDLE_ERROR(cudaMalloc((void **)&(f[i]), vsize));
HANDLE_ERROR(cudaMemcpy(f[i], host_f[i], vsize, cudaMemcpyHostToDevice));
}
dim3 grid(2*NX+1, 2*NY+1);
int current = 0; int next = 1;
ffloat A = host_B * d * d * Delta_nu * mE * dT / (4 * h * h * dPHIy);
ffloat C = host_B * dT / ( 2 * dPHIx);
ffloat t=0;
ffloat tt=0;
char fname[1024];
for( long i = 0; t < 5; i++ ) {
ffloat B = host_eE(t) * dT / (2 * dPHIx);
if( i % 1000000 == 0 ) {
printf("# t=%0.20f\n", t); sync();
}
solve<<<grid,1>>>(f[current], f[next], f[2], A, B, C);
if( current == 0 ) { current = 1; next = 0; } else { current = 0; next = 1; }
t += dT;
tt += dT;
/*
if( tt > 0.1 ) {
tt = 0;
sprintf(fname, "/home/priimak/projects/2dssl/data/f_E_dc=%f_E_omega=%f_omega=%f_B=%f_T=%f_t=%f.data",
host_E_dc, host_E_omega, host_omega, host_B, T, t);
FILE *fout=fopen((const char *)fname, "w");
fprintf(fout, "# T=%0.20f, E_dc=%0.20f B=%0.20f\n", T, host_E_dc, host_B); sync();
fprintf(fout, "\n# t=%0.20f\n", t);
HANDLE_ERROR(cudaMemcpy(host_f[0], f[current], vsize, cudaMemcpyDeviceToHost));
// show resulting distribution
for( int m=0; m <= 2*NX; m += 3 ) { // column
for( int n=0; n < 2*NY; n += 3 ) { // row
//if( host_f[0][Offset(m,n)] > 0.01 ) {
fprintf(fout, "%0.20f %0.20f %0.20f\n", phiX(m), phiY(n), host_f[0][Offset(m,n)]);
//}
}
}
fclose(fout);
}
*/
}
//cudaDeviceSynchronize();
if( display == 2 ) {
sprintf(fname, "/home/priimak/projects/2dssl/data/f_E_dc=%f_E_omega=%f_omega=%f_B=%f_T=%f_t=%f.data",
host_E_dc, host_E_omega, host_omega, host_B, T, t);
FILE *fout=fopen((const char *)fname, "w");
fprintf(fout, "# T=%0.20f, E_dc=%0.20f B=%0.20f\n", T, host_E_dc, host_B); sync();
fprintf(fout, "\n# t=%0.20f\n", t);
HANDLE_ERROR(cudaMemcpy(host_f[0], f[current], vsize, cudaMemcpyDeviceToHost));
// show resulting distribution
printf("\n# t=%0.20f\n", t);
for( int m=0; m <= 2*NX; m += 3 ) { // column
for( int n=0; n < 2*NY; n += 3 ) { // row
//if( host_f[0][Offset(m,n)] > 0.01 ) {
fprintf(fout, "%0.20f %0.20f %0.20f\n", phiX(m), phiY(n), host_f[0][Offset(m,n)]);
//}
}
}
fclose(fout);
}
return 0;
}
__global__ void solve(ffloat *f_current, ffloat *f_next, ffloat *f_0, ffloat A, ffloat B, ffloat C)
{
int m = blockIdx.x; // column X along E field
int n = blockIdx.y; // row Y along B field
ffloat f_current_m_n_minus_1 = n == 0 ? 0 : f_current[Offset(m,n-1)];
ffloat f_current_m_n_plus_1 = n == (2*NY) ? 0 : f_current[Offset(m,n+1)];
ffloat f_current_m_plus_1_n = m == (2*NX) ? f_current[Offset(0,n)] : f_current[Offset(m+1,n)];
ffloat f_current_m_minus_1_n = m == 0 ? f_current[Offset(2*NX,n)] : f_current[Offset(m-1,n)];
f_next[Offset(m,n)] = (f_current_m_plus_1_n+f_current_m_minus_1_n+f_current_m_n_plus_1+f_current_m_n_minus_1)*(1-dT)/4
+ dT*f_0[Offset(m,n)] + A * sin(phiX(m))*(f_current_m_n_plus_1 - f_current_m_n_minus_1)
- (B + C * phiY(n))*(f_current_m_plus_1_n - f_current_m_minus_1_n);
} // end of solve(...)
ffloat f0(ffloat phiX, ffloat phiY) {
return f0_a*exp(f0_alpha*COS(phiX)-(h*h/(2*mE*d*d*k*T))*phiY*phiY);
}
|
0b3fe1b0e87029ae093b54544e2e7b88f29bd246.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../shared/globals.hpp"
#include "../shared/timer.hpp"
#include "../shared/argument_parsing.cuh"
#include "../shared/graph.cuh"
#include "../shared/subgraph.cuh"
#include "../shared/partitioner.cuh"
#include "../shared/subgraph_generator.cuh"
#include "../shared/gpu_error_check.cuh"
#include "../shared/gpu_kernels.cuh"
#include "../shared/subway_utilities.hpp"
#include "../shared/test.cuh"
#include "../shared/test.cu"
#include "../shared/stopwatch.h"
int main(int argc, char** argv)
{
hipFree(0);
ArgumentParser arguments(argc, argv, true, false);
Timer timer;
timer.Start();
GraphPR<OutEdge> graph(arguments.input, true);
graph.ReadGraph();
float readtime = timer.Finish();
cout << "Graph Reading finished in " << readtime/1000 << " (s).\n";
//for(unsigned int i=0; i<100; i++)
// cout << graph.edgeList[i].end << " " << graph.edgeList[i].w8;
float initPR = 0.15;
float acc = 0.01;
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.delta[i] = initPR;
graph.value[i] = 0;
}
//graph.value[arguments.sourceNode] = 0;
//graph.label[arguments.sourceNode] = true;
gpuErrorcheck(hipMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(u_int64_t), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(float), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(graph.d_delta, graph.delta, graph.num_nodes * sizeof(float), hipMemcpyHostToDevice));
Subgraph<OutEdge> subgraph(graph.num_nodes, graph.num_edges);
SubgraphGenerator<OutEdge> subgen(graph);
subgen.generate(graph, subgraph, acc);
Partitioner<OutEdge> partitioner;
timer.Start();
uint gItr = 0;
Stopwatch copyTimer;
Stopwatch computeTimer;
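// Outer loop: keep going until no active nodes remain. Each pass re-partitions the
// active subgraph; the inner loop then copies one partition's slice of the active
// edge list to the device and runs pr_kernel on it, after which the active
// subgraph is regenerated for the next pass.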
while (subgraph.numActiveNodes>0)
{
gItr++;
partitioner.partition(subgraph, subgraph.numActiveNodes);
// a super iteration
for(int i=0; i<partitioner.numPartitions; i++)
{
copyTimer.start();
hipDeviceSynchronize();
gpuErrorcheck(hipMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdge), hipMemcpyHostToDevice));
hipDeviceSynchronize();
copyTimer.stop();
computeTimer.start();
hipLaunchKernelGGL(( pr_kernel), dim3(partitioner.partitionNodeSize[i]/512 + 1) , dim3(512) , 0, 0, partitioner.partitionNodeSize[i],
partitioner.fromNode[i],
partitioner.fromEdge[i],
subgraph.d_activeNodes,
subgraph.d_activeNodesPointer,
subgraph.d_activeEdgeList,
graph.d_outDegree,
graph.d_value,
graph.d_delta,
acc);
hipDeviceSynchronize();
computeTimer.stop();
gpuErrorcheck( hipPeekAtLastError() );
}
subgen.generate(graph, subgraph, acc);
}
float runtime = timer.Finish();
cout << "Processing finished in " << runtime/1000 << " (s).\n";
cout << "Number of iterations = " << gItr << endl;
cout << "compute time: " << computeTimer.total() << "ns copy time: " << copyTimer.total() << "ns\n";
gpuErrorcheck(hipMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(float), hipMemcpyDeviceToHost));
utilities::PrintResults(graph.value, min(30, graph.num_nodes));
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, graph.value, graph.num_nodes);
}
| 0b3fe1b0e87029ae093b54544e2e7b88f29bd246.cu | #include "../shared/globals.hpp"
#include "../shared/timer.hpp"
#include "../shared/argument_parsing.cuh"
#include "../shared/graph.cuh"
#include "../shared/subgraph.cuh"
#include "../shared/partitioner.cuh"
#include "../shared/subgraph_generator.cuh"
#include "../shared/gpu_error_check.cuh"
#include "../shared/gpu_kernels.cuh"
#include "../shared/subway_utilities.hpp"
#include "../shared/test.cuh"
#include "../shared/test.cu"
#include "../shared/stopwatch.h"
int main(int argc, char** argv)
{
cudaFree(0);
ArgumentParser arguments(argc, argv, true, false);
Timer timer;
timer.Start();
GraphPR<OutEdge> graph(arguments.input, true);
graph.ReadGraph();
float readtime = timer.Finish();
cout << "Graph Reading finished in " << readtime/1000 << " (s).\n";
//for(unsigned int i=0; i<100; i++)
// cout << graph.edgeList[i].end << " " << graph.edgeList[i].w8;
float initPR = 0.15;
float acc = 0.01;
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.delta[i] = initPR;
graph.value[i] = 0;
}
//graph.value[arguments.sourceNode] = 0;
//graph.label[arguments.sourceNode] = true;
gpuErrorcheck(cudaMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(u_int64_t), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(float), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(graph.d_delta, graph.delta, graph.num_nodes * sizeof(float), cudaMemcpyHostToDevice));
Subgraph<OutEdge> subgraph(graph.num_nodes, graph.num_edges);
SubgraphGenerator<OutEdge> subgen(graph);
subgen.generate(graph, subgraph, acc);
Partitioner<OutEdge> partitioner;
timer.Start();
uint gItr = 0;
Stopwatch copyTimer;
Stopwatch computeTimer;
while (subgraph.numActiveNodes>0)
{
gItr++;
partitioner.partition(subgraph, subgraph.numActiveNodes);
// a super iteration
for(int i=0; i<partitioner.numPartitions; i++)
{
copyTimer.start();
cudaDeviceSynchronize();
gpuErrorcheck(cudaMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdge), cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
copyTimer.stop();
computeTimer.start();
pr_kernel<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(partitioner.partitionNodeSize[i],
partitioner.fromNode[i],
partitioner.fromEdge[i],
subgraph.d_activeNodes,
subgraph.d_activeNodesPointer,
subgraph.d_activeEdgeList,
graph.d_outDegree,
graph.d_value,
graph.d_delta,
acc);
cudaDeviceSynchronize();
computeTimer.stop();
gpuErrorcheck( cudaPeekAtLastError() );
}
subgen.generate(graph, subgraph, acc);
}
float runtime = timer.Finish();
cout << "Processing finished in " << runtime/1000 << " (s).\n";
cout << "Number of iterations = " << gItr << endl;
cout << "compute time: " << computeTimer.total() << "ns copy time: " << copyTimer.total() << "ns\n";
gpuErrorcheck(cudaMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(float), cudaMemcpyDeviceToHost));
utilities::PrintResults(graph.value, min(30, graph.num_nodes));
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, graph.value, graph.num_nodes);
}
|
8ddba52ceb5a476dd79c00571fbf5b7ed7845ef1.hip | // !!! This is a file automatically generated by hipify!!!
/*notice this only works when array size is not larger than block size*/
#include <stdio.h>
#include <stdlib.h>
#include "common.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//reduction neighbored pairs kernel
__global__ void redunction_neighbored_pairs(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
//local data block pointer
//int * i_data = input + blockDim.x * blockIdx.x;
if (gid > size)
return;
for (int offset = 1; offset <= blockDim.x; offset *= 2)
{
//input[gid*offset*2] += input[gid*offset*2 + offset]
int index = 2 * offset * tid;
if(gid*offset*2<size){
//input[gid*offset*2] += input[gid*offset*2 + offset];
printf("block id is: %d current offset is: %d current thread id is: %d current index is: %d and input value is: %d and %d\n",
blockIdx.x,
offset,
gid,
gid*offset*2,
input[gid*offset*2],
input[gid*offset*2 + offset]);
input[gid*offset*2] += input[gid*offset*2 + offset];
if(offset == blockDim.x){
printf("current tid/gid is: %d, %d, final value is: %d\n", tid, gid, input[gid*offset*2]);
}
}
//synchronize all threads inside one block
__syncthreads();
}
//for each block, the element assigned to the block's first thread ends up
//holding that block's partial sum
if (tid == 0 && gid*2 <size)
{
//printf("final output value is: %d\n",input[gid]);
temp[blockIdx.x] = input[gid];
if(blockIdx.x == 1){
printf("current block id and output value is: %d, %d\n", blockIdx.x, temp[blockIdx.x]);
}
//printf("current block id is: %d, current gid is: %d, temp[%d] = %d\n",blockIdx.x,gid,blockIdx.x,temp[blockIdx.x]);
}
}
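/*
   Worked example for size = 8 with a single block of 8 threads, tracing
   input[gid*offset*2] += input[gid*offset*2 + offset]:
     offset = 1: gids 0..3 do input[0]+=input[1], input[2]+=input[3], input[4]+=input[5], input[6]+=input[7]
     offset = 2: gids 0..1 do input[0]+=input[2], input[4]+=input[6]
     offset = 4: gid 0 does input[0]+=input[4]
   The block total therefore ends up in input[0], which thread 0 writes to temp[blockIdx.x].
*/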
int main(int argc, char ** argv)
{
printf("Running neighbored pairs reduction kernel \n");
//
//int size = 1 << 27; //128 Mb of data
int size = 1024;
int byte_size = size * sizeof(int);
int block_size = 1024;
//
int * cpu_input, *h_ref;
cpu_input = (int*)malloc(byte_size);
//
initialize(cpu_input, size, INIT_RANDOM);
//
// //get the reduction result from cpu
int cpu_result = accumulate_cpu(cpu_input,size);
//
dim3 block(block_size);
dim3 grid((size+block.x-1)/ block.x);
//
printf("Kernel launch parameters | grid.x : %d, block.x : %d \n",grid.x, block.x);
//
//prepare pointer to collect sum for each block
int block_byte_size = sizeof(int)* grid.x;
h_ref = (int*)malloc(block_byte_size);
//
int * gpu_input, *g_ref;
//
hipMalloc((void**)&gpu_input,byte_size);
hipMalloc((void**)&g_ref, block_byte_size);
//
hipMemset(g_ref, 0, block_byte_size);
hipMemcpy(gpu_input, cpu_input, byte_size, hipMemcpyHostToDevice);
//
hipLaunchKernelGGL(( redunction_neighbored_pairs) , dim3(grid), dim3(block) , 0, 0, gpu_input, g_ref, size);
//
hipDeviceSynchronize();
//
hipMemcpy(h_ref, g_ref, block_byte_size, hipMemcpyDeviceToHost);
//
int gpu_result = 0;
//
for (int i = 0; i < grid.x; i++)
{
printf("current index and h_ref value is: %d, %d\n", i, h_ref[i]);
gpu_result += h_ref[i];
}
//
// //validity check
compare_results(gpu_result, cpu_result);
//
hipFree(g_ref);
hipFree(gpu_input);
//
free(h_ref);
free(cpu_input);
//
hipDeviceReset();
return 0;
}
| 8ddba52ceb5a476dd79c00571fbf5b7ed7845ef1.cu | /*notice this only works when array size is not larger than block size*/
#include <stdio.h>
#include <stdlib.h>
#include "common.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//reduction neighbored pairs kernel
__global__ void redunction_neighbored_pairs(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
//local data block pointer
//int * i_data = input + blockDim.x * blockIdx.x;
if (gid > size)
return;
for (int offset = 1; offset <= blockDim.x; offset *= 2)
{
//input[gid*offset*2] += input[gid*offset*2 + offset]
int index = 2 * offset * tid;
if(gid*offset*2<size){
//input[gid*offset*2] += input[gid*offset*2 + offset];
printf("block id is: %d current offset is: %d current thread id is: %d current index is: %d and input value is: %d and %d\n",
blockIdx.x,
offset,
gid,
gid*offset*2,
input[gid*offset*2],
input[gid*offset*2 + offset]);
input[gid*offset*2] += input[gid*offset*2 + offset];
if(offset == blockDim.x){
printf("current tid/gid is: %d, %d, final value is: %d\n", tid, gid, input[gid*offset*2]);
}
}
//synchronize all threads inside one block
__syncthreads();
}
//for each block, the element assigned to the block's first thread ends up
//holding that block's partial sum
if (tid == 0 && gid*2 <size)
{
//printf("final output value is: %d\n",input[gid]);
temp[blockIdx.x] = input[gid];
if(blockIdx.x == 1){
printf("current block id and output value is: %d, %d\n", blockIdx.x, temp[blockIdx.x]);
}
//printf("current block id is: %d, current gid is: %d, temp[%d] = %d\n",blockIdx.x,gid,blockIdx.x,temp[blockIdx.x]);
}
}
int main(int argc, char ** argv)
{
printf("Running neighbored pairs reduction kernel \n");
//
//int size = 1 << 27; //128 Mb of data
int size = 1024;
int byte_size = size * sizeof(int);
int block_size = 1024;
//
int * cpu_input, *h_ref;
cpu_input = (int*)malloc(byte_size);
//
initialize(cpu_input, size, INIT_RANDOM);
//
// //get the reduction result from cpu
int cpu_result = accumulate_cpu(cpu_input,size);
//
dim3 block(block_size);
dim3 grid((size+block.x-1)/ block.x);
//
printf("Kernel launch parameters | grid.x : %d, block.x : %d \n",grid.x, block.x);
//
//prepare pointer to collect sum for each block
int block_byte_size = sizeof(int)* grid.x;
h_ref = (int*)malloc(block_byte_size);
//
int * gpu_input, *g_ref;
//
cudaMalloc((void**)&gpu_input,byte_size);
cudaMalloc((void**)&g_ref, block_byte_size);
//
cudaMemset(g_ref, 0, block_byte_size);
cudaMemcpy(gpu_input, cpu_input, byte_size, cudaMemcpyHostToDevice);
//
redunction_neighbored_pairs <<<grid, block >>>(gpu_input, g_ref, size);
//
cudaDeviceSynchronize();
//
cudaMemcpy(h_ref, g_ref, block_byte_size, cudaMemcpyDeviceToHost);
//
int gpu_result = 0;
//
for (int i = 0; i < grid.x; i++)
{
printf("current index and h_ref value is: %d, %d\n", i, h_ref[i]);
gpu_result += h_ref[i];
}
//
// //validity check
compare_results(gpu_result, cpu_result);
//
cudaFree(g_ref);
cudaFree(gpu_input);
//
free(h_ref);
free(cpu_input);
//
cudaDeviceReset();
return 0;
}
|
1a899b443587dd93f1814f98074bfdfcdba4d2ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hip/hip_cooperative_groups.h>
#include "reduction.h"
using namespace cooperative_groups;
#define NUM_LOAD 4
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/**
Two warp level primitives are used here for this example
https://devblogs.nvidia.com/faster-parallel-reductions-kepler/
https://devblogs.nvidia.com/using-cuda-warp-level-primitives/
A disadvantage of these approaches is that the floating point reduction will not be exact from run to run.
https://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html
*/
template <typename group_t>
__inline__ __device__ float warp_reduce_sum(group_t group, float val)
{
#pragma unroll
for (int offset = group.size() / 2; offset > 0; offset >>= 1)
val += group.shfl_down(val, offset);
return val;
}
__inline__ __device__ float block_reduce_sum(thread_block block, float val)
{
static __shared__ float shared[32]; // Shared mem for 32 partial sums
int wid = threadIdx.x / warpSize;
thread_block_tile<32> tile32 = tiled_partition<32>(block);
val = warp_reduce_sum(tile32, val); // Each warp performs partial reduction
if (tile32.thread_rank() == 0)
shared[wid] = val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[tile32.thread_rank()] : 0;
if (wid == 0)
val = warp_reduce_sum(tile32, val); //Final reduce within first warp
return val;
}
// large vector reduction
__global__ void
reduction_blk_atmc_kernel(float *g_out, float *g_in, unsigned int size)
{
unsigned int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
thread_block block = this_thread_block();
// accumulate the input with a grid-stride loop into per-thread partial sums
float sum[NUM_LOAD] = { 0.f };
for (int i = idx_x; i < size; i += blockDim.x * gridDim.x * NUM_LOAD)
{
for (int step = 0; step < NUM_LOAD; step++)
sum[step] += (i + step * blockDim.x * gridDim.x < size) ? g_in[i + step * blockDim.x * gridDim.x] : 0.f;
}
for (int i = 1; i < NUM_LOAD; i++)
sum[0] += sum[i];
// warp synchronous reduction
sum[0] = block_reduce_sum(block, sum[0]);
if (block.thread_rank() == 0) {
atomicAdd(&g_out[0], sum[0]);
}
}
void atomic_reduction(float *g_outPtr, float *g_inPtr, int size, int n_threads)
{
int num_sms;
int num_blocks_per_sm;
hipDeviceGetAttribute(&num_sms, hipDeviceAttributeMultiprocessorCount, 0);
hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm, reduction_blk_atmc_kernel, n_threads, n_threads*sizeof(float));
int n_blocks = min(num_blocks_per_sm * num_sms, (size + n_threads - 1) / n_threads);
hipLaunchKernelGGL(( reduction_blk_atmc_kernel), dim3(n_blocks), dim3(n_threads), 0, 0, g_outPtr, g_inPtr, size);
}
| 1a899b443587dd93f1814f98074bfdfcdba4d2ba.cu | #include <stdio.h>
#include <cooperative_groups.h>
#include "reduction.h"
using namespace cooperative_groups;
#define NUM_LOAD 4
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/**
Two warp level primitives are used here for this example
https://devblogs.nvidia.com/faster-parallel-reductions-kepler/
https://devblogs.nvidia.com/using-cuda-warp-level-primitives/
A disadvantage of these approaches is that the floating point reduction will not be exact from run to run.
https://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html
*/
template <typename group_t>
__inline__ __device__ float warp_reduce_sum(group_t group, float val)
{
#pragma unroll
for (int offset = group.size() / 2; offset > 0; offset >>= 1)
val += group.shfl_down(val, offset);
return val;
}
__inline__ __device__ float block_reduce_sum(thread_block block, float val)
{
static __shared__ float shared[32]; // Shared mem for 32 partial sums
int wid = threadIdx.x / warpSize;
thread_block_tile<32> tile32 = tiled_partition<32>(block);
val = warp_reduce_sum(tile32, val); // Each warp performs partial reduction
if (tile32.thread_rank() == 0)
shared[wid] = val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[tile32.thread_rank()] : 0;
if (wid == 0)
val = warp_reduce_sum(tile32, val); //Final reduce within first warp
return val;
}
// large vector reduction
__global__ void
reduction_blk_atmc_kernel(float *g_out, float *g_in, unsigned int size)
{
unsigned int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
thread_block block = this_thread_block();
// accumulate the input with a grid-stride loop into per-thread partial sums
float sum[NUM_LOAD] = { 0.f };
for (int i = idx_x; i < size; i += blockDim.x * gridDim.x * NUM_LOAD)
{
for (int step = 0; step < NUM_LOAD; step++)
sum[step] += (i + step * blockDim.x * gridDim.x < size) ? g_in[i + step * blockDim.x * gridDim.x] : 0.f;
}
for (int i = 1; i < NUM_LOAD; i++)
sum[0] += sum[i];
// warp synchronous reduction
sum[0] = block_reduce_sum(block, sum[0]);
if (block.thread_rank() == 0) {
atomicAdd(&g_out[0], sum[0]);
}
}
void atomic_reduction(float *g_outPtr, float *g_inPtr, int size, int n_threads)
{
int num_sms;
int num_blocks_per_sm;
cudaDeviceGetAttribute(&num_sms, cudaDevAttrMultiProcessorCount, 0);
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_per_sm, reduction_blk_atmc_kernel, n_threads, n_threads*sizeof(float));
int n_blocks = min(num_blocks_per_sm * num_sms, (size + n_threads - 1) / n_threads);
reduction_blk_atmc_kernel<<<n_blocks, n_threads>>>(g_outPtr, g_inPtr, size);
}
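/*
   Usage sketch (d_in, d_out, size and the result variable are assumed to be set up
   by the caller). d_out must be zeroed before the call because the kernel
   accumulates into g_out[0] with atomicAdd:

     float *d_in, *d_out, result;
     cudaMalloc(&d_in,  size * sizeof(float));
     cudaMalloc(&d_out, sizeof(float));
     ... fill d_in ...
     cudaMemset(d_out, 0, sizeof(float));
     atomic_reduction(d_out, d_in, size, 256);
     cudaMemcpy(&result, d_out, sizeof(float), cudaMemcpyDeviceToHost);
*/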
|
3dab85b817ad5cde783765b7bf448ddc1bf436f5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cunn_LookupTable_accGradParametersKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *indices = NULL;
hipMalloc(&indices, XSIZE*YSIZE);
float *gradOutput = NULL;
hipMalloc(&gradOutput, XSIZE*YSIZE);
float *gradWeight = NULL;
hipMalloc(&gradWeight, XSIZE*YSIZE);
float *count = NULL;
hipMalloc(&count, XSIZE*YSIZE);
float defaultScale = 2;
long numel = 1;
long stride = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((cunn_LookupTable_accGradParametersKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,indices,gradOutput,gradWeight,count,defaultScale,numel,stride);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((cunn_LookupTable_accGradParametersKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,indices,gradOutput,gradWeight,count,defaultScale,numel,stride);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((cunn_LookupTable_accGradParametersKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,indices,gradOutput,gradWeight,count,defaultScale,numel,stride);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3dab85b817ad5cde783765b7bf448ddc1bf436f5.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cunn_LookupTable_accGradParametersKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
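// benchmark sweep: for every matrix size and block shape above, the kernel is warmed up with
// 10 launches and then timed over 1000 launches; output is [microseconds,(block dims),(matrix dims)]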
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *indices = NULL;
cudaMalloc(&indices, XSIZE*YSIZE);
float *gradOutput = NULL;
cudaMalloc(&gradOutput, XSIZE*YSIZE);
float *gradWeight = NULL;
cudaMalloc(&gradWeight, XSIZE*YSIZE);
float *count = NULL;
cudaMalloc(&count, XSIZE*YSIZE);
float defaultScale = 2;
long numel = 1;
long stride = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cunn_LookupTable_accGradParametersKernel<<<gridBlock,threadBlock>>>(input,indices,gradOutput,gradWeight,count,defaultScale,numel,stride);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cunn_LookupTable_accGradParametersKernel<<<gridBlock,threadBlock>>>(input,indices,gradOutput,gradWeight,count,defaultScale,numel,stride);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cunn_LookupTable_accGradParametersKernel<<<gridBlock,threadBlock>>>(input,indices,gradOutput,gradWeight,count,defaultScale,numel,stride);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
79c3349f9d0ae891b90e4d01ea600bd8d088dd7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// mandelbrot.cu - CUDA code to generate mandelbrot image
//
// See:
// http://selkie.macalester.edu/csinparallel/modules/CUDAArchitecture/build/html/1-Mandelbrot/Mandelbrot.html
//
// To compile:
// $ nvcc mandelbrot.cu -g -D SHOW_X -o mandelbrot -lX11 -lgomp -lm
//
#include <stdio.h>
#include <unistd.h>
#include <err.h>
#include <stdint.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <omp.h>
static int dim = 512;
static int n = 512;
static int m = 512;
static int max_iter = 100;
static uint32_t *colors;
uint32_t *dev_colors;
// X11 data
#ifdef SHOW_X
static Display *dpy;
static XImage *bitmap;
static Window win;
static Atom wmDeleteMessage;
static GC gc;
//destroy window and x variables
static void exit_x11(void){
XDestroyWindow(dpy, win);
XCloseDisplay(dpy);
}
// create Xwindow
static void init_x11(){
// Attempt to open the display
dpy = XOpenDisplay(NULL);
// Failure
if (!dpy) exit(0);
uint32_t long white = WhitePixel(dpy,DefaultScreen(dpy));
uint32_t long black = BlackPixel(dpy,DefaultScreen(dpy));
win = XCreateSimpleWindow(dpy, DefaultRootWindow(dpy),
0, 0, dim, dim, 0, black, white);
// We want to be notified when the window appears
XSelectInput(dpy, win, StructureNotifyMask);
// Make it appear
XMapWindow(dpy, win);
while (1){
XEvent e;
XNextEvent(dpy, &e);
if (e.type == MapNotify) break;
}
XTextProperty tp;
char name[128] = "Mandelbrot";
char *n = name;
Status st = XStringListToTextProperty(&n, 1, &tp);
if (st) XSetWMName(dpy, win, &tp);
// Wait for the MapNotify event
XFlush(dpy);
int depth = DefaultDepth(dpy, DefaultScreen(dpy));
Visual *visual = DefaultVisual(dpy, DefaultScreen(dpy));
bitmap = XCreateImage(dpy, visual, depth, ZPixmap, 0,
(char*) malloc(dim * dim * 32), dim, dim, 32, 0);
// Init GC
gc = XCreateGC(dpy, win, 0, NULL);
XSetForeground(dpy, gc, black);
XSelectInput(dpy, win, ExposureMask | KeyPressMask | StructureNotifyMask);
wmDeleteMessage = XInternAtom(dpy, "WM_DELETE_WINDOW", False);
XSetWMProtocols(dpy, win, &wmDeleteMessage, 1);
}
#endif
//create colors used to draw the mandelbrot set
void init_colours(void) {
float freq = 6.3 / max_iter;
for (int i = 0; i < max_iter; i++){
char r = sin(freq * i + 3) * 127 + 128;
char g = sin(freq * i + 5) * 127 + 128;
char b = sin(freq * i + 1) * 127 + 128;
colors[i] = b + 256 * g + 256 * 256 * r;
}
colors[max_iter] = 0;
}
void checkErr(hipError_t err, const char* msg){
if (err != hipSuccess){
fprintf(stderr, "%s (error code %d: '%s'", msg, err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/* the mandelbrot set is defined as all complex numbers c for which the
iteration z = z^2 + c remains bounded. In practice, we calculate max_iter
iterations of this formula and if the magnitude of z stays < 2 we assume c
is in the set. The greater max_iter, the more accurate our representation. */
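// e.g. c = 0 never escapes, so mandel_double returns max_iter (treated as inside the set);
// c = 1 gives the orbit 1, 2, 5, ... which escapes (|z|^2 > 4) after a couple of iterations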
__device__ uint32_t mandel_double(double cr, double ci, int max_iter) {
double zr = 0;
double zi = 0;
double zrsqr = 0;
double zisqr = 0;
uint32_t i;
for (i = 0; i < max_iter; i++){
zi = zr * zi;
zi += zi;
zi += ci;
zr = zrsqr - zisqr + cr;
zrsqr = zr * zr;
zisqr = zi * zi;
//the fewer iterations it takes to diverge, the farther from the set
if (zrsqr + zisqr > 4.0) break;
}
return i;
}
/* turn each x y coordinate into a complex number and run the mandelbrot formula on it */
__global__ void mandel_kernel(uint32_t *counts, double xmin, double ymin,
double step, int max_iter, int dim, uint32_t *colors) {
int pix_per_thread = dim * dim / (gridDim.x * blockDim.x);
int tId = blockDim.x * blockIdx.x + threadIdx.x;
int offset = pix_per_thread * tId;
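// each thread renders a contiguous strip of pix_per_thread pixels starting at offset; pixels
// left over when dim*dim is not a multiple of the thread count are picked up in the tail branch below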
for (int i = offset; i < offset + pix_per_thread; i++){
int x = i % dim;
int y = i / dim;
double cr = xmin + x * step;
double ci = ymin + y * step;
counts[y * dim + x] = colors[mandel_double(cr, ci, max_iter)];
}
if (gridDim.x * blockDim.x * pix_per_thread < dim * dim
&& tId < (dim * dim) - (blockDim.x * gridDim.x)){
int i = blockDim.x * gridDim.x * pix_per_thread + tId;
int x = i % dim;
int y = i / dim;
double cr = xmin + x * step;
double ci = ymin + y * step;
counts[y * dim + x] = colors[mandel_double(cr, ci, max_iter)];
}
}
/* For each point, evaluate its colour */
static void display_double(double xcen, double ycen, double scale,
uint32_t *dev_counts, uint32_t *colors){
dim3 numBlocks(dim,dim);
double xmin = xcen - (scale/2);
double ymin = ycen - (scale/2);
double step = scale / dim;
hipError_t err = hipSuccess;
#ifdef BENCHMARK
double start = omp_get_wtime();
#endif
hipLaunchKernelGGL(( mandel_kernel), dim3(n), dim3(m), 0, 0, dev_counts, xmin , ymin, step, max_iter, dim, colors);
checkErr(err, "Failed to run Kernel");
#ifdef SHOW_X
err = hipMemcpy(bitmap->data, dev_counts, dim * dim * sizeof(uint32_t), hipMemcpyDeviceToHost);
#else
void *data = malloc(dim * dim * sizeof(uint32_t));
err = hipMemcpy(data, dev_counts, dim * dim * sizeof(uint32_t), hipMemcpyDeviceToHost);
#endif
checkErr(err, "Failed to copy dev_counts back");
#ifdef BENCHMARK
double stop = omp_get_wtime();
printf("Blocks: %d\tThreads per Block: %d\tSize:%dx%d\tDepth: %d\tTime: %f\n",
n, m, dim, dim, max_iter, stop - start);
#endif
#ifdef SHOW_X
XPutImage(dpy, win, gc, bitmap,
0, 0, 0, 0,
dim, dim);
XFlush(dpy);
#endif
}
int main(int argc, char** argv){
hipError_t err = hipSuccess;
if (argc >= 2)
n = atoi(argv[1]);
if (argc >= 3)
m = atoi(argv[2]);
if (argc >= 4)
dim = atoi(argv[3]);
if (argc >= 5)
max_iter = atoi(argv[4]);
size_t color_size = (max_iter +1) * sizeof(uint32_t);
colors = (uint32_t *) malloc(color_size);
hipMalloc((void**)&dev_colors, color_size);
double xcen = -0.5;
double ycen = 0;
double scale = 3;
#ifdef SHOW_X
init_x11();
#endif
init_colours();
hipMemcpy(dev_colors, colors, color_size, hipMemcpyHostToDevice);
free(colors);
uint32_t *dev_counts = NULL;
size_t img_size = dim * dim * sizeof(uint32_t);
err = hipMalloc(&dev_counts, img_size);
checkErr(err, "Failed to allocate dev_counts");
display_double(xcen, ycen, scale, dev_counts, dev_colors);
#ifdef SHOW_X
while(1) {
XEvent event;
KeySym key;
char text[255];
XNextEvent(dpy, &event);
while (XPending(dpy) > 0)
XNextEvent(dpy, &event);
/* Just redraw everything on expose */
if ((event.type == Expose) && !event.xexpose.count){
XPutImage(dpy, win, gc, bitmap,
0, 0, 0, 0,
dim, dim);
}
/* Press 'x' to exit */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 'x') break;
/* Press 'a' to go left */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 'a'){
xcen -= 20 * scale / dim;
display_double(xcen, ycen, scale, dev_counts, dev_colors);
}
/* Press 'w' to go up */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 'w'){
ycen -= 20 * scale / dim;
display_double(xcen, ycen, scale, dev_counts, dev_colors);
}
/* Press 's' to go down */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 's'){
ycen += 20 * scale / dim;
display_double(xcen, ycen, scale, dev_counts, dev_colors);
}
/* Press 'd' to go right */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 'd'){
xcen += 20 * scale / dim;
display_double(xcen, ycen, scale, dev_counts, dev_colors);
}
/* Press 'q' to zoom out */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 'q'){
scale *= 1.25;
display_double(xcen, ycen, scale, dev_counts, dev_colors);
}
/* Press 'e' to zoom in */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 'e'){
scale *= .80;
display_double(xcen, ycen, scale, dev_counts, dev_colors);
}
/* Or simply close the window */
if ((event.type == ClientMessage) &&
((Atom) event.xclient.data.l[0] == wmDeleteMessage))
break;
}
exit_x11();
#endif
hipFree(dev_counts);
hipFree(dev_colors);
return 0;
}
| 79c3349f9d0ae891b90e4d01ea600bd8d088dd7f.cu | // mandelbrot.cu - CUDA code to generate mandelbrot image
//
// See:
// http://selkie.macalester.edu/csinparallel/modules/CUDAArchitecture/build/html/1-Mandelbrot/Mandelbrot.html
//
// To compile:
// $ nvcc mandelbrot.cu -g -D SHOW_X -o mandelbrot -lX11 -lgomp -lm
//
#include <stdio.h>
#include <unistd.h>
#include <err.h>
#include <stdint.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <omp.h>
static int dim = 512;
static int n = 512;
static int m = 512;
static int max_iter = 100;
static uint32_t *colors;
uint32_t *dev_colors;
// X11 data
#ifdef SHOW_X
static Display *dpy;
static XImage *bitmap;
static Window win;
static Atom wmDeleteMessage;
static GC gc;
//destroy window and x variables
static void exit_x11(void){
XDestroyWindow(dpy, win);
XCloseDisplay(dpy);
}
// create Xwindow
static void init_x11(){
// Attempt to open the display
dpy = XOpenDisplay(NULL);
// Failure
if (!dpy) exit(0);
uint32_t long white = WhitePixel(dpy,DefaultScreen(dpy));
uint32_t long black = BlackPixel(dpy,DefaultScreen(dpy));
win = XCreateSimpleWindow(dpy, DefaultRootWindow(dpy),
0, 0, dim, dim, 0, black, white);
// We want to be notified when the window appears
XSelectInput(dpy, win, StructureNotifyMask);
// Make it appear
XMapWindow(dpy, win);
while (1){
XEvent e;
XNextEvent(dpy, &e);
if (e.type == MapNotify) break;
}
XTextProperty tp;
char name[128] = "Mandelbrot";
char *n = name;
Status st = XStringListToTextProperty(&n, 1, &tp);
if (st) XSetWMName(dpy, win, &tp);
// Wait for the MapNotify event
XFlush(dpy);
int depth = DefaultDepth(dpy, DefaultScreen(dpy));
Visual *visual = DefaultVisual(dpy, DefaultScreen(dpy));
bitmap = XCreateImage(dpy, visual, depth, ZPixmap, 0,
(char*) malloc(dim * dim * 32), dim, dim, 32, 0);
// Init GC
gc = XCreateGC(dpy, win, 0, NULL);
XSetForeground(dpy, gc, black);
XSelectInput(dpy, win, ExposureMask | KeyPressMask | StructureNotifyMask);
wmDeleteMessage = XInternAtom(dpy, "WM_DELETE_WINDOW", False);
XSetWMProtocols(dpy, win, &wmDeleteMessage, 1);
}
#endif
//create colors used to draw the mandelbrot set
void init_colours(void) {
float freq = 6.3 / max_iter;
for (int i = 0; i < max_iter; i++){
char r = sin(freq * i + 3) * 127 + 128;
char g = sin(freq * i + 5) * 127 + 128;
char b = sin(freq * i + 1) * 127 + 128;
colors[i] = b + 256 * g + 256 * 256 * r;
}
colors[max_iter] = 0;
}
void checkErr(cudaError_t err, const char* msg){
if (err != cudaSuccess){
fprintf(stderr, "%s (error code %d: '%s'", msg, err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/* the mandelbrot set is defined as all complex numbers c for which the
iteration z = z^2 + c remains bounded. In practice, we calculate max_iter
iterations of this formula and if the magnitude of z stays < 2 we assume c
is in the set. The greater max_iter, the more accurate our representation. */
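// e.g. c = 0 never escapes, so mandel_double returns max_iter (treated as inside the set);
// c = 1 gives the orbit 1, 2, 5, ... which escapes (|z|^2 > 4) after a couple of iterations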
__device__ uint32_t mandel_double(double cr, double ci, int max_iter) {
double zr = 0;
double zi = 0;
double zrsqr = 0;
double zisqr = 0;
uint32_t i;
for (i = 0; i < max_iter; i++){
zi = zr * zi;
zi += zi;
zi += ci;
zr = zrsqr - zisqr + cr;
zrsqr = zr * zr;
zisqr = zi * zi;
//the fewer iterations it takes to diverge, the farther from the set
if (zrsqr + zisqr > 4.0) break;
}
return i;
}
/* turn each x y coordinate into a complex number and run the mandelbrot formula on it */
__global__ void mandel_kernel(uint32_t *counts, double xmin, double ymin,
double step, int max_iter, int dim, uint32_t *colors) {
int pix_per_thread = dim * dim / (gridDim.x * blockDim.x);
int tId = blockDim.x * blockIdx.x + threadIdx.x;
int offset = pix_per_thread * tId;
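// each thread renders a contiguous strip of pix_per_thread pixels starting at offset; pixels
// left over when dim*dim is not a multiple of the thread count are picked up in the tail branch below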
for (int i = offset; i < offset + pix_per_thread; i++){
int x = i % dim;
int y = i / dim;
double cr = xmin + x * step;
double ci = ymin + y * step;
counts[y * dim + x] = colors[mandel_double(cr, ci, max_iter)];
}
if (gridDim.x * blockDim.x * pix_per_thread < dim * dim
&& tId < (dim * dim) - (blockDim.x * gridDim.x)){
int i = blockDim.x * gridDim.x * pix_per_thread + tId;
int x = i % dim;
int y = i / dim;
double cr = xmin + x * step;
double ci = ymin + y * step;
counts[y * dim + x] = colors[mandel_double(cr, ci, max_iter)];
}
}
/* For each point, evaluate its colour */
static void display_double(double xcen, double ycen, double scale,
uint32_t *dev_counts, uint32_t *colors){
dim3 numBlocks(dim,dim);
double xmin = xcen - (scale/2);
double ymin = ycen - (scale/2);
double step = scale / dim;
cudaError_t err = cudaSuccess;
#ifdef BENCHMARK
double start = omp_get_wtime();
#endif
mandel_kernel<<<n, m>>>(dev_counts, xmin , ymin, step, max_iter, dim, colors);
checkErr(err, "Failed to run Kernel");
#ifdef SHOW_X
err = cudaMemcpy(bitmap->data, dev_counts, dim * dim * sizeof(uint32_t), cudaMemcpyDeviceToHost);
#else
void *data = malloc(dim * dim * sizeof(uint32_t));
err = cudaMemcpy(data, dev_counts, dim * dim * sizeof(uint32_t), cudaMemcpyDeviceToHost);
#endif
checkErr(err, "Failed to copy dev_counts back");
#ifdef BENCHMARK
double stop = omp_get_wtime();
printf("Blocks: %d\tThreads per Block: %d\tSize:%dx%d\tDepth: %d\tTime: %f\n",
n, m, dim, dim, max_iter, stop - start);
#endif
#ifdef SHOW_X
XPutImage(dpy, win, gc, bitmap,
0, 0, 0, 0,
dim, dim);
XFlush(dpy);
#endif
}
int main(int argc, char** argv){
cudaError_t err = cudaSuccess;
if (argc >= 2)
n = atoi(argv[1]);
if (argc >= 3)
m = atoi(argv[2]);
if (argc >= 4)
dim = atoi(argv[3]);
if (argc >= 5)
max_iter = atoi(argv[4]);
size_t color_size = (max_iter +1) * sizeof(uint32_t);
colors = (uint32_t *) malloc(color_size);
cudaMalloc((void**)&dev_colors, color_size);
double xcen = -0.5;
double ycen = 0;
double scale = 3;
#ifdef SHOW_X
init_x11();
#endif
init_colours();
cudaMemcpy(dev_colors, colors, color_size, cudaMemcpyHostToDevice);
free(colors);
uint32_t *dev_counts = NULL;
size_t img_size = dim * dim * sizeof(uint32_t);
err = cudaMalloc(&dev_counts, img_size);
checkErr(err, "Failed to allocate dev_counts");
display_double(xcen, ycen, scale, dev_counts, dev_colors);
#ifdef SHOW_X
while(1) {
XEvent event;
KeySym key;
char text[255];
XNextEvent(dpy, &event);
while (XPending(dpy) > 0)
XNextEvent(dpy, &event);
/* Just redraw everything on expose */
if ((event.type == Expose) && !event.xexpose.count){
XPutImage(dpy, win, gc, bitmap,
0, 0, 0, 0,
dim, dim);
}
/* Press 'x' to exit */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 'x') break;
/* Press 'a' to go left */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 'a'){
xcen -= 20 * scale / dim;
display_double(xcen, ycen, scale, dev_counts, dev_colors);
}
/* Press 'w' to go up */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 'w'){
ycen -= 20 * scale / dim;
display_double(xcen, ycen, scale, dev_counts, dev_colors);
}
/* Press 's' to go down */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 's'){
ycen += 20 * scale / dim;
display_double(xcen, ycen, scale, dev_counts, dev_colors);
}
/* Press 'd' to go right */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 'd'){
xcen += 20 * scale / dim;
display_double(xcen, ycen, scale, dev_counts, dev_colors);
}
/* Press 'q' to zoom out */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 'q'){
scale *= 1.25;
display_double(xcen, ycen, scale, dev_counts, dev_colors);
}
/* Press 'e' to zoom in */
if ((event.type == KeyPress) &&
XLookupString(&event.xkey, text, 255, &key, 0) == 1)
if (text[0] == 'e'){
scale *= .80;
display_double(xcen, ycen, scale, dev_counts, dev_colors);
}
/* Or simply close the window */
if ((event.type == ClientMessage) &&
((Atom) event.xclient.data.l[0] == wmDeleteMessage))
break;
}
exit_x11();
#endif
cudaFree(dev_counts);
cudaFree(dev_colors);
return 0;
}
|
e2f2fcb76a299fad82b877073a6400871ed1efef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void PyrDown_y_g(u_int8_t *ptGrayIn,u_int8_t *ptGrayOut, int w, int h)
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
if(ix<w && iy<h)// && y>2)
{
float p_2 = ptGrayIn[ix*2+(iy*2-2)*w*2]/16.0f;
float p_1 = ptGrayIn[ix*2+(iy*2-1)*w*2]/4.0f;
float p0 = 3.0f*ptGrayIn[ix*2+iy*2*w*2]/8.0f;
float pp1 = ptGrayIn[ix*2+(iy*2+1)*w*2]/4.0f;
float pp2 = ptGrayIn[ix*2+(iy*2+2)*w*2]/16.0f;
int output = p_2 + p_1 + p0 + pp1 + pp2;
ptGrayOut[ix+iy*w] = min(output,255);
}
} | e2f2fcb76a299fad82b877073a6400871ed1efef.cu | #include "includes.h"
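// vertical 5-tap Gaussian downsample: output pixel (ix, iy) blends column 2*ix of input rows
// 2*iy-2 .. 2*iy+2 with weights 1/16, 1/4, 3/8, 1/4, 1/16 (rows are not clamped at the borders)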
__global__ void PyrDown_y_g(u_int8_t *ptGrayIn,u_int8_t *ptGrayOut, int w, int h)
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
if(ix<w && iy<h)// && y>2)
{
float p_2 = ptGrayIn[ix*2+(iy*2-2)*w*2]/16.0f;
float p_1 = ptGrayIn[ix*2+(iy*2-1)*w*2]/4.0f;
float p0 = 3.0f*ptGrayIn[ix*2+iy*2*w*2]/8.0f;
float pp1 = ptGrayIn[ix*2+(iy*2+1)*w*2]/4.0f;
float pp2 = ptGrayIn[ix*2+(iy*2+2)*w*2]/16.0f;
int output = p_2 + p_1 + p0 + pp1 + pp2;
ptGrayOut[ix+iy*w] = min(output,255);
}
} |
536eefa94642d9add70e34ddedda397c9282c6f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <core/basic/basic.hpp>
#include <gtest/gtest.h>
using namespace koishi;
using namespace core;
#ifdef KOISHI_USE_CUDA
struct A : emittable
{
A( int i ) :
n( i )
{
poly::vector<int> vv;
for ( int i = 0; i <= n; ++i )
{
vv.emplace_back( i );
}
v = std::move( vv );
}
__host__ __device__ virtual int f() const
{
int s = v.size() * 1000;
for ( int i = 0; i != v.size(); ++i )
{
s += v[ i ];
}
return s;
}
int n;
poly::vector<int> v;
};
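// A(i).f() returns 1000 * (i + 1) + (0 + 1 + ... + i), since v holds the i + 1 values 0..i;
// the EXPECT_EQ check in the test below relies on exactly this value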
__global__ void add( const poly::vector<A> &vec, poly::vector<int> &n, poly::vector<const int *> &p )
{
//n[0] = 1; n[1] = 2;
//n[0] = 1;
for ( auto i = 0; i != vec.size(); ++i )
//n[i] = 1;
n[ i ] = vec[ i ].f(), p[ i ] = nullptr;
}
#endif
TEST( test_poly_vector, struct_with_non_standard_layout )
{
// testing::internal::CaptureStdout();
#ifdef KOISHI_USE_CUDA
poly::vector<A> view;
int n = 200;
for ( int i = 0; i != n; ++i )
{
view.emplace_back( i );
}
EXPECT_EQ( view.size(), n );
KLOG( view.data() );
poly::vector<int> nn( view.size() );
poly::vector<const int *> pp( view.size() );
KLOG( nn.data() );
KLOG( pp.data() );
EXPECT_EQ( n, nn.size() );
EXPECT_EQ( n, pp.size() );
EXPECT_EQ( n, view.size() );
poly::kernel( add, 1, 1 )( view, nn, pp );
KLOG( nn.data() );
KLOG( pp.data() );
EXPECT_EQ( n, nn.size() );
KLOG( nn.data() );
KLOG( pp.data() );
EXPECT_EQ( n, nn.size() );
int ss = 0;
for ( int i = 0; i != nn.size(); ++i )
{
ss += i;
EXPECT_EQ( nn[ i ], ss + 1000 * ( i + 1 ) );
std::cout << nn[ i ] << std::endl;
}
#else
KLOG( "no cuda toolkit provided" );
#endif
}
TEST( test_poly_vector, initializer_list )
{
poly::vector<int> a = { 1, 2, 3 };
poly::vector<int> b{};
a = {};
for ( int i = 0; i != 10000; ++i )
{
a.resize( i + 1 );
a[ i ] = i;
b.emplace_back( i );
}
for ( int i = 0; i != 10000; ++i )
{
ASSERT_EQ( a[ i ], i );
ASSERT_EQ( b[ i ], i );
}
}
TEST( test_poly_vector, emit_empty_vector )
{
#ifdef KOISHI_USE_CUDA
poly::vector<A> view;
poly::vector<int> a;
poly::vector<const int *> b;
poly::kernel( add, 1, 1 )( view, a, b );
#endif
}
| 536eefa94642d9add70e34ddedda397c9282c6f0.cu | #include <core/basic/basic.hpp>
#include <gtest/gtest.h>
using namespace koishi;
using namespace core;
#ifdef KOISHI_USE_CUDA
struct A : emittable
{
A( int i ) :
n( i )
{
poly::vector<int> vv;
for ( int i = 0; i <= n; ++i )
{
vv.emplace_back( i );
}
v = std::move( vv );
}
__host__ __device__ virtual int f() const
{
int s = v.size() * 1000;
for ( int i = 0; i != v.size(); ++i )
{
s += v[ i ];
}
return s;
}
int n;
poly::vector<int> v;
};
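// A(i).f() returns 1000 * (i + 1) + (0 + 1 + ... + i), since v holds the i + 1 values 0..i;
// the EXPECT_EQ check in the test below relies on exactly this value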
__global__ void add( const poly::vector<A> &vec, poly::vector<int> &n, poly::vector<const int *> &p )
{
//n[0] = 1; n[1] = 2;
//n[0] = 1;
for ( auto i = 0; i != vec.size(); ++i )
//n[i] = 1;
n[ i ] = vec[ i ].f(), p[ i ] = nullptr;
}
#endif
TEST( test_poly_vector, struct_with_non_standard_layout )
{
// testing::internal::CaptureStdout();
#ifdef KOISHI_USE_CUDA
poly::vector<A> view;
int n = 200;
for ( int i = 0; i != n; ++i )
{
view.emplace_back( i );
}
EXPECT_EQ( view.size(), n );
KLOG( view.data() );
poly::vector<int> nn( view.size() );
poly::vector<const int *> pp( view.size() );
KLOG( nn.data() );
KLOG( pp.data() );
EXPECT_EQ( n, nn.size() );
EXPECT_EQ( n, pp.size() );
EXPECT_EQ( n, view.size() );
poly::kernel( add, 1, 1 )( view, nn, pp );
KLOG( nn.data() );
KLOG( pp.data() );
EXPECT_EQ( n, nn.size() );
KLOG( nn.data() );
KLOG( pp.data() );
EXPECT_EQ( n, nn.size() );
int ss = 0;
for ( int i = 0; i != nn.size(); ++i )
{
ss += i;
EXPECT_EQ( nn[ i ], ss + 1000 * ( i + 1 ) );
std::cout << nn[ i ] << std::endl;
}
#else
KLOG( "no cuda toolkit provided" );
#endif
}
TEST( test_poly_vector, initializer_list )
{
poly::vector<int> a = { 1, 2, 3 };
poly::vector<int> b{};
a = {};
for ( int i = 0; i != 10000; ++i )
{
a.resize( i + 1 );
a[ i ] = i;
b.emplace_back( i );
}
for ( int i = 0; i != 10000; ++i )
{
ASSERT_EQ( a[ i ], i );
ASSERT_EQ( b[ i ], i );
}
}
TEST( test_poly_vector, emit_empty_vector )
{
#ifdef KOISHI_USE_CUDA
poly::vector<A> view;
poly::vector<int> a;
poly::vector<const int *> b;
poly::kernel( add, 1, 1 )( view, a, b );
#endif
}
|
338d1645e0fc848a83ac95550f135364b7524d9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** @file histo-global.cu histogram with global memory atomics */
#include <png.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/** CUDA check macro */
#define cucheck(call) \
{\
hipError_t res = (call);\
if(res != hipSuccess) {\
const char* err_str = hipGetErrorString(res);\
fprintf(stderr, "%s (%d): %s in %s", __FILE__, __LINE__, err_str, #call); \
exit(-1);\
}\
}
/** time spent in device */
double gpu_time = 0;
/** a useful function to compute the number of threads */
int divup(int x, int y) { return x / y + (x % y ? 1 : 0); }
/** gets the color, given the dwell */
void dwell_color(int *r, int *g, int *b, int dwell);
/** save the dwell into a PNG file
@remarks: code to save PNG file taken from here
(error handling is removed):
http://www.labbookpages.co.uk/software/imgProc/libPNG.html
*/
void save_image(const char *filename, int *dwells, int w, int h) {
png_bytep row;
FILE *fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, 0, 0, 0);
png_infop info_ptr = png_create_info_struct(png_ptr);
// exception handling
setjmp(png_jmpbuf(png_ptr));
png_init_io(png_ptr, fp);
// write header (8 bit colour depth)
png_set_IHDR(png_ptr, info_ptr, w, h,
8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
// set title
png_text title_text;
title_text.compression = PNG_TEXT_COMPRESSION_NONE;
title_text.key = "Title";
title_text.text = "Mandelbrot set, per-pixel";
png_set_text(png_ptr, info_ptr, &title_text, 1);
png_write_info(png_ptr, info_ptr);
// write image data
row = (png_bytep) malloc(3 * w * sizeof(png_byte));
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int r, g, b;
dwell_color(&r, &g, &b, dwells[y * w + x]);
row[3 * x + 0] = (png_byte)r;
row[3 * x + 1] = (png_byte)g;
row[3 * x + 2] = (png_byte)b;
}
png_write_row(png_ptr, row);
}
png_write_end(png_ptr, NULL);
fclose(fp);
png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
free(row);
} // save_image
/** a simple complex type */
struct complex {
__host__ __device__ complex(float re, float im = 0) {
this->re = re;
this->im = im;
}
/** real and imaginary part */
float re, im;
}; // struct complex
// operator overloads for complex numbers
inline __host__ __device__ complex operator+
(const complex &a, const complex &b) {
return complex(a.re + b.re, a.im + b.im);
}
inline __host__ __device__ complex operator-
(const complex &a) { return complex(-a.re, -a.im); }
inline __host__ __device__ complex operator-
(const complex &a, const complex &b) {
return complex(a.re - b.re, a.im - b.im);
}
inline __host__ __device__ complex operator*
(const complex &a, const complex &b) {
return complex(a.re * b.re - a.im * b.im, a.im * b.re + a.re * b.im);
}
inline __host__ __device__ float abs2(const complex &a) {
return a.re * a.re + a.im * a.im;
}
inline __host__ __device__ complex operator/
(const complex &a, const complex &b) {
float invabs2 = 1 / abs2(b);
return complex((a.re * b.re + a.im * b.im) * invabs2,
(a.im * b.re - b.im * a.re) * invabs2);
} // operator/
#define MAX_DWELL 512
/** block sizes along x and y */
#define BSX 64
#define BSY 4
/** maximum recursion depth */
#define MAX_DEPTH 4
/** region below which do per-pixel */
#define MIN_SIZE 32
/** subdivision factor along each axis */
#define SUBDIV 4
/** subdivision when launched from host */
#define INIT_SUBDIV 32
/** computes the dwell for a single pixel */
__device__ int pixel_dwell
(int w, int h, complex cmin, complex cmax, int x, int y) {
complex dc = cmax - cmin;
float fx = (float)x / w, fy = (float)y / h;
complex c = cmin + complex(fx * dc.re, fy * dc.im);
int dwell = 0;
complex z = c;
while(dwell < MAX_DWELL && abs2(z) < 2 * 2) {
z = z * z + c;
dwell++;
}
return dwell;
} // pixel_dwell
/** computes the dwells for Mandelbrot image
@param dwells the output array
@param w the width of the output image
@param h the height of the output image
@param cmin the complex value associated with the left-bottom corner of the
image
@param cmax the complex value associated with the right-top corner of the
image
*/
__global__ void mandelbrot_k
(int *dwells, int w, int h, complex cmin, complex cmax) {
// complex value to start iteration (c)
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int dwell = pixel_dwell(w, h, cmin, cmax, x, y);
dwells[y * w + x] = dwell;
} // mandelbrot_k
/** gets the color, given the dwell (on host) */
#define CUT_DWELL (MAX_DWELL / 4)
void dwell_color(int *r, int *g, int *b, int dwell) {
// black for the Mandelbrot set
if(dwell >= MAX_DWELL) {
*r = *g = *b = 0;
} else {
// cut at zero
if(dwell < 0)
dwell = 0;
if(dwell <= CUT_DWELL) {
// from black to blue the first half
*r = *g = 0;
*b = 128 + dwell * 127 / (CUT_DWELL);
} else {
// from blue to white for the second half
*b = 255;
*r = *g = (dwell - CUT_DWELL) * 255 / (MAX_DWELL - CUT_DWELL);
}
}
} // dwell_color
/** data size */
#define H (16 * 1024)
#define W (16 * 1024)
#define IMAGE_PATH "./mandelbrot.png"
int main(int argc, char **argv) {
// allocate memory
int w = W, h = H;
size_t dwell_sz = w * h * sizeof(int);
int *h_dwells, *d_dwells;
cucheck(hipMalloc((void**)&d_dwells, dwell_sz));
h_dwells = (int*)malloc(dwell_sz);
// compute the dwells, copy them back
double t1 = omp_get_wtime();
dim3 bs(BSX, BSY), grid(INIT_SUBDIV, INIT_SUBDIV);
hipLaunchKernelGGL(( mandelbrot_k), dim3(grid), dim3(bs), 0, 0,
d_dwells, w, h, complex(-1.5, -1), complex(0.5, 1));
cucheck(hipDeviceSynchronize());
double t2 = omp_get_wtime();
cucheck(hipMemcpy(h_dwells, d_dwells, dwell_sz, hipMemcpyDeviceToHost));
gpu_time = t2 - t1;
// save the image to PNG
save_image(IMAGE_PATH, h_dwells, w, h);
// print performance
printf("Mandelbrot set computed in %.3lf s, at %.3lf Mpix/s\n", gpu_time,
h * w * 1e-6 / gpu_time);
// free data
hipFree(d_dwells);
free(h_dwells);
return 0;
} // main
| 338d1645e0fc848a83ac95550f135364b7524d9c.cu | /** @file histo-global.cu histogram with global memory atomics */
#include <png.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/** CUDA check macro */
#define cucheck(call) \
{\
cudaError_t res = (call);\
if(res != cudaSuccess) {\
const char* err_str = cudaGetErrorString(res);\
fprintf(stderr, "%s (%d): %s in %s", __FILE__, __LINE__, err_str, #call); \
exit(-1);\
}\
}
/** time spent in device */
double gpu_time = 0;
/** a useful function to compute the number of threads */
int divup(int x, int y) { return x / y + (x % y ? 1 : 0); }
/** gets the color, given the dwell */
void dwell_color(int *r, int *g, int *b, int dwell);
/** save the dwell into a PNG file
@remarks: code to save PNG file taken from here
(error handling is removed):
http://www.labbookpages.co.uk/software/imgProc/libPNG.html
*/
void save_image(const char *filename, int *dwells, int w, int h) {
png_bytep row;
FILE *fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, 0, 0, 0);
png_infop info_ptr = png_create_info_struct(png_ptr);
// exception handling
setjmp(png_jmpbuf(png_ptr));
png_init_io(png_ptr, fp);
// write header (8 bit colour depth)
png_set_IHDR(png_ptr, info_ptr, w, h,
8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
// set title
png_text title_text;
title_text.compression = PNG_TEXT_COMPRESSION_NONE;
title_text.key = "Title";
title_text.text = "Mandelbrot set, per-pixel";
png_set_text(png_ptr, info_ptr, &title_text, 1);
png_write_info(png_ptr, info_ptr);
// write image data
row = (png_bytep) malloc(3 * w * sizeof(png_byte));
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int r, g, b;
dwell_color(&r, &g, &b, dwells[y * w + x]);
row[3 * x + 0] = (png_byte)r;
row[3 * x + 1] = (png_byte)g;
row[3 * x + 2] = (png_byte)b;
}
png_write_row(png_ptr, row);
}
png_write_end(png_ptr, NULL);
fclose(fp);
png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
free(row);
} // save_image
/** a simple complex type */
struct complex {
__host__ __device__ complex(float re, float im = 0) {
this->re = re;
this->im = im;
}
/** real and imaginary part */
float re, im;
}; // struct complex
// operator overloads for complex numbers
inline __host__ __device__ complex operator+
(const complex &a, const complex &b) {
return complex(a.re + b.re, a.im + b.im);
}
inline __host__ __device__ complex operator-
(const complex &a) { return complex(-a.re, -a.im); }
inline __host__ __device__ complex operator-
(const complex &a, const complex &b) {
return complex(a.re - b.re, a.im - b.im);
}
inline __host__ __device__ complex operator*
(const complex &a, const complex &b) {
return complex(a.re * b.re - a.im * b.im, a.im * b.re + a.re * b.im);
}
inline __host__ __device__ float abs2(const complex &a) {
return a.re * a.re + a.im * a.im;
}
inline __host__ __device__ complex operator/
(const complex &a, const complex &b) {
float invabs2 = 1 / abs2(b);
return complex((a.re * b.re + a.im * b.im) * invabs2,
(a.im * b.re - b.im * a.re) * invabs2);
} // operator/
#define MAX_DWELL 512
/** block sizes along x and y */
#define BSX 64
#define BSY 4
/** maximum recursion depth */
#define MAX_DEPTH 4
/** region below which do per-pixel */
#define MIN_SIZE 32
/** subdivision factor along each axis */
#define SUBDIV 4
/** subdivision when launched from host */
#define INIT_SUBDIV 32
/** computes the dwell for a single pixel */
__device__ int pixel_dwell
(int w, int h, complex cmin, complex cmax, int x, int y) {
complex dc = cmax - cmin;
float fx = (float)x / w, fy = (float)y / h;
complex c = cmin + complex(fx * dc.re, fy * dc.im);
int dwell = 0;
complex z = c;
while(dwell < MAX_DWELL && abs2(z) < 2 * 2) {
z = z * z + c;
dwell++;
}
return dwell;
} // pixel_dwell
/** computes the dwells for Mandelbrot image
@param dwells the output array
@param w the width of the output image
@param h the height of the output image
@param cmin the complex value associated with the left-bottom corner of the
image
@param cmax the complex value associated with the right-top corner of the
image
*/
__global__ void mandelbrot_k
(int *dwells, int w, int h, complex cmin, complex cmax) {
// complex value to start iteration (c)
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int dwell = pixel_dwell(w, h, cmin, cmax, x, y);
dwells[y * w + x] = dwell;
} // mandelbrot_k
/** gets the color, given the dwell (on host) */
#define CUT_DWELL (MAX_DWELL / 4)
void dwell_color(int *r, int *g, int *b, int dwell) {
// black for the Mandelbrot set
if(dwell >= MAX_DWELL) {
*r = *g = *b = 0;
} else {
// cut at zero
if(dwell < 0)
dwell = 0;
if(dwell <= CUT_DWELL) {
// from black to blue the first half
*r = *g = 0;
*b = 128 + dwell * 127 / (CUT_DWELL);
} else {
// from blue to white for the second half
*b = 255;
*r = *g = (dwell - CUT_DWELL) * 255 / (MAX_DWELL - CUT_DWELL);
}
}
} // dwell_color
/** data size */
#define H (16 * 1024)
#define W (16 * 1024)
#define IMAGE_PATH "./mandelbrot.png"
int main(int argc, char **argv) {
// allocate memory
int w = W, h = H;
size_t dwell_sz = w * h * sizeof(int);
int *h_dwells, *d_dwells;
cucheck(cudaMalloc((void**)&d_dwells, dwell_sz));
h_dwells = (int*)malloc(dwell_sz);
// compute the dwells, copy them back
double t1 = omp_get_wtime();
dim3 bs(BSX, BSY), grid(INIT_SUBDIV, INIT_SUBDIV);
mandelbrot_k<<<grid, bs>>>
(d_dwells, w, h, complex(-1.5, -1), complex(0.5, 1));
cucheck(cudaDeviceSynchronize());
double t2 = omp_get_wtime();
cucheck(cudaMemcpy(h_dwells, d_dwells, dwell_sz, cudaMemcpyDeviceToHost));
gpu_time = t2 - t1;
// save the image to PNG
save_image(IMAGE_PATH, h_dwells, w, h);
// print performance
printf("Mandelbrot set computed in %.3lf s, at %.3lf Mpix/s\n", gpu_time,
h * w * 1e-6 / gpu_time);
// free data
cudaFree(d_dwells);
free(h_dwells);
return 0;
} // main
|
a3734a45668c837118da448f96d1e90f302aca60.hip | // !!! This is a file automatically generated by hipify!!!
#include "async_utils.cuh"
#include "cuda_utils.h"
#include "handle_utils.h"
#include "matrix_utils.h"
#include "pinned_host_vector.h"
#include "preprocessor.h"
#include "random_forest.cuh"
#include "stream_allocator.h"
#include <cuml/fil/fil.h>
#include <thrust/async/copy.h>
#include <thrust/device_vector.h>
#include <treelite/c_api.h>
#include <cuml/ensemble/randomforest.hpp>
#include <Rcpp.h>
#include <chrono>
#include <memory>
#include <unordered_map>
#include <vector>
using RandomForestClassifierUPtr =
std::unique_ptr<ML::RandomForestClassifierD,
cuml4r::RandomForestMetaDataDeleter<double, int>>;
namespace {
struct RandomForestClassifierModel {
RandomForestClassifierUPtr const rf_;
std::unordered_map<int, int> const inverseLabelsMap_;
__host__ RandomForestClassifierModel(
RandomForestClassifierUPtr rf,
std::unordered_map<int, int>&& inverse_labels_map) noexcept
: rf_(std::move(rf)), inverseLabelsMap_(std::move(inverse_labels_map)) {}
};
// map labels into consecutive integral values in [0, n_unique_labels)
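// e.g. labels {7, 3, 7, 9} become {0, 1, 0, 2} with labels_map = {7->0, 3->1, 9->2};
// postprocess_labels applies the inverse map to turn predictions back into the original labels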
__host__ cuml4r::pinned_host_vector<int> preprocess_labels(
std::unordered_map<int, int>& labels_map, std::vector<int> const& labels) {
int n_unique_labels = 0;
cuml4r::pinned_host_vector<int> preprocessed_labels;
preprocessed_labels.reserve(labels.size());
for (auto const label : labels) {
auto const p = labels_map.emplace(label, n_unique_labels);
if (p.second) {
++n_unique_labels;
}
preprocessed_labels.push_back(p.first->second);
}
return preprocessed_labels;
}
// reverse the mapping done by preprocess_labels
__host__ void postprocess_labels(
cuml4r::pinned_host_vector<int>& labels,
std::unordered_map<int, int> const& inverse_labels_map) {
for (auto& label : labels) {
auto iter = inverse_labels_map.find(label);
if (iter != inverse_labels_map.cend()) {
label = iter->second;
} else {
label = 0;
}
}
}
__host__ std::unordered_map<int, int> reverse(
std::unordered_map<int, int> const& m) {
std::unordered_map<int, int> r;
r.reserve(m.size());
for (auto const& p : m) {
r[p.second] = p.first;
}
return r;
}
} // namespace
namespace cuml4r {
__host__ SEXP rf_classifier_fit(
Rcpp::NumericMatrix const& input, Rcpp::IntegerVector const& labels,
int const n_trees, bool const bootstrap, float const max_samples,
int const n_streams, int const max_depth, int const max_leaves,
float const max_features, int const n_bins, int const min_samples_leaf,
int const min_samples_split, int const split_criterion,
float const min_impurity_decrease, int const max_batch_size,
int const verbosity) {
auto const input_m = cuml4r::Matrix<>(input, /*transpose=*/true);
int const n_samples = input_m.numCols;
int const n_features = input_m.numRows;
auto rf = RandomForestClassifierUPtr(new ML::RandomForestClassifierD);
auto stream_view = cuml4r::stream_allocator::getOrCreateStream();
raft::handle_t handle(n_streams);
cuml4r::handle_utils::initializeHandle(handle, stream_view.value());
// rf input data & labels
auto const& h_input = input_m.values;
thrust::device_vector<double> d_input(h_input.size());
auto CUML4R_ANONYMOUS_VARIABLE(input_h2d) = cuml4r::async_copy(
stream_view.value(), h_input.cbegin(), h_input.cend(), d_input.begin());
std::unordered_map<int, int> labels_map;
auto const h_labels =
preprocess_labels(labels_map, Rcpp::as<std::vector<int>>(labels));
thrust::device_vector<int> d_labels(h_labels.size());
auto CUML4R_ANONYMOUS_VARIABLE(labels_h2d) = cuml4r::async_copy(
stream_view.value(), h_labels.cbegin(), h_labels.cend(), d_labels.begin());
{
auto* rf_ptr = rf.get();
ML::fit(
handle, rf_ptr, d_input.data().get(), n_samples, n_features,
d_labels.data().get(),
/*n_unique_labels=*/static_cast<int>(labels_map.size()),
ML::set_rf_params(max_depth, max_leaves, max_features, n_bins,
min_samples_leaf, min_samples_split,
min_impurity_decrease, bootstrap, n_trees, max_samples,
std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now().time_since_epoch())
.count(),
static_cast<ML::CRITERION>(split_criterion), n_streams,
max_batch_size),
/*verbosity=*/verbosity);
CUDA_RT_CALL(hipStreamSynchronize(stream_view.value()));
if (rf_ptr != rf.get()) {
// NOTE: in theory this should never happen though
rf = RandomForestClassifierUPtr(rf_ptr);
}
}
return Rcpp::XPtr<RandomForestClassifierModel>(
new RandomForestClassifierModel(std::move(rf), reverse(labels_map)));
}
__host__ Rcpp::IntegerVector rf_classifier_predict(
SEXP model_xptr, Rcpp::NumericMatrix const& input, int const verbosity) {
auto const input_m = cuml4r::Matrix<>(input, /*transpose=*/false);
int const n_samples = input_m.numRows;
int const n_features = input_m.numCols;
auto stream_view = cuml4r::stream_allocator::getOrCreateStream();
raft::handle_t handle;
cuml4r::handle_utils::initializeHandle(handle, stream_view.value());
auto model = Rcpp::XPtr<RandomForestClassifierModel>(model_xptr);
// inputs
auto const& h_input = input_m.values;
thrust::device_vector<double> d_input(h_input.size());
auto CUML4R_ANONYMOUS_VARIABLE(input_h2d) = cuml4r::async_copy(
stream_view.value(), h_input.cbegin(), h_input.cend(), d_input.begin());
// outputs
thrust::device_vector<int> d_predictions(n_samples);
ML::predict(handle, /*forest=*/model->rf_.get(), d_input.data().get(),
n_samples, n_features, /*predictions=*/d_predictions.data().get(),
/*verbosity=*/verbosity);
cuml4r::pinned_host_vector<int> h_predictions(n_samples);
auto CUML4R_ANONYMOUS_VARIABLE(predictions_d2h) =
cuml4r::async_copy(stream_view.value(), d_predictions.cbegin(),
d_predictions.cend(), h_predictions.begin());
CUDA_RT_CALL(hipStreamSynchronize(stream_view.value()));
// post-process prediction labels
postprocess_labels(h_predictions, model->inverseLabelsMap_);
return Rcpp::IntegerVector(h_predictions.begin(), h_predictions.end());
}
__host__ Rcpp::NumericMatrix rf_classifier_predict_class_probabilities(
SEXP model_xptr, Rcpp::NumericMatrix const& input) {
#ifndef CUML4R_TREELITE_C_API_MISSING
auto const input_m = cuml4r::Matrix<float>(input, /*transpose=*/false);
int const n_samples = input_m.numRows;
int const n_features = input_m.numCols;
auto model = Rcpp::XPtr<RandomForestClassifierModel>(model_xptr);
int const num_classes = model->inverseLabelsMap_.size();
ModelHandle tl_handle;
ML::build_treelite_forest(
/*model=*/&tl_handle,
/*forest=*/model->rf_.get(),
/*num_features=*/n_features,
/*task_category=*/num_classes);
auto stream_view = cuml4r::stream_allocator::getOrCreateStream();
raft::handle_t handle;
cuml4r::handle_utils::initializeHandle(handle, stream_view.value());
ML::fil::forest_t forest;
ML::fil::treelite_params_t params;
params.algo = ML::fil::algo_t::ALGO_AUTO;
// output class probabilities instead of classes
params.output_class = false;
params.storage_type = ML::fil::storage_type_t::AUTO;
params.blocks_per_sm = 0;
params.threads_per_tree = 1;
params.n_items = 0;
params.pforest_shape_str = nullptr;
ML::fil::from_treelite(handle, /*pforest=*/&forest,
/*model=*/tl_handle, /*tl_params=*/¶ms);
// FIL input
auto const& h_x = input_m.values;
thrust::device_vector<float> d_x(h_x.size());
auto CUML4R_ANONYMOUS_VARIABLE(x_h2d) = cuml4r::async_copy(
handle.get_stream(), h_x.cbegin(), h_x.cend(), d_x.begin());
// FIL output
thrust::device_vector<float> d_preds(num_classes * n_samples);
ML::fil::predict(/*h=*/handle, /*f=*/forest,
/*preds=*/d_preds.data().get(),
/*data=*/d_x.data().get(), /*num_rows=*/n_samples,
/*predict_proba=*/true);
cuml4r::pinned_host_vector<float> h_preds(d_preds.size());
auto CUML4R_ANONYMOUS_VARIABLE(preds_d2h) = cuml4r::async_copy(
handle.get_stream(), d_preds.cbegin(), d_preds.cend(), h_preds.begin());
CUDA_RT_CALL(hipStreamSynchronize(handle.get_stream()));
return Rcpp::transpose(
Rcpp::NumericMatrix(num_classes, n_samples, h_preds.begin()));
#else
return {};
#endif
}
} // namespace cuml4r
| a3734a45668c837118da448f96d1e90f302aca60.cu | #include "async_utils.cuh"
#include "cuda_utils.h"
#include "handle_utils.h"
#include "matrix_utils.h"
#include "pinned_host_vector.h"
#include "preprocessor.h"
#include "random_forest.cuh"
#include "stream_allocator.h"
#include <cuml/fil/fil.h>
#include <thrust/async/copy.h>
#include <thrust/device_vector.h>
#include <treelite/c_api.h>
#include <cuml/ensemble/randomforest.hpp>
#include <Rcpp.h>
#include <chrono>
#include <memory>
#include <unordered_map>
#include <vector>
using RandomForestClassifierUPtr =
std::unique_ptr<ML::RandomForestClassifierD,
cuml4r::RandomForestMetaDataDeleter<double, int>>;
namespace {
struct RandomForestClassifierModel {
RandomForestClassifierUPtr const rf_;
std::unordered_map<int, int> const inverseLabelsMap_;
__host__ RandomForestClassifierModel(
RandomForestClassifierUPtr rf,
std::unordered_map<int, int>&& inverse_labels_map) noexcept
: rf_(std::move(rf)), inverseLabelsMap_(std::move(inverse_labels_map)) {}
};
// map labels into consecutive integral values in [0, n_unique_labels)
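// e.g. labels {7, 3, 7, 9} become {0, 1, 0, 2} with labels_map = {7->0, 3->1, 9->2};
// postprocess_labels applies the inverse map to turn predictions back into the original labels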
__host__ cuml4r::pinned_host_vector<int> preprocess_labels(
std::unordered_map<int, int>& labels_map, std::vector<int> const& labels) {
int n_unique_labels = 0;
cuml4r::pinned_host_vector<int> preprocessed_labels;
preprocessed_labels.reserve(labels.size());
for (auto const label : labels) {
auto const p = labels_map.emplace(label, n_unique_labels);
if (p.second) {
++n_unique_labels;
}
preprocessed_labels.push_back(p.first->second);
}
return preprocessed_labels;
}
// reverse the mapping done by preprocess_labels
__host__ void postprocess_labels(
cuml4r::pinned_host_vector<int>& labels,
std::unordered_map<int, int> const& inverse_labels_map) {
for (auto& label : labels) {
auto iter = inverse_labels_map.find(label);
if (iter != inverse_labels_map.cend()) {
label = iter->second;
} else {
label = 0;
}
}
}
__host__ std::unordered_map<int, int> reverse(
std::unordered_map<int, int> const& m) {
std::unordered_map<int, int> r;
r.reserve(m.size());
for (auto const& p : m) {
r[p.second] = p.first;
}
return r;
}
} // namespace
namespace cuml4r {
__host__ SEXP rf_classifier_fit(
Rcpp::NumericMatrix const& input, Rcpp::IntegerVector const& labels,
int const n_trees, bool const bootstrap, float const max_samples,
int const n_streams, int const max_depth, int const max_leaves,
float const max_features, int const n_bins, int const min_samples_leaf,
int const min_samples_split, int const split_criterion,
float const min_impurity_decrease, int const max_batch_size,
int const verbosity) {
auto const input_m = cuml4r::Matrix<>(input, /*transpose=*/true);
int const n_samples = input_m.numCols;
int const n_features = input_m.numRows;
auto rf = RandomForestClassifierUPtr(new ML::RandomForestClassifierD);
auto stream_view = cuml4r::stream_allocator::getOrCreateStream();
raft::handle_t handle(n_streams);
cuml4r::handle_utils::initializeHandle(handle, stream_view.value());
// rf input data & labels
auto const& h_input = input_m.values;
thrust::device_vector<double> d_input(h_input.size());
auto CUML4R_ANONYMOUS_VARIABLE(input_h2d) = cuml4r::async_copy(
stream_view.value(), h_input.cbegin(), h_input.cend(), d_input.begin());
std::unordered_map<int, int> labels_map;
auto const h_labels =
preprocess_labels(labels_map, Rcpp::as<std::vector<int>>(labels));
thrust::device_vector<int> d_labels(h_labels.size());
auto CUML4R_ANONYMOUS_VARIABLE(labels_h2d) = cuml4r::async_copy(
stream_view.value(), h_labels.cbegin(), h_labels.cend(), d_labels.begin());
{
auto* rf_ptr = rf.get();
ML::fit(
handle, rf_ptr, d_input.data().get(), n_samples, n_features,
d_labels.data().get(),
/*n_unique_labels=*/static_cast<int>(labels_map.size()),
ML::set_rf_params(max_depth, max_leaves, max_features, n_bins,
min_samples_leaf, min_samples_split,
min_impurity_decrease, bootstrap, n_trees, max_samples,
std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::system_clock::now().time_since_epoch())
.count(),
static_cast<ML::CRITERION>(split_criterion), n_streams,
max_batch_size),
/*verbosity=*/verbosity);
CUDA_RT_CALL(cudaStreamSynchronize(stream_view.value()));
if (rf_ptr != rf.get()) {
// NOTE: in theory this should never happen though
rf = RandomForestClassifierUPtr(rf_ptr);
}
}
return Rcpp::XPtr<RandomForestClassifierModel>(
new RandomForestClassifierModel(std::move(rf), reverse(labels_map)));
}
__host__ Rcpp::IntegerVector rf_classifier_predict(
SEXP model_xptr, Rcpp::NumericMatrix const& input, int const verbosity) {
auto const input_m = cuml4r::Matrix<>(input, /*transpose=*/false);
int const n_samples = input_m.numRows;
int const n_features = input_m.numCols;
auto stream_view = cuml4r::stream_allocator::getOrCreateStream();
raft::handle_t handle;
cuml4r::handle_utils::initializeHandle(handle, stream_view.value());
auto model = Rcpp::XPtr<RandomForestClassifierModel>(model_xptr);
// inputs
auto const& h_input = input_m.values;
thrust::device_vector<double> d_input(h_input.size());
auto CUML4R_ANONYMOUS_VARIABLE(input_h2d) = cuml4r::async_copy(
stream_view.value(), h_input.cbegin(), h_input.cend(), d_input.begin());
// outputs
thrust::device_vector<int> d_predictions(n_samples);
ML::predict(handle, /*forest=*/model->rf_.get(), d_input.data().get(),
n_samples, n_features, /*predictions=*/d_predictions.data().get(),
/*verbosity=*/verbosity);
cuml4r::pinned_host_vector<int> h_predictions(n_samples);
auto CUML4R_ANONYMOUS_VARIABLE(predictions_d2h) =
cuml4r::async_copy(stream_view.value(), d_predictions.cbegin(),
d_predictions.cend(), h_predictions.begin());
CUDA_RT_CALL(cudaStreamSynchronize(stream_view.value()));
// post-process prediction labels
postprocess_labels(h_predictions, model->inverseLabelsMap_);
return Rcpp::IntegerVector(h_predictions.begin(), h_predictions.end());
}
__host__ Rcpp::NumericMatrix rf_classifier_predict_class_probabilities(
SEXP model_xptr, Rcpp::NumericMatrix const& input) {
#ifndef CUML4R_TREELITE_C_API_MISSING
auto const input_m = cuml4r::Matrix<float>(input, /*transpose=*/false);
int const n_samples = input_m.numRows;
int const n_features = input_m.numCols;
auto model = Rcpp::XPtr<RandomForestClassifierModel>(model_xptr);
int const num_classes = model->inverseLabelsMap_.size();
ModelHandle tl_handle;
ML::build_treelite_forest(
/*model=*/&tl_handle,
/*forest=*/model->rf_.get(),
/*num_features=*/n_features,
/*task_category=*/num_classes);
auto stream_view = cuml4r::stream_allocator::getOrCreateStream();
raft::handle_t handle;
cuml4r::handle_utils::initializeHandle(handle, stream_view.value());
ML::fil::forest_t forest;
ML::fil::treelite_params_t params;
params.algo = ML::fil::algo_t::ALGO_AUTO;
// output class probabilities instead of classes
params.output_class = false;
params.storage_type = ML::fil::storage_type_t::AUTO;
params.blocks_per_sm = 0;
params.threads_per_tree = 1;
params.n_items = 0;
params.pforest_shape_str = nullptr;
ML::fil::from_treelite(handle, /*pforest=*/&forest,
/*model=*/tl_handle, /*tl_params=*/¶ms);
// FIL input
auto const& h_x = input_m.values;
thrust::device_vector<float> d_x(h_x.size());
auto CUML4R_ANONYMOUS_VARIABLE(x_h2d) = cuml4r::async_copy(
handle.get_stream(), h_x.cbegin(), h_x.cend(), d_x.begin());
// FIL output
thrust::device_vector<float> d_preds(num_classes * n_samples);
ML::fil::predict(/*h=*/handle, /*f=*/forest,
/*preds=*/d_preds.data().get(),
/*data=*/d_x.data().get(), /*num_rows=*/n_samples,
/*predict_proba=*/true);
cuml4r::pinned_host_vector<float> h_preds(d_preds.size());
auto CUML4R_ANONYMOUS_VARIABLE(preds_d2h) = cuml4r::async_copy(
handle.get_stream(), d_preds.cbegin(), d_preds.cend(), h_preds.begin());
CUDA_RT_CALL(cudaStreamSynchronize(handle.get_stream()));
return Rcpp::transpose(
Rcpp::NumericMatrix(num_classes, n_samples, h_preds.begin()));
#else
return {};
#endif
}
} // namespace cuml4r
|
3be1ed4c528f57a392124cf4246db2f29929ed2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/rsqrt_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math_utils.h"
namespace caffe2 {
namespace {
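// gradient of y = rsqrt(x) = x^(-1/2): dy/dx = -(1/2) * x^(-3/2) = -(1/2) * y^3,
// hence dX = dY * Cube(Y) * (-0.5) in the kernel below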
template <typename T>
__global__ void
RsqrtGradientCUDAKernel(const int size, const T* dY, const T* Y, T* dX) {
CUDA_1D_KERNEL_LOOP(i, size) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * math::utils::Cube<T>(__ldg(Y + i)) *
static_cast<T>(-0.5);
#else
dX[i] = dY[i] * math::utils::Cube<T>(Y[i]) * static_cast<T>(-0.5);
#endif
}
}
} // namespace
template <>
template <typename T>
bool RsqrtGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& dY_dims,
const std::vector<int>& /* Y_dims */,
const T* dY,
const T* Y,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( RsqrtGradientCUDAKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, dY, Y, dX);
return true;
}
REGISTER_CUDA_OPERATOR(
Rsqrt,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
RsqrtFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
RsqrtGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
RsqrtGradientFunctor<CUDAContext>>);
} // namespace caffe2
| 3be1ed4c528f57a392124cf4246db2f29929ed2b.cu | #include "caffe2/operators/rsqrt_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math_utils.h"
namespace caffe2 {
namespace {
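// gradient of y = rsqrt(x) = x^(-1/2): dy/dx = -(1/2) * x^(-3/2) = -(1/2) * y^3,
// hence dX = dY * Cube(Y) * (-0.5) in the kernel below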
template <typename T>
__global__ void
RsqrtGradientCUDAKernel(const int size, const T* dY, const T* Y, T* dX) {
CUDA_1D_KERNEL_LOOP(i, size) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * math::utils::Cube<T>(__ldg(Y + i)) *
static_cast<T>(-0.5);
#else
dX[i] = dY[i] * math::utils::Cube<T>(Y[i]) * static_cast<T>(-0.5);
#endif
}
}
} // namespace
template <>
template <typename T>
bool RsqrtGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& dY_dims,
const std::vector<int>& /* Y_dims */,
const T* dY,
const T* Y,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies<int>());
RsqrtGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, Y, dX);
return true;
}
REGISTER_CUDA_OPERATOR(
Rsqrt,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
RsqrtFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
RsqrtGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
RsqrtGradientFunctor<CUDAContext>>);
} // namespace caffe2
|
f0aec791417d35cd07a13ed9f7212543de792e93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define SIZE (1024*1024)
__global__ void addVector(float* left, float* right, float* result)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x; // global index: without the block offset, every block would redo only the first 512 elements
result[idx] = left[idx] + right[idx];
}
__host__ int main()
{
float* vec1 = new float[SIZE];
float* vec2 = new float[SIZE];
float* vec3 = new float[SIZE];
for (int i = 0; i < SIZE; i++)
{
vec1[i] = i;
vec2[i] = i;
}
float* devVec1;
float* devVec2;
float* devVec3;
hipEvent_t start, stop;
hipMalloc((void**)&devVec1, sizeof(float) * SIZE);
hipMalloc((void**)&devVec2, sizeof(float) * SIZE);
hipMalloc((void**)&devVec3, sizeof(float) * SIZE);
hipMemcpy(devVec1, vec1, sizeof(float) * SIZE, hipMemcpyHostToDevice);
hipMemcpy(devVec2, vec2, sizeof(float) * SIZE, hipMemcpyHostToDevice);
int block = 512;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( addVector), dim3(SIZE/512), dim3(block), 0, 0, devVec1, devVec2, devVec3);
hipEventRecord(stop);
hipEvent_t syncEvent;
hipEventCreate(&syncEvent);
hipEventRecord(syncEvent, 0);
hipEventSynchronize(syncEvent);
hipMemcpy(vec3, devVec3, sizeof(float) * SIZE, hipMemcpyDeviceToHost);
float time = 0;
// for (int i = 0; i < SIZE; i++)
// printf("Element #%i: %.1f\n", i , vec3[i]);
hipEventElapsedTime(&time, start, stop);
printf("Elapsed time: %f\n", time);
FILE *f = fopen("time.txt", "a+");
if (f == NULL) {
fprintf(stderr, "FILE ERROR!\n");
} else {
fprintf(f, "%f 512\n", time);
}
fclose(f);
hipEventDestroy(syncEvent);
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(devVec1);
hipFree(devVec2);
hipFree(devVec3);
return EXIT_SUCCESS;
}
| f0aec791417d35cd07a13ed9f7212543de792e93.cu | #include <stdio.h>
#include <stdlib.h>
#define SIZE (1024*1024)
__global__ void addVector(float* left, float* right, float* result)
{
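    // One thread per vector element: compute the global index across all blocks.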
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
result[idx] = left[idx] + right[idx];
}
__host__ int main()
{
float* vec1 = new float[SIZE];
float* vec2 = new float[SIZE];
float* vec3 = new float[SIZE];
for (int i = 0; i < SIZE; i++)
{
vec1[i] = i;
vec2[i] = i;
}
float* devVec1;
float* devVec2;
float* devVec3;
cudaEvent_t start, stop;
cudaMalloc((void**)&devVec1, sizeof(float) * SIZE);
cudaMalloc((void**)&devVec2, sizeof(float) * SIZE);
cudaMalloc((void**)&devVec3, sizeof(float) * SIZE);
cudaMemcpy(devVec1, vec1, sizeof(float) * SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(devVec2, vec2, sizeof(float) * SIZE, cudaMemcpyHostToDevice);
int block = 512;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
addVector<<<SIZE/512, block>>>(devVec1, devVec2, devVec3);
cudaEventRecord(stop);
cudaEvent_t syncEvent;
cudaEventCreate(&syncEvent);
cudaEventRecord(syncEvent, 0);
cudaEventSynchronize(syncEvent);
cudaMemcpy(vec3, devVec3, sizeof(float) * SIZE, cudaMemcpyDeviceToHost);
float time = 0;
// for (int i = 0; i < SIZE; i++)
// printf("Element #%i: %.1f\n", i , vec3[i]);
cudaEventElapsedTime(&time, start, stop);
printf("Elapsed time: %f\n", time);
FILE *f = fopen("time.txt", "a+");
if (f == NULL) {
fprintf(stderr, "FILE ERROR!\n");
} else {
fprintf(f, "%f 512\n", time);
}
fclose(f);
cudaEventDestroy(syncEvent);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(devVec1);
cudaFree(devVec2);
cudaFree(devVec3);
return EXIT_SUCCESS;
}
|
0645cc044180a934b9793cbe04f01dd5b12367e7.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
uchar4 *d_inputImageRGBA__;
uchar4 *d_outputImageRGBA__;
float *h_filter__;
__global__ void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols, const float* const filter, const int filterWidth) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if ( col >= numCols || row >= numRows )
return;
float result = 0.f;
//For every value in the filter around the pixel (c, r)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = min(max(row + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(col + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[row * numCols + col] = result;
}
__global__ void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel) {
int absolute_image_position_x = blockDim.x * blockIdx.x + threadIdx.x;
int absolute_image_position_y = blockDim.y * blockIdx.y + threadIdx.y;
if ( absolute_image_position_x >= numCols || absolute_image_position_y >= numRows )
return;
int thread_1D_pos = absolute_image_position_y * numCols + absolute_image_position_x;
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
__global__ void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols) {
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) {
//allocate memory for the three different channels
//original
hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage);
hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage);
hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage);
//Allocate memory for the filter on the GPU
hipMalloc(&d_filter, sizeof(float)*filterWidth*filterWidth);
hipMemcpy(d_filter,h_filter,sizeof(float)*filterWidth*filterWidth,hipMemcpyHostToDevice);
}
void cleanup() {
hipFree(d_red);
hipFree(d_green);
hipFree(d_blue);
hipFree(d_filter);
}
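// Builds a blurKernelWidth x blurKernelWidth Gaussian kernel for the given sigma,
// normalized so that its weights sum to 1.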
void setFilter(float **h_filter, int *filterWidth, int blurKernelWidth, float blurKernelSigma) {
//Normally blurKernelWidth = 9 and blurKernelSigma = 2.0
*h_filter = new float[blurKernelWidth * blurKernelWidth];
*filterWidth = blurKernelWidth;
float filterSum = 0.f; //for normalization
for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r) {
for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c) {
float filterValue = expf( -(float)(c * c + r * r) / (2.f * blurKernelSigma * blurKernelSigma));
(*h_filter)[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] = filterValue;
filterSum += filterValue;
}
}
float normalizationFactor = 1.f / filterSum;
for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r)
for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c)
(*h_filter)[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] *= normalizationFactor;
}
uchar4* blur_ops(uchar4* d_inputImageRGBA, size_t numRows, size_t numCols, int blurKernelWidth) {
float blurKernelSigma = blurKernelWidth/4.0f;
//Set filter array
float* h_filter;
int filterWidth;
setFilter(&h_filter, &filterWidth, blurKernelWidth, blurKernelSigma);
//Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(16,16,1);
//Calculate Grid SIze
int a=numCols/blockSize.x, b=numRows/blockSize.y;
const dim3 gridSize(a+1,b+1,1);
const size_t numPixels = numRows * numCols;
uchar4 *d_outputImageRGBA;
hipMalloc((void **)&d_outputImageRGBA, sizeof(uchar4) * numPixels);
  hipMemset(d_outputImageRGBA, 0, numPixels * sizeof(uchar4)); //make sure no memory is left lying around
d_inputImageRGBA__ = d_inputImageRGBA;
d_outputImageRGBA__ = d_outputImageRGBA;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//blurred
unsigned char *d_redBlurred, *d_greenBlurred, *d_blueBlurred;
hipMalloc(&d_redBlurred, sizeof(unsigned char) * numPixels);
hipMalloc(&d_greenBlurred, sizeof(unsigned char) * numPixels);
hipMalloc(&d_blueBlurred, sizeof(unsigned char) * numPixels);
hipMemset(d_redBlurred, 0, sizeof(unsigned char) * numPixels);
hipMemset(d_greenBlurred, 0, sizeof(unsigned char) * numPixels);
hipMemset(d_blueBlurred, 0, sizeof(unsigned char) * numPixels);
allocateMemoryAndCopyToGPU(numRows, numCols, h_filter, filterWidth);
hipEventRecord(start, 0);
//Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red,d_green, d_blue);
hipDeviceSynchronize();
//Call blur kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
hipDeviceSynchronize();
//Now we recombine the results.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float gpu_ms;
hipEventElapsedTime(&gpu_ms, start, stop);
printf("GPU execution time for Gaussian Blur: %f\n", gpu_ms);
//cleanup memory
cleanup();
hipFree(d_redBlurred);
hipFree(d_greenBlurred);
hipFree(d_blueBlurred);
hipDeviceSynchronize();
//Initialize memory on host for output uchar4*
uchar4* h_out;
h_out = (uchar4*)malloc(sizeof(uchar4) * numPixels);
//copy output from device to host
hipMemcpy(h_out, d_outputImageRGBA, sizeof(uchar4) * numPixels, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//cleanup memory on device
hipFree(d_inputImageRGBA__);
hipFree(d_outputImageRGBA__);
delete[] h_filter__;
//return h_out
return h_out;
} | 0645cc044180a934b9793cbe04f01dd5b12367e7.cu | #include <cuda_runtime.h>
#include <stdio.h>
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
uchar4 *d_inputImageRGBA__;
uchar4 *d_outputImageRGBA__;
float *h_filter__;
__global__ void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols, const float* const filter, const int filterWidth) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if ( col >= numCols || row >= numRows )
return;
float result = 0.f;
//For every value in the filter around the pixel (c, r)
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = min(max(row + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(col + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[row * numCols + col] = result;
}
__global__ void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel) {
int absolute_image_position_x = blockDim.x * blockIdx.x + threadIdx.x;
int absolute_image_position_y = blockDim.y * blockIdx.y + threadIdx.y;
if ( absolute_image_position_x >= numCols || absolute_image_position_y >= numRows )
return;
int thread_1D_pos = absolute_image_position_y * numCols + absolute_image_position_x;
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
__global__ void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols) {
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) {
//allocate memory for the three different channels
//original
cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage);
cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage);
cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage);
//Allocate memory for the filter on the GPU
cudaMalloc(&d_filter, sizeof(float)*filterWidth*filterWidth);
cudaMemcpy(d_filter,h_filter,sizeof(float)*filterWidth*filterWidth,cudaMemcpyHostToDevice);
}
void cleanup() {
cudaFree(d_red);
cudaFree(d_green);
cudaFree(d_blue);
cudaFree(d_filter);
}
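// Builds a blurKernelWidth x blurKernelWidth Gaussian kernel for the given sigma,
// normalized so that its weights sum to 1.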
void setFilter(float **h_filter, int *filterWidth, int blurKernelWidth, float blurKernelSigma) {
//Normally blurKernelWidth = 9 and blurKernelSigma = 2.0
*h_filter = new float[blurKernelWidth * blurKernelWidth];
*filterWidth = blurKernelWidth;
float filterSum = 0.f; //for normalization
for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r) {
for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c) {
float filterValue = expf( -(float)(c * c + r * r) / (2.f * blurKernelSigma * blurKernelSigma));
(*h_filter)[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] = filterValue;
filterSum += filterValue;
}
}
float normalizationFactor = 1.f / filterSum;
for (int r = -blurKernelWidth/2; r <= blurKernelWidth/2; ++r)
for (int c = -blurKernelWidth/2; c <= blurKernelWidth/2; ++c)
(*h_filter)[(r + blurKernelWidth/2) * blurKernelWidth + c + blurKernelWidth/2] *= normalizationFactor;
}
uchar4* blur_ops(uchar4* d_inputImageRGBA, size_t numRows, size_t numCols, int blurKernelWidth) {
float blurKernelSigma = blurKernelWidth/4.0f;
//Set filter array
float* h_filter;
int filterWidth;
setFilter(&h_filter, &filterWidth, blurKernelWidth, blurKernelSigma);
//Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(16,16,1);
//Calculate Grid SIze
int a=numCols/blockSize.x, b=numRows/blockSize.y;
const dim3 gridSize(a+1,b+1,1);
const size_t numPixels = numRows * numCols;
uchar4 *d_outputImageRGBA;
cudaMalloc((void **)&d_outputImageRGBA, sizeof(uchar4) * numPixels);
  cudaMemset(d_outputImageRGBA, 0, numPixels * sizeof(uchar4)); //make sure no memory is left lying around
d_inputImageRGBA__ = d_inputImageRGBA;
d_outputImageRGBA__ = d_outputImageRGBA;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//blurred
unsigned char *d_redBlurred, *d_greenBlurred, *d_blueBlurred;
cudaMalloc(&d_redBlurred, sizeof(unsigned char) * numPixels);
cudaMalloc(&d_greenBlurred, sizeof(unsigned char) * numPixels);
cudaMalloc(&d_blueBlurred, sizeof(unsigned char) * numPixels);
cudaMemset(d_redBlurred, 0, sizeof(unsigned char) * numPixels);
cudaMemset(d_greenBlurred, 0, sizeof(unsigned char) * numPixels);
cudaMemset(d_blueBlurred, 0, sizeof(unsigned char) * numPixels);
allocateMemoryAndCopyToGPU(numRows, numCols, h_filter, filterWidth);
cudaEventRecord(start, 0);
//Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red,d_green, d_blue);
cudaDeviceSynchronize();
//Call blur kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
cudaDeviceSynchronize();
//Now we recombine the results.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float gpu_ms;
cudaEventElapsedTime(&gpu_ms, start, stop);
printf("GPU execution time for Gaussian Blur: %f\n", gpu_ms);
//cleanup memory
cleanup();
cudaFree(d_redBlurred);
cudaFree(d_greenBlurred);
cudaFree(d_blueBlurred);
cudaDeviceSynchronize();
//Initialize memory on host for output uchar4*
uchar4* h_out;
h_out = (uchar4*)malloc(sizeof(uchar4) * numPixels);
//copy output from device to host
cudaMemcpy(h_out, d_outputImageRGBA, sizeof(uchar4) * numPixels, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//cleanup memory on device
cudaFree(d_inputImageRGBA__);
cudaFree(d_outputImageRGBA__);
delete[] h_filter__;
//return h_out
return h_out;
} |
e8d6c1471da5ce3a063cd307c72913e66fc541ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <gtest/gtest.h>
#include <iostream>
#include <vector>
#include <cuml/manifold/umapparams.h>
#include <datasets/digits.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <cuml/common/device_buffer.hpp>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/manifold/umap.hpp>
#include <cuml/neighbors/knn.hpp>
#include <linalg/reduce_rows_by_key.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/distance/distance.cuh>
#include <raft/handle.hpp>
#include <raft/mr/device/allocator.hpp>
#include <selection/knn.cuh>
#include <umap/runner.cuh>
using namespace ML;
using namespace ML::Metrics;
using namespace std;
using namespace MLCommon;
using namespace MLCommon::Datasets::Digits;
template <typename T>
__global__ void has_nan_kernel(T* data, size_t len, bool* answer)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= len) return;
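  // A NaN is the only value that compares unequal to itself.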
  T val = data[tid];
if (val != val) { *answer = true; }
}
template <typename T>
bool has_nan(T* data,
size_t len,
std::shared_ptr<raft::mr::device::allocator> alloc,
hipStream_t stream)
{
dim3 blk(256);
dim3 grid(raft::ceildiv(len, (size_t)blk.x));
bool h_answer = false;
device_buffer<bool> d_answer(alloc, stream, 1);
raft::update_device(d_answer.data(), &h_answer, 1, stream);
hipLaunchKernelGGL(( has_nan_kernel), dim3(grid), dim3(blk), 0, stream, data, len, d_answer.data());
raft::update_host(&h_answer, d_answer.data(), 1, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
return h_answer;
}
template <typename T>
__global__ void are_equal_kernel(T* embedding1, T* embedding2, size_t len, double* diff)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= len) return;
if (embedding1[tid] != embedding2[tid]) {
atomicAdd(diff, abs(embedding1[tid] - embedding2[tid]));
}
}
template <typename T>
bool are_equal(T* embedding1,
T* embedding2,
size_t len,
std::shared_ptr<raft::mr::device::allocator> alloc,
hipStream_t stream)
{
double h_answer = 0.;
device_buffer<double> d_answer(alloc, stream, 1);
raft::update_device(d_answer.data(), &h_answer, 1, stream);
hipLaunchKernelGGL(( are_equal_kernel), dim3(raft::ceildiv(len, (size_t)32)), dim3(32), 0, stream,
embedding1, embedding2, len, d_answer.data());
raft::update_host(&h_answer, d_answer.data(), 1, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
double tolerance = 1.0;
if (h_answer > tolerance) {
std::cout << "Not equal, difference : " << h_answer << std::endl;
return false;
}
return true;
}
class UMAPParametrizableTest : public ::testing::Test {
protected:
struct TestParams {
bool fit_transform;
bool supervised;
bool knn_params;
int n_samples;
int n_features;
int n_clusters;
double min_trustworthiness;
};
void get_embedding(raft::handle_t& handle,
float* X,
float* y,
float* embedding_ptr,
TestParams& test_params,
UMAPParams& umap_params)
{
hipStream_t stream = handle.get_stream();
auto alloc = handle.get_device_allocator();
int& n_samples = test_params.n_samples;
int& n_features = test_params.n_features;
device_buffer<int64_t>* knn_indices_b;
device_buffer<float>* knn_dists_b;
int64_t* knn_indices = nullptr;
float* knn_dists = nullptr;
if (test_params.knn_params) {
knn_indices_b =
new device_buffer<int64_t>(alloc, stream, n_samples * umap_params.n_neighbors);
knn_dists_b = new device_buffer<float>(alloc, stream, n_samples * umap_params.n_neighbors);
knn_indices = knn_indices_b->data();
knn_dists = knn_dists_b->data();
std::vector<float*> ptrs(1);
std::vector<int> sizes(1);
ptrs[0] = X;
sizes[0] = n_samples;
raft::spatial::knn::brute_force_knn(handle,
ptrs,
sizes,
n_features,
X,
n_samples,
knn_indices,
knn_dists,
umap_params.n_neighbors);
CUDA_CHECK(hipStreamSynchronize(stream));
}
float* model_embedding = nullptr;
device_buffer<float>* model_embedding_b;
if (test_params.fit_transform) {
model_embedding = embedding_ptr;
} else {
model_embedding_b =
new device_buffer<float>(alloc, stream, n_samples * umap_params.n_components);
model_embedding = model_embedding_b->data();
}
CUDA_CHECK(hipMemsetAsync(
model_embedding, 0, n_samples * umap_params.n_components * sizeof(float), stream));
CUDA_CHECK(hipStreamSynchronize(stream));
if (test_params.supervised) {
ML::UMAP::fit(
handle, X, y, n_samples, n_features, knn_indices, knn_dists, &umap_params, model_embedding);
} else {
ML::UMAP::fit(handle,
X,
nullptr,
n_samples,
n_features,
knn_indices,
knn_dists,
&umap_params,
model_embedding);
}
CUDA_CHECK(hipStreamSynchronize(stream));
if (!test_params.fit_transform) {
CUDA_CHECK(hipMemsetAsync(
embedding_ptr, 0, n_samples * umap_params.n_components * sizeof(float), stream));
CUDA_CHECK(hipStreamSynchronize(stream));
ML::UMAP::transform(handle,
X,
n_samples,
umap_params.n_components,
knn_indices,
knn_dists,
X,
n_samples,
model_embedding,
n_samples,
&umap_params,
embedding_ptr);
CUDA_CHECK(hipStreamSynchronize(stream));
delete model_embedding_b;
}
if (test_params.knn_params) {
delete knn_indices_b;
delete knn_dists_b;
}
}
void assertions(raft::handle_t& handle,
float* X,
float* embedding_ptr,
TestParams& test_params,
UMAPParams& umap_params)
{
hipStream_t stream = handle.get_stream();
auto alloc = handle.get_device_allocator();
int& n_samples = test_params.n_samples;
int& n_features = test_params.n_features;
ASSERT_TRUE(!has_nan(embedding_ptr, n_samples * umap_params.n_components, alloc, stream));
double trustworthiness =
trustworthiness_score<float, raft::distance::DistanceType::L2SqrtUnexpanded>(
handle,
X,
embedding_ptr,
n_samples,
n_features,
umap_params.n_components,
umap_params.n_neighbors);
std::cout << "min. expected trustworthiness: " << test_params.min_trustworthiness << std::endl;
std::cout << "trustworthiness: " << trustworthiness << std::endl;
ASSERT_TRUE(trustworthiness > test_params.min_trustworthiness);
}
void test(TestParams& test_params, UMAPParams& umap_params)
{
std::cout << "\numap_params : [" << std::boolalpha << umap_params.n_neighbors << "-"
<< umap_params.n_components << "-" << umap_params.n_epochs << "-"
<< umap_params.random_state << std::endl;
std::cout << "test_params : [" << std::boolalpha << test_params.fit_transform << "-"
<< test_params.supervised << "-" << test_params.knn_params << "-"
<< test_params.n_samples << "-" << test_params.n_features << "-"
<< test_params.n_clusters << "-" << test_params.min_trustworthiness << "]"
<< std::endl;
raft::handle_t handle;
hipStream_t stream = handle.get_stream();
auto alloc = handle.get_device_allocator();
int& n_samples = test_params.n_samples;
int& n_features = test_params.n_features;
UMAP::find_ab(handle, &umap_params);
device_buffer<float> X_d(alloc, stream, n_samples * n_features);
device_buffer<int> y_d(alloc, stream, n_samples);
ML::Datasets::make_blobs(handle,
X_d.data(),
y_d.data(),
n_samples,
n_features,
test_params.n_clusters,
true,
nullptr,
nullptr,
1.f,
true,
-10.f,
10.f,
1234ULL);
CUDA_CHECK(hipStreamSynchronize(stream));
MLCommon::LinAlg::convert_array((float*)y_d.data(), y_d.data(), n_samples, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
device_buffer<float> embeddings1(alloc, stream, n_samples * umap_params.n_components);
float* e1 = embeddings1.data();
get_embedding(handle, X_d.data(), (float*)y_d.data(), e1, test_params, umap_params);
assertions(handle, X_d.data(), e1, test_params, umap_params);
// v21.08: Reproducibility looks to be busted for CTK 11.4. Need to figure out
// why this is happening and re-enable this.
#if CUDART_VERSION == 11040
return;
#else
// Disable reproducibility tests after transformation
if (!test_params.fit_transform) { return; }
#endif
device_buffer<float> embeddings2(alloc, stream, n_samples * umap_params.n_components);
float* e2 = embeddings2.data();
get_embedding(handle, X_d.data(), (float*)y_d.data(), e2, test_params, umap_params);
#if CUDART_VERSION >= 11020
bool equal = are_equal(e1, e2, n_samples * umap_params.n_components, alloc, stream);
if (!equal) {
raft::print_device_vector("e1", e1, 25, std::cout);
raft::print_device_vector("e2", e2, 25, std::cout);
}
ASSERT_TRUE(equal);
#else
ASSERT_TRUE(
raft::devArrMatch(e1, e2, n_samples * umap_params.n_components, raft::Compare<float>{}));
#endif
}
void SetUp() override
{
std::vector<TestParams> test_params_vec = {{false, false, false, 2000, 50, 20, 0.45},
{true, false, false, 2000, 50, 20, 0.45},
{false, true, false, 2000, 50, 20, 0.45},
{false, false, true, 2000, 50, 20, 0.45},
{true, true, false, 2000, 50, 20, 0.45},
{true, false, true, 2000, 50, 20, 0.45},
{false, true, true, 2000, 50, 20, 0.45},
{true, true, true, 2000, 50, 20, 0.45}};
std::vector<UMAPParams> umap_params_vec(4);
umap_params_vec[0].n_components = 2;
umap_params_vec[1].n_components = 10;
umap_params_vec[2].n_components = 21;
umap_params_vec[2].random_state = 43;
umap_params_vec[2].init = 0;
umap_params_vec[2].n_epochs = 500;
umap_params_vec[3].n_components = 25;
umap_params_vec[3].random_state = 43;
umap_params_vec[3].init = 0;
umap_params_vec[3].n_epochs = 500;
for (auto& umap_params : umap_params_vec) {
for (auto& test_params : test_params_vec) {
test(test_params, umap_params);
}
}
}
void TearDown() override {}
};
typedef UMAPParametrizableTest UMAPParametrizableTest;
TEST_F(UMAPParametrizableTest, Result) {}
| e8d6c1471da5ce3a063cd307c72913e66fc541ab.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/metrics/metrics.hpp>
#include <gtest/gtest.h>
#include <iostream>
#include <vector>
#include <cuml/manifold/umapparams.h>
#include <datasets/digits.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <cuml/common/device_buffer.hpp>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/manifold/umap.hpp>
#include <cuml/neighbors/knn.hpp>
#include <linalg/reduce_rows_by_key.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/distance/distance.cuh>
#include <raft/handle.hpp>
#include <raft/mr/device/allocator.hpp>
#include <selection/knn.cuh>
#include <umap/runner.cuh>
using namespace ML;
using namespace ML::Metrics;
using namespace std;
using namespace MLCommon;
using namespace MLCommon::Datasets::Digits;
template <typename T>
__global__ void has_nan_kernel(T* data, size_t len, bool* answer)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= len) return;
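  // A NaN is the only value that compares unequal to itself.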
  T val = data[tid];
if (val != val) { *answer = true; }
}
template <typename T>
bool has_nan(T* data,
size_t len,
std::shared_ptr<raft::mr::device::allocator> alloc,
cudaStream_t stream)
{
dim3 blk(256);
dim3 grid(raft::ceildiv(len, (size_t)blk.x));
bool h_answer = false;
device_buffer<bool> d_answer(alloc, stream, 1);
raft::update_device(d_answer.data(), &h_answer, 1, stream);
has_nan_kernel<<<grid, blk, 0, stream>>>(data, len, d_answer.data());
raft::update_host(&h_answer, d_answer.data(), 1, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
return h_answer;
}
template <typename T>
__global__ void are_equal_kernel(T* embedding1, T* embedding2, size_t len, double* diff)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= len) return;
if (embedding1[tid] != embedding2[tid]) {
atomicAdd(diff, abs(embedding1[tid] - embedding2[tid]));
}
}
template <typename T>
bool are_equal(T* embedding1,
T* embedding2,
size_t len,
std::shared_ptr<raft::mr::device::allocator> alloc,
cudaStream_t stream)
{
double h_answer = 0.;
device_buffer<double> d_answer(alloc, stream, 1);
raft::update_device(d_answer.data(), &h_answer, 1, stream);
are_equal_kernel<<<raft::ceildiv(len, (size_t)32), 32, 0, stream>>>(
embedding1, embedding2, len, d_answer.data());
raft::update_host(&h_answer, d_answer.data(), 1, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
double tolerance = 1.0;
if (h_answer > tolerance) {
std::cout << "Not equal, difference : " << h_answer << std::endl;
return false;
}
return true;
}
class UMAPParametrizableTest : public ::testing::Test {
protected:
struct TestParams {
bool fit_transform;
bool supervised;
bool knn_params;
int n_samples;
int n_features;
int n_clusters;
double min_trustworthiness;
};
void get_embedding(raft::handle_t& handle,
float* X,
float* y,
float* embedding_ptr,
TestParams& test_params,
UMAPParams& umap_params)
{
cudaStream_t stream = handle.get_stream();
auto alloc = handle.get_device_allocator();
int& n_samples = test_params.n_samples;
int& n_features = test_params.n_features;
device_buffer<int64_t>* knn_indices_b;
device_buffer<float>* knn_dists_b;
int64_t* knn_indices = nullptr;
float* knn_dists = nullptr;
if (test_params.knn_params) {
knn_indices_b =
new device_buffer<int64_t>(alloc, stream, n_samples * umap_params.n_neighbors);
knn_dists_b = new device_buffer<float>(alloc, stream, n_samples * umap_params.n_neighbors);
knn_indices = knn_indices_b->data();
knn_dists = knn_dists_b->data();
std::vector<float*> ptrs(1);
std::vector<int> sizes(1);
ptrs[0] = X;
sizes[0] = n_samples;
raft::spatial::knn::brute_force_knn(handle,
ptrs,
sizes,
n_features,
X,
n_samples,
knn_indices,
knn_dists,
umap_params.n_neighbors);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
float* model_embedding = nullptr;
device_buffer<float>* model_embedding_b;
if (test_params.fit_transform) {
model_embedding = embedding_ptr;
} else {
model_embedding_b =
new device_buffer<float>(alloc, stream, n_samples * umap_params.n_components);
model_embedding = model_embedding_b->data();
}
CUDA_CHECK(cudaMemsetAsync(
model_embedding, 0, n_samples * umap_params.n_components * sizeof(float), stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
if (test_params.supervised) {
ML::UMAP::fit(
handle, X, y, n_samples, n_features, knn_indices, knn_dists, &umap_params, model_embedding);
} else {
ML::UMAP::fit(handle,
X,
nullptr,
n_samples,
n_features,
knn_indices,
knn_dists,
&umap_params,
model_embedding);
}
CUDA_CHECK(cudaStreamSynchronize(stream));
if (!test_params.fit_transform) {
CUDA_CHECK(cudaMemsetAsync(
embedding_ptr, 0, n_samples * umap_params.n_components * sizeof(float), stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
ML::UMAP::transform(handle,
X,
n_samples,
umap_params.n_components,
knn_indices,
knn_dists,
X,
n_samples,
model_embedding,
n_samples,
&umap_params,
embedding_ptr);
CUDA_CHECK(cudaStreamSynchronize(stream));
delete model_embedding_b;
}
if (test_params.knn_params) {
delete knn_indices_b;
delete knn_dists_b;
}
}
void assertions(raft::handle_t& handle,
float* X,
float* embedding_ptr,
TestParams& test_params,
UMAPParams& umap_params)
{
cudaStream_t stream = handle.get_stream();
auto alloc = handle.get_device_allocator();
int& n_samples = test_params.n_samples;
int& n_features = test_params.n_features;
ASSERT_TRUE(!has_nan(embedding_ptr, n_samples * umap_params.n_components, alloc, stream));
double trustworthiness =
trustworthiness_score<float, raft::distance::DistanceType::L2SqrtUnexpanded>(
handle,
X,
embedding_ptr,
n_samples,
n_features,
umap_params.n_components,
umap_params.n_neighbors);
std::cout << "min. expected trustworthiness: " << test_params.min_trustworthiness << std::endl;
std::cout << "trustworthiness: " << trustworthiness << std::endl;
ASSERT_TRUE(trustworthiness > test_params.min_trustworthiness);
}
void test(TestParams& test_params, UMAPParams& umap_params)
{
std::cout << "\numap_params : [" << std::boolalpha << umap_params.n_neighbors << "-"
<< umap_params.n_components << "-" << umap_params.n_epochs << "-"
<< umap_params.random_state << std::endl;
std::cout << "test_params : [" << std::boolalpha << test_params.fit_transform << "-"
<< test_params.supervised << "-" << test_params.knn_params << "-"
<< test_params.n_samples << "-" << test_params.n_features << "-"
<< test_params.n_clusters << "-" << test_params.min_trustworthiness << "]"
<< std::endl;
raft::handle_t handle;
cudaStream_t stream = handle.get_stream();
auto alloc = handle.get_device_allocator();
int& n_samples = test_params.n_samples;
int& n_features = test_params.n_features;
UMAP::find_ab(handle, &umap_params);
device_buffer<float> X_d(alloc, stream, n_samples * n_features);
device_buffer<int> y_d(alloc, stream, n_samples);
ML::Datasets::make_blobs(handle,
X_d.data(),
y_d.data(),
n_samples,
n_features,
test_params.n_clusters,
true,
nullptr,
nullptr,
1.f,
true,
-10.f,
10.f,
1234ULL);
CUDA_CHECK(cudaStreamSynchronize(stream));
MLCommon::LinAlg::convert_array((float*)y_d.data(), y_d.data(), n_samples, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
device_buffer<float> embeddings1(alloc, stream, n_samples * umap_params.n_components);
float* e1 = embeddings1.data();
get_embedding(handle, X_d.data(), (float*)y_d.data(), e1, test_params, umap_params);
assertions(handle, X_d.data(), e1, test_params, umap_params);
// v21.08: Reproducibility looks to be busted for CTK 11.4. Need to figure out
// why this is happening and re-enable this.
#if CUDART_VERSION == 11040
return;
#else
// Disable reproducibility tests after transformation
if (!test_params.fit_transform) { return; }
#endif
device_buffer<float> embeddings2(alloc, stream, n_samples * umap_params.n_components);
float* e2 = embeddings2.data();
get_embedding(handle, X_d.data(), (float*)y_d.data(), e2, test_params, umap_params);
#if CUDART_VERSION >= 11020
bool equal = are_equal(e1, e2, n_samples * umap_params.n_components, alloc, stream);
if (!equal) {
raft::print_device_vector("e1", e1, 25, std::cout);
raft::print_device_vector("e2", e2, 25, std::cout);
}
ASSERT_TRUE(equal);
#else
ASSERT_TRUE(
raft::devArrMatch(e1, e2, n_samples * umap_params.n_components, raft::Compare<float>{}));
#endif
}
void SetUp() override
{
std::vector<TestParams> test_params_vec = {{false, false, false, 2000, 50, 20, 0.45},
{true, false, false, 2000, 50, 20, 0.45},
{false, true, false, 2000, 50, 20, 0.45},
{false, false, true, 2000, 50, 20, 0.45},
{true, true, false, 2000, 50, 20, 0.45},
{true, false, true, 2000, 50, 20, 0.45},
{false, true, true, 2000, 50, 20, 0.45},
{true, true, true, 2000, 50, 20, 0.45}};
std::vector<UMAPParams> umap_params_vec(4);
umap_params_vec[0].n_components = 2;
umap_params_vec[1].n_components = 10;
umap_params_vec[2].n_components = 21;
umap_params_vec[2].random_state = 43;
umap_params_vec[2].init = 0;
umap_params_vec[2].n_epochs = 500;
umap_params_vec[3].n_components = 25;
umap_params_vec[3].random_state = 43;
umap_params_vec[3].init = 0;
umap_params_vec[3].n_epochs = 500;
for (auto& umap_params : umap_params_vec) {
for (auto& test_params : test_params_vec) {
test(test_params, umap_params);
}
}
}
void TearDown() override {}
};
typedef UMAPParametrizableTest UMAPParametrizableTest;
TEST_F(UMAPParametrizableTest, Result) {}
|
dea5e25ba87ed5d2eec4d9d3132f1503878d9b21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
First attempt at a parallel version of merge sort
Eric Soler 11/2015
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "gputimer.h"
#define ARRAY_SIZE 100000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++){
printf("%d ", array[i]);
if(i == 50)
printf("\n");
}
printf("}\n");
}
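// One merge pass: each thread merges a pair of adjacent sorted runs of length
// blockSize (clamped at the end of the array) into a single sorted run.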
__global__ void merge(int* a,int blockSize,int arraySize){
int s1;
int s2;
int end;
int start;
int mid;
start = blockSize*2*(blockIdx.x * blockDim.x + threadIdx.x);
s1 = start;
s2 = s1 + blockSize;
end = s2 + blockSize;
mid = s1 + blockSize;
if((s2 < arraySize))
{
if(end > arraySize)
end = arraySize;
if(mid > arraySize)
mid = arraySize;
int* tA = new int[end - start];
int counter = 0;
while(counter < end - start)
{
if(s1 < mid && s2 < end)
{
if(a[s1] <= a[s2])
tA[counter++] = a[s1++];
else
tA[counter++] = a[s2++];
}
else if(s1 < mid)
{
tA[counter++] = a[s1++];
}
else if(s2 < end)
{
tA[counter++] = a[s2++];
}
else
{
tA[counter++] = -66;
}
}
for(int i = 0, j = start; i < end - start; i++, j++)
{
a[j] = tA[i];
}
delete [] tA;
}
}
int main(int argc,char **argv)
{
GpuTimer timer;
srand(time(NULL));
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
for(int i = 0; i < ARRAY_SIZE; i++)
{
h_array[i] = rand()%10;
}
//print_array(h_array, ARRAY_SIZE);
// declare, allocate, and zero out GPU memory
int * d_array;
hipMalloc((void **) &d_array, ARRAY_BYTES);
hipMemset((void *) d_array, 0, ARRAY_BYTES);
hipMemcpy(d_array, h_array, ARRAY_BYTES, hipMemcpyHostToDevice);
int numOfThreads;
int blockWidth = 1000;
int subArraySize;
double x = log(ARRAY_SIZE) / log(2);
timer.Start();
int numberOfBlocks;
for(int i = 0; i < x; i++)
{
subArraySize = pow(2,i);
numOfThreads = ceil(ARRAY_SIZE/(subArraySize * 2.0));
numberOfBlocks = ceil(numOfThreads/((float)blockWidth));
hipLaunchKernelGGL(( merge), dim3(numberOfBlocks), ceil(numOfThreads/((float)numberOfBlocks))
, 0, 0, d_array, subArraySize, ARRAY_SIZE);
hipDeviceSynchronize();
}
timer.Stop();
hipMemcpy(h_array, d_array, ARRAY_BYTES, hipMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
hipFree(d_array);
return 0;
} | dea5e25ba87ed5d2eec4d9d3132f1503878d9b21.cu | /*
First attempt at a parallel version of merge sort
Eric Soler 11/2015
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "gputimer.h"
#define ARRAY_SIZE 100000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++){
printf("%d ", array[i]);
if(i == 50)
printf("\n");
}
printf("}\n");
}
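// One merge pass: each thread merges a pair of adjacent sorted runs of length
// blockSize (clamped at the end of the array) into a single sorted run.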
__global__ void merge(int* a,int blockSize,int arraySize){
int s1;
int s2;
int end;
int start;
int mid;
start = blockSize*2*(blockIdx.x * blockDim.x + threadIdx.x);
s1 = start;
s2 = s1 + blockSize;
end = s2 + blockSize;
mid = s1 + blockSize;
if((s2 < arraySize))
{
if(end > arraySize)
end = arraySize;
if(mid > arraySize)
mid = arraySize;
int* tA = new int[end - start];
int counter = 0;
while(counter < end - start)
{
if(s1 < mid && s2 < end)
{
if(a[s1] <= a[s2])
tA[counter++] = a[s1++];
else
tA[counter++] = a[s2++];
}
else if(s1 < mid)
{
tA[counter++] = a[s1++];
}
else if(s2 < end)
{
tA[counter++] = a[s2++];
}
else
{
tA[counter++] = -66;
}
}
for(int i = 0, j = start; i < end - start; i++, j++)
{
a[j] = tA[i];
}
delete [] tA;
}
}
int main(int argc,char **argv)
{
GpuTimer timer;
srand(time(NULL));
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
for(int i = 0; i < ARRAY_SIZE; i++)
{
h_array[i] = rand()%10;
}
//print_array(h_array, ARRAY_SIZE);
// declare, allocate, and zero out GPU memory
int * d_array;
cudaMalloc((void **) &d_array, ARRAY_BYTES);
cudaMemset((void *) d_array, 0, ARRAY_BYTES);
cudaMemcpy(d_array, h_array, ARRAY_BYTES, cudaMemcpyHostToDevice);
int numOfThreads;
int blockWidth = 1000;
int subArraySize;
double x = log(ARRAY_SIZE) / log(2);
timer.Start();
int numberOfBlocks;
for(int i = 0; i < x; i++)
{
subArraySize = pow(2,i);
numOfThreads = ceil(ARRAY_SIZE/(subArraySize * 2.0));
numberOfBlocks = ceil(numOfThreads/((float)blockWidth));
merge<<<numberOfBlocks, ceil(numOfThreads/((float)numberOfBlocks))
>>>(d_array, subArraySize, ARRAY_SIZE);
cudaDeviceSynchronize();
}
timer.Stop();
cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
cudaFree(d_array);
return 0;
} |
a088937908d3abd6d55426f76db674b64d6a5f11.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#define ErrorCheck(ans) { CheckFun((ans), __FILE__, __LINE__); }
inline void CheckFun(hipError_t code, const char *file, int line) {
if (code != hipSuccess) {
fprintf(stderr, "ERROR: %s %s %d\n", hipGetErrorString(code), file, line);
exit(0);
}
}
struct cmp {
__host__ __device__
bool operator()(double lhs, double rhs) const {
return fabs(lhs) < fabs(rhs);
}
};
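// Partial-pivoting row swap: exchanges rows midInColumnID and maxInColumnID.
// The matrix is stored column-major in pitched memory, so threads grid-stride
// over the columns from the pivot column onward and swap the two entries in each.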
__global__ void rowsPermutation(double * __restrict__ matrix, const uint32_t matrixDim, const uint64_t pitch,
const uint64_t midInColumnID, const uint64_t maxInColumnID) {
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x + midInColumnID;
uint32_t offsetx = blockDim.x * gridDim.x;
for (uint32_t i = idx; i < matrixDim; i += offsetx) {
double tmp = *((double*)((char*)matrix + pitch * i) + midInColumnID);
*((double*)((char*)matrix + pitch * i) + midInColumnID) = *((double*)((char*)matrix + pitch * i) + maxInColumnID);
*((double*)((char*)matrix + pitch * i) + maxInColumnID) = tmp;
}
return;
}
__global__ void updateBotRows(double * __restrict__ matrix, const uint32_t matrixDim, const uint64_t pitch,
const uint64_t midInColumnID, const double midInColumnVal) {
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x + midInColumnID + 1;
uint32_t idy = threadIdx.y + blockIdx.y * blockDim.y + midInColumnID + 1;
uint32_t offsetx = blockDim.x * gridDim.x;
uint32_t offsety = blockDim.y * gridDim.y;
double factor;
for (uint32_t j = idy; j < matrixDim; j += offsety) {
factor = *((double*)((char*)matrix + pitch * midInColumnID) + j);
if (fabs(factor) < 1e-7) continue;
for (uint32_t i = idx; i < matrixDim; i += offsetx) {
*((double*)((char*)matrix + pitch * i) + j) -= *((double*)((char*)matrix + pitch * i) + midInColumnID) * factor / midInColumnVal;
}
}
return;
}
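// Computes the determinant by Gaussian elimination with partial pivoting:
// the result is the product of the pivots, negated once for every row swap.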
__host__ double findDet(double * __restrict__ matrix, const uint32_t matrixDim) {
double det = 1;
double *matrixDev;
uint64_t devPitch, hostPitch;
hostPitch = sizeof(double) * matrixDim;
hipMallocPitch(&matrixDev, &devPitch, matrixDim * sizeof(double), matrixDim);
hipMemcpy2D(matrixDev, devPitch, matrix, hostPitch, sizeof(double) * matrixDim, matrixDim, hipMemcpyHostToDevice);
for (uint32_t i = 0; i < matrixDim; ++i) {
thrust::device_ptr<double> currColumnPtr((double*)((char*)matrixDev + devPitch * i));
thrust::device_ptr<double> start((double*)((char*)matrixDev + devPitch * i) + i);
thrust::device_ptr<double> end((double*)((char*)matrixDev + devPitch * i) + matrixDim);
thrust::device_ptr<double> maxInColumnPtr = thrust::max_element(start, end, cmp());
uint64_t maxInColumnID = (uint64_t)(maxInColumnPtr - currColumnPtr);
double maxInColumnVal = *maxInColumnPtr;
det *= maxInColumnVal;
if (fabs(maxInColumnVal) < 1e-7) {
det = 0;
break;
}
if (maxInColumnID != i) {
det *= -1;
            hipLaunchKernelGGL(( rowsPermutation), dim3(64), dim3(64), 0, 0, matrixDev, matrixDim, devPitch, i, maxInColumnID);
}
if (i != matrixDim - 1) {
            hipLaunchKernelGGL(( updateBotRows), dim3(32, 32), dim3(1, 512), 0, 0, matrixDev, matrixDim, devPitch, i, maxInColumnVal);
}
}
hipFree(matrixDev);
return det;
}
int main() {
uint32_t matrixDim;
double *matrix;
scanf("%" SCNu32, &matrixDim);
matrix = (double*)malloc(sizeof(double) * matrixDim * matrixDim);
for (uint32_t i = 0; i < matrixDim; ++i) {
for (uint32_t j = 0; j < matrixDim; ++j) {
scanf("%lf", &matrix[j * matrixDim + i]);
}
}
double det = findDet(matrix, matrixDim);
if (matrixDim == 0) det = 0;
printf("%.10e\n", det);
free(matrix);
return 0;
} | a088937908d3abd6d55426f76db674b64d6a5f11.cu | #include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#define ErrorCheck(ans) { CheckFun((ans), __FILE__, __LINE__); }
inline void CheckFun(cudaError_t code, const char *file, int line) {
if (code != cudaSuccess) {
fprintf(stderr, "ERROR: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(0);
}
}
struct cmp {
__host__ __device__
bool operator()(double lhs, double rhs) const {
return fabs(lhs) < fabs(rhs);
}
};
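// Partial-pivoting row swap: exchanges rows midInColumnID and maxInColumnID.
// The matrix is stored column-major in pitched memory, so threads grid-stride
// over the columns from the pivot column onward and swap the two entries in each.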
__global__ void rowsPermutation(double * __restrict__ matrix, const uint32_t matrixDim, const uint64_t pitch,
const uint64_t midInColumnID, const uint64_t maxInColumnID) {
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x + midInColumnID;
uint32_t offsetx = blockDim.x * gridDim.x;
for (uint32_t i = idx; i < matrixDim; i += offsetx) {
double tmp = *((double*)((char*)matrix + pitch * i) + midInColumnID);
*((double*)((char*)matrix + pitch * i) + midInColumnID) = *((double*)((char*)matrix + pitch * i) + maxInColumnID);
*((double*)((char*)matrix + pitch * i) + maxInColumnID) = tmp;
}
return;
}
__global__ void updateBotRows(double * __restrict__ matrix, const uint32_t matrixDim, const uint64_t pitch,
const uint64_t midInColumnID, const double midInColumnVal) {
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x + midInColumnID + 1;
uint32_t idy = threadIdx.y + blockIdx.y * blockDim.y + midInColumnID + 1;
uint32_t offsetx = blockDim.x * gridDim.x;
uint32_t offsety = blockDim.y * gridDim.y;
double factor;
for (uint32_t j = idy; j < matrixDim; j += offsety) {
factor = *((double*)((char*)matrix + pitch * midInColumnID) + j);
if (fabs(factor) < 1e-7) continue;
for (uint32_t i = idx; i < matrixDim; i += offsetx) {
*((double*)((char*)matrix + pitch * i) + j) -= *((double*)((char*)matrix + pitch * i) + midInColumnID) * factor / midInColumnVal;
}
}
return;
}
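// Computes the determinant by Gaussian elimination with partial pivoting:
// the result is the product of the pivots, negated once for every row swap.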
__host__ double findDet(double * __restrict__ matrix, const uint32_t matrixDim) {
double det = 1;
double *matrixDev;
uint64_t devPitch, hostPitch;
hostPitch = sizeof(double) * matrixDim;
cudaMallocPitch(&matrixDev, &devPitch, matrixDim * sizeof(double), matrixDim);
cudaMemcpy2D(matrixDev, devPitch, matrix, hostPitch, sizeof(double) * matrixDim, matrixDim, cudaMemcpyHostToDevice);
for (uint32_t i = 0; i < matrixDim; ++i) {
thrust::device_ptr<double> currColumnPtr((double*)((char*)matrixDev + devPitch * i));
thrust::device_ptr<double> start((double*)((char*)matrixDev + devPitch * i) + i);
thrust::device_ptr<double> end((double*)((char*)matrixDev + devPitch * i) + matrixDim);
thrust::device_ptr<double> maxInColumnPtr = thrust::max_element(start, end, cmp());
uint64_t maxInColumnID = (uint64_t)(maxInColumnPtr - currColumnPtr);
double maxInColumnVal = *maxInColumnPtr;
det *= maxInColumnVal;
if (fabs(maxInColumnVal) < 1e-7) {
det = 0;
break;
}
if (maxInColumnID != i) {
det *= -1;
            rowsPermutation<<<dim3(64), dim3(64)>>>(matrixDev, matrixDim, devPitch, i, maxInColumnID);
}
if (i != matrixDim - 1) {
            updateBotRows<<<dim3(32, 32), dim3(1, 512)>>>(matrixDev, matrixDim, devPitch, i, maxInColumnVal);
}
}
cudaFree(matrixDev);
return det;
}
int main() {
uint32_t matrixDim;
double *matrix;
scanf("%" SCNu32, &matrixDim);
matrix = (double*)malloc(sizeof(double) * matrixDim * matrixDim);
for (uint32_t i = 0; i < matrixDim; ++i) {
for (uint32_t j = 0; j < matrixDim; ++j) {
scanf("%lf", &matrix[j * matrixDim + i]);
}
}
double det = findDet(matrix, matrixDim);
if (matrixDim == 0) det = 0;
printf("%.10e\n", det);
free(matrix);
return 0;
} |
245921730b0e870f77b4f6ce48fc603a82441378.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
{
}
__global__ void addKernel(int* c, const int* a, const int* b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
} | 245921730b0e870f77b4f6ce48fc603a82441378.cu | #include "includes.h"
extern "C"
{
}
__global__ void addKernel(int* c, const int* a, const int* b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
} |
55de7a57a1da4632b703b7e9211f9031fe553d10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
* Third party copyrights are property of their respective owners.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id: $
* Ported to PCL by Koen Buys : Attention Work in progress!
*/
#include <iostream>
#include <string>
#include <vector>
#include "NCV.hpp"
using namespace std;
//==============================================================================
//
// Error handling helpers
//
//==============================================================================
static void stdDebugOutput(const string &msg)
{
cout << msg;
}
static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput;
void ncvDebugOutput(const string &msg)
{
debugOutputHandler(msg);
}
void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func)
{
debugOutputHandler = func;
}
//==============================================================================
//
// Memory wrappers and helpers
//
//==============================================================================
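// Rounds 'what' up to the nearest multiple of 'alignment' (alignment must be a power of two).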
Ncv32u alignUp(Ncv32u what, Ncv32u alignment)
{
Ncv32u alignMask = alignment-1;
Ncv32u inverseAlignMask = ~alignMask;
Ncv32u res = (what + alignMask) & inverseAlignMask;
return res;
}
void NCVMemPtr::clear()
{
ptr = NULL;
memtype = NCVMemoryTypeNone;
}
void NCVMemSegment::clear()
{
begin.clear();
size = 0;
}
NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, size_t sz, hipStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
memcpy(dst, src, sz);
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType,
const void *src, Ncv32u srcPitch, NCVMemoryType srcType,
Ncv32u widthbytes, Ncv32u height, hipStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
for (Ncv32u i=0; i<height; i++)
{
memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
//===================================================================
//
// NCVMemStackAllocator class members implementation
//
//===================================================================
NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
begin(NULL),
end(NULL),
_memType(NCVMemoryTypeNone),
_alignment(alignment),
bReusesMemory(false)
{
NcvBool bProperAlignment = (alignment & (alignment-1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2");
}
NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, size_t capacity, Ncv32u alignment, void *reusePtr)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
_memType(memT),
_alignment(alignment)
{
NcvBool bProperAlignment = (alignment & (alignment-1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2");
ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type");
allocBegin = NULL;
if (reusePtr == NULL && capacity != 0)
{
bReusesMemory = false;
switch (memT)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPageable:
allocBegin = (Ncv8u *)malloc(capacity);
break;
default:;
}
}
else
{
bReusesMemory = true;
allocBegin = (Ncv8u *)reusePtr;
}
if (capacity == 0)
{
allocBegin = (Ncv8u *)(0x1);
}
if (!isCounting())
{
begin = allocBegin;
end = begin + capacity;
}
}
NCVMemStackAllocator::~NCVMemStackAllocator()
{
if (allocBegin != NULL)
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction");
if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1)))
{
switch (_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipFree(allocBegin), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostFree(allocBegin), );
break;
case NCVMemoryTypeHostPageable:
free(allocBegin);
break;
default:;
}
}
allocBegin = NULL;
}
}
NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
size = alignUp(size, this->_alignment);
this->currentSize += size;
this->_maxSize = ::max(this->_maxSize, this->currentSize);
if (!isCounting())
{
size_t availSize = end - begin;
ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY);
}
seg.begin.ptr = begin;
seg.begin.memtype = this->_memType;
seg.size = size;
begin += size;
return NCV_SUCCESS;
}
NCVStatus NCVMemStackAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER);
currentSize -= seg.size;
begin -= seg.size;
seg.clear();
ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC);
return NCV_SUCCESS;
}
NcvBool NCVMemStackAllocator::isInitialized(void) const
{
    return (((this->_alignment & (this->_alignment-1)) == 0) && isCounting()) || (this->allocBegin != NULL);
}
NcvBool NCVMemStackAllocator::isCounting(void) const
{
return this->_memType == NCVMemoryTypeNone;
}
NCVMemoryType NCVMemStackAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemStackAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemStackAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// NCVMemNativeAllocator class members implementation
//
//===================================================================
NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment)
:
currentSize(0),
_maxSize(0),
_memType(memT),
_alignment(alignment)
{
ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", );
}
NCVMemNativeAllocator::~NCVMemNativeAllocator()
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak");
}
NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
seg.begin.ptr = (Ncv8u *)malloc(size);
break;
default:;
}
this->currentSize += alignUp(size, this->_alignment);
this->_maxSize = ::max(this->_maxSize, this->currentSize);
seg.begin.memtype = this->_memType;
seg.size = size;
return NCV_SUCCESS;
}
NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(currentSize >= alignUp(seg.size, this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC);
currentSize -= alignUp(seg.size, this->_alignment);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
free(seg.begin.ptr);
break;
default:;
}
seg.clear();
return NCV_SUCCESS;
}
NcvBool NCVMemNativeAllocator::isInitialized(void) const
{
return (this->_alignment != 0);
}
NcvBool NCVMemNativeAllocator::isCounting(void) const
{
return false;
}
NCVMemoryType NCVMemNativeAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemNativeAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemNativeAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// Time and timer routines
//
//===================================================================
typedef struct _NcvTimeMoment NcvTimeMoment;
#if defined(_WIN32) || defined(_WIN64)
#include <Windows.h>
typedef struct _NcvTimeMoment
{
LONGLONG moment, freq;
} NcvTimeMoment;
static void _ncvQueryMoment(NcvTimeMoment *t)
{
QueryPerformanceFrequency((LARGE_INTEGER *)&(t->freq));
QueryPerformanceCounter((LARGE_INTEGER *)&(t->moment));
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->moment / t->freq;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
#elif defined(__GNUC__)
#include <sys/time.h>
typedef struct _NcvTimeMoment
{
struct timeval tv;
struct timezone tz;
} NcvTimeMoment;
void _ncvQueryMoment(NcvTimeMoment *t)
{
gettimeofday(& t->tv, & t->tz);
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->tv.tv_sec + (double)t->tv.tv_usec;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 + (double)t2->tv.tv_usec - (double)t1->tv.tv_usec);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return ((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000;
}
#endif //#if defined(_WIN32) || defined(_WIN64)
struct _NcvTimer
{
NcvTimeMoment t1, t2;
};
NcvTimer ncvStartTimer(void)
{
struct _NcvTimer *t;
t = (struct _NcvTimer *)malloc(sizeof(struct _NcvTimer));
_ncvQueryMoment(&t->t1);
return t;
}
double ncvEndQueryTimerUs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMicroseconds(&t->t1, &t->t2);
free(t);
return res;
}
double ncvEndQueryTimerMs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMilliseconds(&t->t1, &t->t2);
free(t);
return res;
}
//===================================================================
//
// Operations with rectangles
//
//===================================================================
//from OpenCV
//void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights);
/*
NCVStatus ncvGroupRectangles_host(NCVVector<NcvRect32u> &hypotheses,
Ncv32u &numHypotheses,
Ncv32u minNeighbors,
Ncv32f intersectEps,
NCVVector<Ncv32u> *hypothesesWeights)
{
ncvAssertReturn(hypotheses.memType() == NCVMemoryTypeHostPageable ||
hypotheses.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
if (hypothesesWeights != NULL)
{
ncvAssertReturn(hypothesesWeights->memType() == NCVMemoryTypeHostPageable ||
hypothesesWeights->memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
}
if (numHypotheses == 0)
{
return NCV_SUCCESS;
}
std::vector<NcvRect32u> rects(numHypotheses);
memcpy(&rects[0], hypotheses.ptr(), numHypotheses * sizeof(NcvRect32u));
std::vector<Ncv32u> weights;
if (hypothesesWeights != NULL)
{
groupRectangles(rects, minNeighbors, intersectEps, &weights);
}
else
{
groupRectangles(rects, minNeighbors, intersectEps, NULL);
}
numHypotheses = (Ncv32u)rects.size();
if (numHypotheses > 0)
{
memcpy(hypotheses.ptr(), &rects[0], numHypotheses * sizeof(NcvRect32u));
}
if (hypothesesWeights != NULL)
{
memcpy(hypothesesWeights->ptr(), &weights[0], numHypotheses * sizeof(Ncv32u));
}
return NCV_SUCCESS;
}
*/
template <class T>
static NCVStatus drawRectsWrapperHost(T *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
T color)
{
ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects != 0, NCV_SUCCESS);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
for (Ncv32u i=0; i<numRects; i++)
{
NcvRect32u rect = h_rects[i];
if (rect.x < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
{
h_dst[i*dstStride+rect.x] = color;
}
}
if (rect.x+rect.width-1 < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
{
h_dst[i*dstStride+rect.x+rect.width-1] = color;
}
}
if (rect.y < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[rect.y*dstStride+j] = color;
}
}
if (rect.y + rect.height - 1 < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[(rect.y+rect.height-1)*dstStride+j] = color;
}
}
}
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv8u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv32u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
const Ncv32u NUMTHREADS_DRAWRECTS = 32;
const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5;
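// Each rectangle is drawn by four thread blocks, one per edge: bit 0 of the
// flat block id selects vertical vs. horizontal edges, bit 1 selects the
// top/left vs. bottom/right edge, and the remaining bits index the rectangle.
// Each block then strides along its edge in chunks of NUMTHREADS_DRAWRECTS pixels.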
template <class T>
__global__ void drawRects(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
    if (blockId >= numRects * 4)  // ">=": block ids produced by grid padding must not index d_rects
{
return;
}
NcvRect32u curRect = d_rects[blockId >> 2];
NcvBool bVertical = blockId & 0x1;
NcvBool bTopLeft = blockId & 0x2;
Ncv32u pt0x, pt0y;
if (bVertical)
{
Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1;
pt0y = curRect.y;
if (pt0x < dstWidth)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptY < pt0y + curRect.height && ptY < dstHeight)
{
d_dst[ptY * dstStride + pt0x] = color;
}
}
}
}
else
{
Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = curRect.x;
pt0y = bTopLeft ? curRect.y : curRect.y + curRect.height - 1;
if (pt0y < dstHeight)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptX < pt0x + curRect.width && ptX < dstWidth)
{
d_dst[pt0y * dstStride + ptX] = color;
}
}
}
}
}
template <class T>
static NCVStatus drawRectsWrapperDevice(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color,
hipStream_t cuStream)
{
ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
if (numRects == 0)
{
return NCV_SUCCESS;
}
dim3 grid(numRects * 4);
dim3 block(NUMTHREADS_DRAWRECTS);
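    // A 1D grid of numRects * 4 blocks can exceed the 65535 per-dimension
    // limit of older GPUs, so the surplus is folded into grid.y below; the
    // kernel reconstructs the flat block id as blockIdx.y * 65535 + blockIdx.x.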
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
hipLaunchKernelGGL(( drawRects<T>), dim3(grid), dim3(block), 0, 0, d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv8u color,
hipStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv32u color,
hipStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
| 55de7a57a1da4632b703b7e9211f9031fe553d10.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
* Third party copyrights are property of their respective owners.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id: $
* Ported to PCL by Koen Buys : Attention Work in progress!
*/
#include <iostream>
#include <string>
#include <vector>
#include "NCV.hpp"
using namespace std;
//==============================================================================
//
// Error handling helpers
//
//==============================================================================
static void stdDebugOutput(const string &msg)
{
cout << msg;
}
static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput;
void ncvDebugOutput(const string &msg)
{
debugOutputHandler(msg);
}
void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func)
{
debugOutputHandler = func;
}
//==============================================================================
//
// Memory wrappers and helpers
//
//==============================================================================
Ncv32u alignUp(Ncv32u what, Ncv32u alignment)
{
Ncv32u alignMask = alignment-1;
Ncv32u inverseAlignMask = ~alignMask;
Ncv32u res = (what + alignMask) & inverseAlignMask;
return res;
}
void NCVMemPtr::clear()
{
ptr = NULL;
memtype = NCVMemoryTypeNone;
}
void NCVMemSegment::clear()
{
begin.clear();
size = 0;
}
NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, size_t sz, cudaStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
memcpy(dst, src, sz);
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType,
const void *src, Ncv32u srcPitch, NCVMemoryType srcType,
Ncv32u widthbytes, Ncv32u height, cudaStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
for (Ncv32u i=0; i<height; i++)
{
memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
//===================================================================
//
// NCVMemStackAllocator class members implementation
//
//===================================================================
NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
begin(NULL),
end(NULL),
_memType(NCVMemoryTypeNone),
_alignment(alignment),
bReusesMemory(false)
{
NcvBool bProperAlignment = (alignment & (alignment-1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2");
}
NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, size_t capacity, Ncv32u alignment, void *reusePtr)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
_memType(memT),
_alignment(alignment)
{
NcvBool bProperAlignment = (alignment & (alignment-1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2");
ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type");
allocBegin = NULL;
if (reusePtr == NULL && capacity != 0)
{
bReusesMemory = false;
switch (memT)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaMallocHost(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPageable:
allocBegin = (Ncv8u *)malloc(capacity);
break;
default:;
}
}
else
{
bReusesMemory = true;
allocBegin = (Ncv8u *)reusePtr;
}
if (capacity == 0)
{
allocBegin = (Ncv8u *)(0x1);
}
if (!isCounting())
{
begin = allocBegin;
end = begin + capacity;
}
}
NCVMemStackAllocator::~NCVMemStackAllocator()
{
if (allocBegin != NULL)
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction");
if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1)))
{
switch (_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaFree(allocBegin), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaFreeHost(allocBegin), );
break;
case NCVMemoryTypeHostPageable:
free(allocBegin);
break;
default:;
}
}
allocBegin = NULL;
}
}
NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
size = alignUp(size, this->_alignment);
this->currentSize += size;
this->_maxSize = std::max(this->_maxSize, this->currentSize);
if (!isCounting())
{
size_t availSize = end - begin;
ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY);
}
seg.begin.ptr = begin;
seg.begin.memtype = this->_memType;
seg.size = size;
begin += size;
return NCV_SUCCESS;
}
NCVStatus NCVMemStackAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER);
currentSize -= seg.size;
begin -= seg.size;
seg.clear();
ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC);
return NCV_SUCCESS;
}
NcvBool NCVMemStackAllocator::isInitialized(void) const
{
    return (((this->_alignment & (this->_alignment-1)) == 0) && isCounting()) || (this->allocBegin != NULL);
}
NcvBool NCVMemStackAllocator::isCounting(void) const
{
return this->_memType == NCVMemoryTypeNone;
}
NCVMemoryType NCVMemStackAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemStackAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemStackAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// NCVMemNativeAllocator class members implementation
//
//===================================================================
NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment)
:
currentSize(0),
_maxSize(0),
_memType(memT),
_alignment(alignment)
{
ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", );
}
NCVMemNativeAllocator::~NCVMemNativeAllocator()
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak");
}
NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaMallocHost(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
seg.begin.ptr = (Ncv8u *)malloc(size);
break;
default:;
}
this->currentSize += alignUp(size, this->_alignment);
this->_maxSize = std::max(this->_maxSize, this->currentSize);
seg.begin.memtype = this->_memType;
seg.size = size;
return NCV_SUCCESS;
}
NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(currentSize >= alignUp(seg.size, this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC);
currentSize -= alignUp(seg.size, this->_alignment);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(cudaFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(cudaFreeHost(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
free(seg.begin.ptr);
break;
default:;
}
seg.clear();
return NCV_SUCCESS;
}
NcvBool NCVMemNativeAllocator::isInitialized(void) const
{
return (this->_alignment != 0);
}
NcvBool NCVMemNativeAllocator::isCounting(void) const
{
return false;
}
NCVMemoryType NCVMemNativeAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemNativeAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemNativeAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// Time and timer routines
//
//===================================================================
typedef struct _NcvTimeMoment NcvTimeMoment;
#if defined(_WIN32) || defined(_WIN64)
#include <Windows.h>
typedef struct _NcvTimeMoment
{
LONGLONG moment, freq;
} NcvTimeMoment;
static void _ncvQueryMoment(NcvTimeMoment *t)
{
QueryPerformanceFrequency((LARGE_INTEGER *)&(t->freq));
QueryPerformanceCounter((LARGE_INTEGER *)&(t->moment));
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->moment / t->freq;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
#elif defined(__GNUC__)
#include <sys/time.h>
typedef struct _NcvTimeMoment
{
struct timeval tv;
struct timezone tz;
} NcvTimeMoment;
void _ncvQueryMoment(NcvTimeMoment *t)
{
gettimeofday(& t->tv, & t->tz);
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->tv.tv_sec + (double)t->tv.tv_usec;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 + (double)t2->tv.tv_usec - (double)t1->tv.tv_usec);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return ((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000;
}
#endif //#if defined(_WIN32) || defined(_WIN64)
struct _NcvTimer
{
NcvTimeMoment t1, t2;
};
NcvTimer ncvStartTimer(void)
{
struct _NcvTimer *t;
t = (struct _NcvTimer *)malloc(sizeof(struct _NcvTimer));
_ncvQueryMoment(&t->t1);
return t;
}
double ncvEndQueryTimerUs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMicroseconds(&t->t1, &t->t2);
free(t);
return res;
}
double ncvEndQueryTimerMs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMilliseconds(&t->t1, &t->t2);
free(t);
return res;
}
//===================================================================
//
// Operations with rectangles
//
//===================================================================
//from OpenCV
//void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights);
/*
NCVStatus ncvGroupRectangles_host(NCVVector<NcvRect32u> &hypotheses,
Ncv32u &numHypotheses,
Ncv32u minNeighbors,
Ncv32f intersectEps,
NCVVector<Ncv32u> *hypothesesWeights)
{
ncvAssertReturn(hypotheses.memType() == NCVMemoryTypeHostPageable ||
hypotheses.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
if (hypothesesWeights != NULL)
{
ncvAssertReturn(hypothesesWeights->memType() == NCVMemoryTypeHostPageable ||
hypothesesWeights->memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
}
if (numHypotheses == 0)
{
return NCV_SUCCESS;
}
std::vector<NcvRect32u> rects(numHypotheses);
memcpy(&rects[0], hypotheses.ptr(), numHypotheses * sizeof(NcvRect32u));
std::vector<Ncv32u> weights;
if (hypothesesWeights != NULL)
{
groupRectangles(rects, minNeighbors, intersectEps, &weights);
}
else
{
groupRectangles(rects, minNeighbors, intersectEps, NULL);
}
numHypotheses = (Ncv32u)rects.size();
if (numHypotheses > 0)
{
memcpy(hypotheses.ptr(), &rects[0], numHypotheses * sizeof(NcvRect32u));
}
if (hypothesesWeights != NULL)
{
memcpy(hypothesesWeights->ptr(), &weights[0], numHypotheses * sizeof(Ncv32u));
}
return NCV_SUCCESS;
}
*/
template <class T>
static NCVStatus drawRectsWrapperHost(T *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
T color)
{
ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects != 0, NCV_SUCCESS);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
for (Ncv32u i=0; i<numRects; i++)
{
NcvRect32u rect = h_rects[i];
if (rect.x < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
{
h_dst[i*dstStride+rect.x] = color;
}
}
if (rect.x+rect.width-1 < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
{
h_dst[i*dstStride+rect.x+rect.width-1] = color;
}
}
if (rect.y < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[rect.y*dstStride+j] = color;
}
}
if (rect.y + rect.height - 1 < dstHeight)
{
for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
{
h_dst[(rect.y+rect.height-1)*dstStride+j] = color;
}
}
}
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv8u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *h_rects,
Ncv32u numRects,
Ncv32u color)
{
return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
const Ncv32u NUMTHREADS_DRAWRECTS = 32;
const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5;
template <class T>
__global__ void drawRects(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
    if (blockId >= numRects * 4)  // ">=": block ids produced by grid padding must not index d_rects
{
return;
}
NcvRect32u curRect = d_rects[blockId >> 2];
NcvBool bVertical = blockId & 0x1;
NcvBool bTopLeft = blockId & 0x2;
Ncv32u pt0x, pt0y;
if (bVertical)
{
Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1;
pt0y = curRect.y;
if (pt0x < dstWidth)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptY < pt0y + curRect.height && ptY < dstHeight)
{
d_dst[ptY * dstStride + pt0x] = color;
}
}
}
}
else
{
Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = curRect.x;
pt0y = bTopLeft ? curRect.y : curRect.y + curRect.height - 1;
if (pt0y < dstHeight)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptX < pt0x + curRect.width && ptX < dstWidth)
{
d_dst[pt0y * dstStride + ptX] = color;
}
}
}
}
}
template <class T>
static NCVStatus drawRectsWrapperDevice(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color,
cudaStream_t cuStream)
{
ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
if (numRects == 0)
{
return NCV_SUCCESS;
}
dim3 grid(numRects * 4);
dim3 block(NUMTHREADS_DRAWRECTS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
drawRects<T><<<grid, block>>>(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv8u color,
cudaStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv32u color,
cudaStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
|
f19ddd8c7437b898a7046cb40301d15b02425107.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdexcept>
#include <thread>
#include <crcham/codeword.hpp>
#include <crcham/crc.hpp>
#include <crcham/evaluator.hpp>
#include <crcham/math.hpp>
#include <omp.h>
namespace crcham {
namespace {
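// weightsKernel enumerates error patterns: each thread grid-strides over the
// ncrll(codeword_bits, error_bits) = C(n, e) combinations, builds the codeword
// with exactly error_bits bits set for its combination index (permute), and
// counts how many of those corrupted codewords still carry a CRC field
// (extract) equal to the freshly recomputed CRC. The per-thread counts are
// summed on the host, giving the number of undetectable e-bit error patterns.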
template <class CRC>
__global__
void weightsKernel(size_t* weights, CRC crc, size_t message_bits, size_t error_bits) {
size_t codeword_bits = message_bits + crc.length();
size_t codeword_bytes = codeword_bits / 8;
if (codeword_bits % 8 != 0) {
codeword_bytes++;
}
auto codeword = static_cast<uint8_t*>(
malloc(codeword_bytes * sizeof(uint8_t)));
const size_t widx = blockIdx.x * blockDim.x + threadIdx.x;
size_t pincr = gridDim.x * blockDim.x;
uint64_t pidx = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t pmax = ncrll(codeword_bits, error_bits);
size_t weight = 0;
for (; pidx < pmax; pidx += pincr) {
permute(codeword, codeword_bytes, pidx, codeword_bits, error_bits);
uint64_t error_crc = extract(codeword, codeword_bytes, codeword_bits, crc.length());
uint64_t good_crc = crc.compute(codeword, codeword_bytes);
if (error_crc == good_crc) {
weight++;
}
}
weights[widx] = weight;
free(codeword);
}
template <class CRC>
size_t weightsOpenMP(const CRC& crc, size_t message_bits, size_t error_bits)
{
size_t codeword_bits = message_bits + crc.length();
size_t codeword_bytes = codeword_bits / 8;
if (codeword_bits % 8 != 0) {
codeword_bytes++;
}
auto num_threads = ::max(3u, std::thread::hardware_concurrency()) - 2;
auto codewords = new uint8_t[num_threads * codeword_bytes]();
auto weights = new size_t[num_threads]();
uint64_t pmax = ncrll(codeword_bits, error_bits);
#pragma omp parallel for num_threads(num_threads)
for (uint64_t pidx = 0; pidx < pmax; pidx++) {
auto codeword = codewords + codeword_bytes * omp_get_thread_num();
permute(codeword, codeword_bytes, pidx, codeword_bits, error_bits);
uint64_t error_crc = extract(codeword, codeword_bytes, codeword_bits, crc.length());
uint64_t good_crc = crc.compute(codeword, codeword_bytes);
if (error_crc == good_crc) {
weights[omp_get_thread_num()]++;
}
}
delete[] codewords;
size_t weight = 0;
for (size_t i = 0; i < num_threads; i++) {
weight += weights[i];
}
return weight;
}
}
WeightsEvaluator::WeightsEvaluator(uint64_t polynomial, size_t message_bits, size_t error_bits)
: d_polynomial(polynomial)
, d_polylen(crcham::NaiveCRC(polynomial).length())
, d_message(message_bits)
, d_errors(error_bits)
, d_evaluations(crcham::ncrll(message_bits + d_polylen, error_bits))
{
}
template<>
void WeightsEvaluator::run<true>()
{
// Check that there is an available CUDA device
{
int devcnt = 0;
hipGetDeviceCount(&devcnt);
if (devcnt == 0) {
throw std::runtime_error("A supported NVIDIA GPU could not be found.");
}
}
// CPU should not busy-wait for the kernel to finish
hipSetDeviceFlags(hipDeviceScheduleBlockingSync);
// Find optimal block and grid sizes
int grid_size;
int block_size;
hipOccupancyMaxPotentialBlockSize(&grid_size, &block_size,
crcham::weightsKernel<crcham::TabularCRC>);
// Set maximum allowable memory sizes
size_t original_heap;
size_t required_heap = 2 * grid_size * block_size * (d_message / 8);
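    // Each kernel thread malloc()s its own codeword buffer on the device heap,
    // so the heap must cover grid_size * block_size buffers of roughly
    // (message_bits + crc_length) / 8 bytes each; the factor of 2 is presumably
    // headroom over that per-thread estimate.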
hipDeviceGetLimit(&original_heap, hipLimitMallocHeapSize);
hipDeviceSetLimit(hipLimitMallocHeapSize,
::max(original_heap, required_heap));
// Allocate memory for thread-local weights
size_t* weights;
hipMallocManaged(&weights, grid_size * block_size * sizeof(size_t));
hipMemset(weights, 0, grid_size * block_size * sizeof(size_t));
// Run the kernel and block until it is done
hipEvent_t start_event;
hipEvent_t stop_event;
hipEventCreate(&start_event);
hipEventCreate(&stop_event);
hipEventRecord(start_event);
if (d_polylen < 8) {
crcham::NaiveCRC ncrc(d_polynomial);
hipLaunchKernelGGL(( crcham::weightsKernel<crcham::NaiveCRC>), dim3(grid_size), dim3(block_size), 0, 0,
weights, ncrc, d_message, d_errors);
}
else {
crcham::TabularCRC tcrc(d_polynomial);
hipLaunchKernelGGL(( crcham::weightsKernel<crcham::TabularCRC>), dim3(grid_size), dim3(block_size), 0, 0,
weights, tcrc, d_message, d_errors);
}
hipEventRecord(stop_event);
hipEventSynchronize(stop_event);
float millis = 0;
hipEventElapsedTime(&millis, start_event, stop_event);
d_elapsed = std::chrono::milliseconds((unsigned long) millis);
// Accumulate results from all threads
d_weight = 0;
for (size_t i = 0; i < grid_size * block_size; i++) {
d_weight += weights[i];
}
hipFree(weights);
}
template<>
void WeightsEvaluator::run<false>()
{
auto timestamp = std::chrono::steady_clock::now();
if (d_polylen < 8) {
crcham::NaiveCRC ncrc(d_polynomial);
d_weight = weightsOpenMP(ncrc, d_message, d_errors);
}
else {
crcham::TabularCRC tcrc(d_polynomial);
d_weight = weightsOpenMP(tcrc, d_message, d_errors);
}
d_elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - timestamp);
}
size_t WeightsEvaluator::evaluations() const {
return d_evaluations;
}
size_t WeightsEvaluator::weight() const {
return d_weight;
}
std::chrono::milliseconds WeightsEvaluator::elapsed() const {
return d_elapsed;
}
}
| f19ddd8c7437b898a7046cb40301d15b02425107.cu | #include <stdexcept>
#include <thread>
#include <crcham/codeword.hpp>
#include <crcham/crc.hpp>
#include <crcham/evaluator.hpp>
#include <crcham/math.hpp>
#include <omp.h>
namespace crcham {
namespace {
template <class CRC>
__global__
void weightsKernel(size_t* weights, CRC crc, size_t message_bits, size_t error_bits) {
size_t codeword_bits = message_bits + crc.length();
size_t codeword_bytes = codeword_bits / 8;
if (codeword_bits % 8 != 0) {
codeword_bytes++;
}
auto codeword = static_cast<uint8_t*>(
malloc(codeword_bytes * sizeof(uint8_t)));
const size_t widx = blockIdx.x * blockDim.x + threadIdx.x;
size_t pincr = gridDim.x * blockDim.x;
uint64_t pidx = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t pmax = ncrll(codeword_bits, error_bits);
size_t weight = 0;
for (; pidx < pmax; pidx += pincr) {
permute(codeword, codeword_bytes, pidx, codeword_bits, error_bits);
uint64_t error_crc = extract(codeword, codeword_bytes, codeword_bits, crc.length());
uint64_t good_crc = crc.compute(codeword, codeword_bytes);
if (error_crc == good_crc) {
weight++;
}
}
weights[widx] = weight;
free(codeword);
}
template <class CRC>
size_t weightsOpenMP(const CRC& crc, size_t message_bits, size_t error_bits)
{
size_t codeword_bits = message_bits + crc.length();
size_t codeword_bytes = codeword_bits / 8;
if (codeword_bits % 8 != 0) {
codeword_bytes++;
}
auto num_threads = std::max(3u, std::thread::hardware_concurrency()) - 2;
auto codewords = new uint8_t[num_threads * codeword_bytes]();
auto weights = new size_t[num_threads]();
uint64_t pmax = ncrll(codeword_bits, error_bits);
#pragma omp parallel for num_threads(num_threads)
for (uint64_t pidx = 0; pidx < pmax; pidx++) {
auto codeword = codewords + codeword_bytes * omp_get_thread_num();
permute(codeword, codeword_bytes, pidx, codeword_bits, error_bits);
uint64_t error_crc = extract(codeword, codeword_bytes, codeword_bits, crc.length());
uint64_t good_crc = crc.compute(codeword, codeword_bytes);
if (error_crc == good_crc) {
weights[omp_get_thread_num()]++;
}
}
delete[] codewords;
size_t weight = 0;
for (size_t i = 0; i < num_threads; i++) {
weight += weights[i];
}
return weight;
}
}
WeightsEvaluator::WeightsEvaluator(uint64_t polynomial, size_t message_bits, size_t error_bits)
: d_polynomial(polynomial)
, d_polylen(crcham::NaiveCRC(polynomial).length())
, d_message(message_bits)
, d_errors(error_bits)
, d_evaluations(crcham::ncrll(message_bits + d_polylen, error_bits))
{
}
template<>
void WeightsEvaluator::run<true>()
{
// Check that there is an available CUDA device
{
int devcnt = 0;
cudaGetDeviceCount(&devcnt);
if (devcnt == 0) {
throw std::runtime_error("A supported NVIDIA GPU could not be found.");
}
}
// CPU should not busy-wait for the kernel to finish
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
// Find optimal block and grid sizes
int grid_size;
int block_size;
cudaOccupancyMaxPotentialBlockSize(&grid_size, &block_size,
crcham::weightsKernel<crcham::TabularCRC>);
// Set maximum allowable memory sizes
size_t original_heap;
size_t required_heap = 2 * grid_size * block_size * (d_message / 8);
cudaDeviceGetLimit(&original_heap, cudaLimitMallocHeapSize);
cudaDeviceSetLimit(cudaLimitMallocHeapSize,
std::max(original_heap, required_heap));
// Allocate memory for thread-local weights
size_t* weights;
cudaMallocManaged(&weights, grid_size * block_size * sizeof(size_t));
cudaMemset(weights, 0, grid_size * block_size * sizeof(size_t));
// Run the kernel and block until it is done
cudaEvent_t start_event;
cudaEvent_t stop_event;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
cudaEventRecord(start_event);
if (d_polylen < 8) {
crcham::NaiveCRC ncrc(d_polynomial);
crcham::weightsKernel<crcham::NaiveCRC><<<grid_size, block_size>>>(
weights, ncrc, d_message, d_errors);
}
else {
crcham::TabularCRC tcrc(d_polynomial);
crcham::weightsKernel<crcham::TabularCRC><<<grid_size, block_size>>>(
weights, tcrc, d_message, d_errors);
}
cudaEventRecord(stop_event);
cudaEventSynchronize(stop_event);
float millis = 0;
cudaEventElapsedTime(&millis, start_event, stop_event);
d_elapsed = std::chrono::milliseconds((unsigned long) millis);
// Accumulate results from all threads
d_weight = 0;
for (size_t i = 0; i < grid_size * block_size; i++) {
d_weight += weights[i];
}
cudaFree(weights);
}
template<>
void WeightsEvaluator::run<false>()
{
auto timestamp = std::chrono::steady_clock::now();
if (d_polylen < 8) {
crcham::NaiveCRC ncrc(d_polynomial);
d_weight = weightsOpenMP(ncrc, d_message, d_errors);
}
else {
crcham::TabularCRC tcrc(d_polynomial);
d_weight = weightsOpenMP(tcrc, d_message, d_errors);
}
d_elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() - timestamp);
}
size_t WeightsEvaluator::evaluations() const {
return d_evaluations;
}
size_t WeightsEvaluator::weight() const {
return d_weight;
}
std::chrono::milliseconds WeightsEvaluator::elapsed() const {
return d_elapsed;
}
}
|
40c9b8e445ce8ce4a0ead6a8295eaedeee1650a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@author Mark Gates
@generated from zswap.cu normal z -> d, Sat Nov 15 19:53:59 2014
*/
#include "common_magma.h"
#define NB 64
/* Vector is divided into ceil(n/nb) blocks.
Each thread swaps one element, x[tid] <---> y[tid].
*/
__global__ void dswap_kernel(
int n,
double *x, int incx,
double *y, int incy )
{
double tmp;
int ind = threadIdx.x + blockDim.x*blockIdx.x;
if ( ind < n ) {
x += ind*incx;
y += ind*incy;
tmp = *x;
*x = *y;
*y = tmp;
}
}
/**
Purpose:
=============
Swap vector x and y; \f$ x <-> y \f$.
@param[in]
n Number of elements in vector x and y. n >= 0.
@param[in,out]
dx DOUBLE_PRECISION array on GPU device.
The n element vector x of dimension (1 + (n-1)*incx).
@param[in]
incx Stride between consecutive elements of dx. incx != 0.
@param[in,out]
dy DOUBLE_PRECISION array on GPU device.
The n element vector y of dimension (1 + (n-1)*incy).
@param[in]
incy Stride between consecutive elements of dy. incy != 0.
@ingroup magma_dblas1
********************************************************************/
extern "C" void
magmablas_dswap_q(
magma_int_t n,
magmaDouble_ptr dx, magma_int_t incx,
magmaDouble_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
dim3 blocks( (n+NB-1) / NB );
dim3 threads( NB );
hipLaunchKernelGGL(( dswap_kernel), dim3(blocks), dim3(threads), 0, queue , n, dx, incx, dy, incy );
}
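/*
    Usage sketch, assuming dx and dy point at device vectors of length n:

        magmablas_dswap( n, dx, 1, dy, 1 );            // swaps on magma_stream
        magmablas_dswap_q( n, dx, 1, dy, 1, queue );   // swaps on an explicit queue

    Both variants launch one thread per element in blocks of NB = 64 threads.
*/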
/**
@see magmablas_dswap_q
@ingroup magma_dblas1
********************************************************************/
extern "C" void
magmablas_dswap(
magma_int_t n,
magmaDouble_ptr dx, magma_int_t incx,
magmaDouble_ptr dy, magma_int_t incy)
{
magmablas_dswap_q( n, dx, incx, dy, incy, magma_stream );
}
| 40c9b8e445ce8ce4a0ead6a8295eaedeee1650a2.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@author Mark Gates
@generated from zswap.cu normal z -> d, Sat Nov 15 19:53:59 2014
*/
#include "common_magma.h"
#define NB 64
/* Vector is divided into ceil(n/nb) blocks.
Each thread swaps one element, x[tid] <---> y[tid].
*/
__global__ void dswap_kernel(
int n,
double *x, int incx,
double *y, int incy )
{
double tmp;
int ind = threadIdx.x + blockDim.x*blockIdx.x;
if ( ind < n ) {
x += ind*incx;
y += ind*incy;
tmp = *x;
*x = *y;
*y = tmp;
}
}
/**
Purpose:
=============
Swap vector x and y; \f$ x <-> y \f$.
@param[in]
n Number of elements in vector x and y. n >= 0.
@param[in,out]
dx DOUBLE_PRECISION array on GPU device.
The n element vector x of dimension (1 + (n-1)*incx).
@param[in]
incx Stride between consecutive elements of dx. incx != 0.
@param[in,out]
dy DOUBLE_PRECISION array on GPU device.
The n element vector y of dimension (1 + (n-1)*incy).
@param[in]
incy Stride between consecutive elements of dy. incy != 0.
@ingroup magma_dblas1
********************************************************************/
extern "C" void
magmablas_dswap_q(
magma_int_t n,
magmaDouble_ptr dx, magma_int_t incx,
magmaDouble_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
dim3 blocks( (n+NB-1) / NB );
dim3 threads( NB );
dswap_kernel<<< blocks, threads, 0, queue >>>( n, dx, incx, dy, incy );
}
/**
@see magmablas_dswap_q
@ingroup magma_dblas1
********************************************************************/
extern "C" void
magmablas_dswap(
magma_int_t n,
magmaDouble_ptr dx, magma_int_t incx,
magmaDouble_ptr dy, magma_int_t incy)
{
magmablas_dswap_q( n, dx, incx, dy, incy, magma_stream );
}
|
0769378bc05ce15f1e7f8dffc7a08d0d78672d95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <roctracer/roctx.h>
#include <argparse/argparse.hpp>
#include "common.hpp"
#define TILE_WIDTH 32
/* NOTE: A and C are column major, B is row major
*/
__global__ void mygemm(float *__restrict__ c, //<! [out] and MxN matrix
const float *a, //<! [in] an MxK matrix
const float *b, //<! [in] an KxN matrix
const int M, const int N, const int K) {
__shared__ float aSh[TILE_WIDTH][TILE_WIDTH];
__shared__ float bSh[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = by * TILE_WIDTH + ty;
int j = bx * TILE_WIDTH + tx;
float acc = 0;
#define A(_i, _j) a[(_i) + (_j)*M]
#define B(_i, _j) b[(_i)*N + (_j)]
#define C(_i, _j) c[(_i) + (_j)*M]
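  // The macros encode the storage orders stated above: A and C are addressed
  // column-major (element (i,j) lives at i + j*M), B row-major (i*N + j), so
  // the tiled loop below can work in plain (row, column) coordinates.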
for (int m = 0; m < (K - 1) / TILE_WIDTH + 1; ++m) {
if (i < M && m * TILE_WIDTH + tx < K) {
aSh[ty][tx] = A(i, m * TILE_WIDTH + tx);
} else {
aSh[ty][tx] = 0;
}
if (j < N && m * TILE_WIDTH + ty < K) {
bSh[ty][tx] = B(m * TILE_WIDTH + ty, j);
} else {
bSh[ty][tx] = 0;
}
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
acc += aSh[ty][k] * bSh[k][tx];
}
__syncthreads();
}
if (i < M && j < N) {
C(i, j) = acc;
}
#undef A
#undef B
#undef C
}
int main(int argc, char **argv) {
argparse::Parser parser;
// default matrix sizes:
// A: 1489 x 1493
// B: 1493 x 1499
// C: 1489 x 1499
int m = 1489;
int n = 1499;
int k = 1493;
int nIters = 5;
int nWarmup = 5;
bool check = false;
parser.add_positional(m);
parser.add_positional(n);
parser.add_positional(k);
parser.add_option(nIters, "--iters");
parser.add_option(nWarmup, "--warmup");
parser.add_flag(check, "--check");
if (!parser.parse(argc, argv)) {
parser.help();
exit(EXIT_FAILURE);
}
const int64_t flop = int64_t(m) * int64_t(n) * int64_t(k) * 2;
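  // 2*m*n*k: each of the m*n outputs is a k-term dot product, costing one
  // multiply and one add per term.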
// initialize host data
std::cout << "generate data\n";
roctxRangePush("generate data");
float *aHost, *bHost, *cHost, *cExpected;
CUDA_RUNTIME(hipHostMalloc(&aHost, m * k * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&bHost, k * n * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&cHost, m * n * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&cExpected, m * n * sizeof(float), 0));
std::generate(aHost, aHost + m * k, random_int);
std::generate(bHost, bHost + k * n, random_int);
roctxRangePop();
// allocate device data
float *aDev, *bDev, *cDev;
CUDA_RUNTIME(hipMalloc(&aDev, m * k * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&bDev, k * n * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&cDev, m * n * sizeof(float)));
// copy data to device
std::cout << "transfer to GPU\n";
roctxRangePush("host-to-device");
CUDA_RUNTIME(
hipMemcpy(aDev, aHost, m * k * sizeof(float), hipMemcpyDefault));
CUDA_RUNTIME(
hipMemcpy(bDev, bHost, k * n * sizeof(float), hipMemcpyDefault));
roctxRangePop();
// create events to time GPU kernel
hipEvent_t start, stop;
CUDA_RUNTIME(hipEventCreate(&start));
CUDA_RUNTIME(hipEventCreate(&stop));
// GPU kernel launch parameters
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
dim3 dimGrid;
dimGrid.x = (n + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (m + dimBlock.y - 1) / dimBlock.y;
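  // One thread per output element: dimGrid.x tiles the n columns and dimGrid.y
  // tiles the m rows, matching the (i, j) indexing inside mygemm.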
// total elapsed time
float elapsed = 0;
/* Launch the kernel nIters + nWarmup times
Check for correctness on the first time.
Record the time after nWarmup runs complete.
*/
for (int i = 0; i < nIters + nWarmup; ++i) {
CUDA_RUNTIME(hipEventRecord(start));
hipLaunchKernelGGL(( mygemm), dim3(dimGrid), dim3(dimBlock), 0, 0, cDev, aDev, bDev, m, n, k);
CUDA_RUNTIME(hipEventRecord(stop));
CUDA_RUNTIME(hipEventSynchronize(stop));
// check result once
if (check && 0 == i) {
// copy result to host
CUDA_RUNTIME(
hipMemcpy(cHost, cDev, m * n * sizeof(float), hipMemcpyDefault));
// check result on host
cpu_gemm(cExpected, aHost, bHost, m, n, k);
for (size_t i = 0; i < m * n; ++i) {
if (!equal(cExpected[i], cHost[i], 1e-6)) {
std::cout << "Error!\n";
exit(EXIT_FAILURE);
}
}
}
float millis;
CUDA_RUNTIME(hipEventElapsedTime(&millis, start, stop));
std::cout << i << ": " << millis << (i >= nWarmup ? " *" : " ") << "\n";
// record time after warmup runs
if (i >= nWarmup) {
elapsed += millis;
}
}
// print results
double gflops = flop / ((elapsed / nIters) / 1000) / 1e9;
std::cout << "kernel " << gflops << "GFLOPS (" << flop << " flop, "
<< (elapsed / nIters) / 1000 << "s)\n";
// release resources
CUDA_RUNTIME(hipEventDestroy(start));
CUDA_RUNTIME(hipEventDestroy(stop));
CUDA_RUNTIME(hipFree(aDev));
CUDA_RUNTIME(hipFree(bDev));
CUDA_RUNTIME(hipFree(cDev));
CUDA_RUNTIME(hipHostFree(aHost));
CUDA_RUNTIME(hipHostFree(bHost));
CUDA_RUNTIME(hipHostFree(cHost));
CUDA_RUNTIME(hipHostFree(cExpected));
return 0;
}
| 0769378bc05ce15f1e7f8dffc7a08d0d78672d95.cu | #include <algorithm>
#include <nvToolsExt.h>
#include <argparse/argparse.hpp>
#include "common.hpp"
#define TILE_WIDTH 32
/* NOTE: A and C are column major, B is row major
*/
__global__ void mygemm(float *__restrict__ c, //<! [out] and MxN matrix
const float *a, //<! [in] an MxK matrix
const float *b, //<! [in] an KxN matrix
const int M, const int N, const int K) {
__shared__ float aSh[TILE_WIDTH][TILE_WIDTH];
__shared__ float bSh[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = by * TILE_WIDTH + ty;
int j = bx * TILE_WIDTH + tx;
float acc = 0;
#define A(_i, _j) a[(_i) + (_j)*M]
#define B(_i, _j) b[(_i)*N + (_j)]
#define C(_i, _j) c[(_i) + (_j)*M]
for (int m = 0; m < (K - 1) / TILE_WIDTH + 1; ++m) {
if (i < M && m * TILE_WIDTH + tx < K) {
aSh[ty][tx] = A(i, m * TILE_WIDTH + tx);
} else {
aSh[ty][tx] = 0;
}
if (j < N && m * TILE_WIDTH + ty < K) {
bSh[ty][tx] = B(m * TILE_WIDTH + ty, j);
} else {
bSh[ty][tx] = 0;
}
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
acc += aSh[ty][k] * bSh[k][tx];
}
__syncthreads();
}
if (i < M && j < N) {
C(i, j) = acc;
}
#undef A
#undef B
#undef C
}
int main(int argc, char **argv) {
argparse::Parser parser;
// default matrix sizes:
// A: 1489 x 1493
// B: 1493 x 1499
// C: 1489 x 1499
int m = 1489;
int n = 1499;
int k = 1493;
int nIters = 5;
int nWarmup = 5;
bool check = false;
parser.add_positional(m);
parser.add_positional(n);
parser.add_positional(k);
parser.add_option(nIters, "--iters");
parser.add_option(nWarmup, "--warmup");
parser.add_flag(check, "--check");
if (!parser.parse(argc, argv)) {
parser.help();
exit(EXIT_FAILURE);
}
const int64_t flop = int64_t(m) * int64_t(n) * int64_t(k) * 2;
// initialize host data
std::cout << "generate data\n";
nvtxRangePush("generate data");
float *aHost, *bHost, *cHost, *cExpected;
CUDA_RUNTIME(cudaHostAlloc(&aHost, m * k * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&bHost, k * n * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&cHost, m * n * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&cExpected, m * n * sizeof(float), 0));
std::generate(aHost, aHost + m * k, random_int);
std::generate(bHost, bHost + k * n, random_int);
nvtxRangePop();
// allocate device data
float *aDev, *bDev, *cDev;
CUDA_RUNTIME(cudaMalloc(&aDev, m * k * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&bDev, k * n * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&cDev, m * n * sizeof(float)));
// copy data to device
std::cout << "transfer to GPU\n";
nvtxRangePush("host-to-device");
CUDA_RUNTIME(
cudaMemcpy(aDev, aHost, m * k * sizeof(float), cudaMemcpyDefault));
CUDA_RUNTIME(
cudaMemcpy(bDev, bHost, k * n * sizeof(float), cudaMemcpyDefault));
nvtxRangePop();
// create events to time GPU kernel
cudaEvent_t start, stop;
CUDA_RUNTIME(cudaEventCreate(&start));
CUDA_RUNTIME(cudaEventCreate(&stop));
// GPU kernel launch parameters
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
dim3 dimGrid;
dimGrid.x = (n + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (m + dimBlock.y - 1) / dimBlock.y;
// total elapsed time
float elapsed = 0;
/* Launch the kernel nIters + nWarmup times
Check for correctness on the first time.
Record the time after nWarmup runs complete.
*/
for (int i = 0; i < nIters + nWarmup; ++i) {
CUDA_RUNTIME(cudaEventRecord(start));
mygemm<<<dimGrid, dimBlock>>>(cDev, aDev, bDev, m, n, k);
CUDA_RUNTIME(cudaEventRecord(stop));
CUDA_RUNTIME(cudaEventSynchronize(stop));
// check result once
if (check && 0 == i) {
// copy result to host
CUDA_RUNTIME(
cudaMemcpy(cHost, cDev, m * n * sizeof(float), cudaMemcpyDefault));
// check result on host
cpu_gemm(cExpected, aHost, bHost, m, n, k);
for (size_t i = 0; i < m * n; ++i) {
if (!equal(cExpected[i], cHost[i], 1e-6)) {
std::cout << "Error!\n";
exit(EXIT_FAILURE);
}
}
}
float millis;
CUDA_RUNTIME(cudaEventElapsedTime(&millis, start, stop));
std::cout << i << ": " << millis << (i >= nWarmup ? " *" : " ") << "\n";
// record time after warmup runs
if (i >= nWarmup) {
elapsed += millis;
}
}
// print results
double gflops = flop / ((elapsed / nIters) / 1000) / 1e9;
std::cout << "kernel " << gflops << "GFLOPS (" << flop << " flop, "
<< (elapsed / nIters) / 1000 << "s)\n";
// release resources
CUDA_RUNTIME(cudaEventDestroy(start));
CUDA_RUNTIME(cudaEventDestroy(stop));
CUDA_RUNTIME(cudaFree(aDev));
CUDA_RUNTIME(cudaFree(bDev));
CUDA_RUNTIME(cudaFree(cDev));
CUDA_RUNTIME(cudaFreeHost(aHost));
CUDA_RUNTIME(cudaFreeHost(bHost));
CUDA_RUNTIME(cudaFreeHost(cHost));
CUDA_RUNTIME(cudaFreeHost(cExpected));
return 0;
}
|
cf8716c6dcd36af9e0ea77a9b4d73c0487f2ec1b.hip | // !!! This is a file automatically generated by hipify!!!
// Microbenchmark: is the L1 data cache sectored, i.e. filled at sector granularity rather than whole-line granularity?
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
using namespace std;
#define THREADS_PER_BLOCK 1024
#define THREADS_PER_SM 1024
#define BLOCKS_NUM 1
#define TOTAL_THREADS (THREADS_PER_BLOCK*BLOCKS_NUM)
#define WARP_SIZE 32
#define REPEAT_TIMES 256
#define ARRAY_SIZE 32800
//#define L1_SIZE 32768
#define L1_SIZE 30976
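// L1_SIZE is a count of floats: the warm-up loop touches L1_SIZE * 4 bytes,
// presumably tuned to roughly fill the usable L1 capacity of the target GPU
// (the commented-out 32768 would correspond to a 128 KB warm-up footprint).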
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true){
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void l1_sector(uint32_t *startClk, uint32_t *stopClk, float *dsink, float *posArray){
// thread index
uint32_t tid = threadIdx.x;
uint32_t uid = blockIdx.x * blockDim.x + tid;
// a register to avoid compiler optimization
float sink0 = 0;
float sink1 = 0;
float sink2 = 0;
float sink3 = 0;
// populate l1 cache to warm up
for (uint32_t i = tid; i<L1_SIZE; i+=THREADS_PER_BLOCK) {
float* ptr = posArray + i;
// use ca modifier to cache the load in L1
asm volatile ("{\t\n"
".reg .f32 data;\n\t"
"ld.global.ca.f32 data, [%1];\n\t"
"add.f32 %0, data, %0;\n\t"
"}" : "+f"(sink0) : "l"(ptr) : "memory"
);
}
// synchronize all threads
asm volatile ("bar.sync 0;");
//kicks out one of the cache line and read a sector
if(uid == 0)
{
sink0 += posArray[L1_SIZE+1];
}
asm volatile ("bar.sync 0;");
// start timing
uint32_t start = 0;
uint32_t stop = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
// load data from l1 cache and accumulate
float* ptr = posArray + tid*8;
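// Each thread reads an address tid*8 floats (32 bytes) into the array, so consecutive
// threads touch consecutive 32-byte chunks -- presumably one L1 sector per thread,
// assuming 32-byte sectors within 128-byte cache lines.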
asm volatile ("{\t\n"
".reg .f32 data;\n\t"
"ld.global.ca.f32 data, [%1];\n\t"
"add.f32 %0, data, %0;\n\t"
"}" : "+f"(sink0) : "l"(ptr) : "memory"
);
// stop timing
asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// synchronize all threads
asm volatile("bar.sync 0;");
// write time and data back to memory
startClk[uid] = start;
stopClk[uid] = stop;
dsink[uid] = sink0+sink1+sink2+sink3;
}
int main(){
uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
float *posArray = (float*) malloc(ARRAY_SIZE*sizeof(float));
float *dsink = (float*) malloc(TOTAL_THREADS*sizeof(float));
uint32_t *startClk_g;
uint32_t *stopClk_g;
float *posArray_g;
float *dsink_g;
for (uint32_t i=0; i<ARRAY_SIZE; i++)
posArray[i] = (float)i;
gpuErrchk( hipMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( hipMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( hipMalloc(&posArray_g, ARRAY_SIZE*sizeof(float)) );
gpuErrchk( hipMalloc(&dsink_g, TOTAL_THREADS*sizeof(float)) );
gpuErrchk( hipMemcpy(posArray_g, posArray, ARRAY_SIZE*sizeof(float), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( l1_sector), dim3(BLOCKS_NUM),dim3(THREADS_PER_BLOCK), 0, 0, startClk_g, stopClk_g, dsink_g, posArray_g);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(dsink, dsink_g, TOTAL_THREADS*sizeof(float), hipMemcpyDeviceToHost) );
ofstream myfile;
myfile.open ("data.csv");
myfile << "sectror_id, lat"<<endl;
for(unsigned i=0; i< TOTAL_THREADS; i++){
myfile << i << "," << stopClk[i]-startClk[i]<<endl;
}
myfile.close();
return 0;
}
| cf8716c6dcd36af9e0ea77a9b4d73c0487f2ec1b.cu | //Is L1 sector?
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <iostream>
#include <fstream>
using namespace std;
#define THREADS_PER_BLOCK 1024
#define THREADS_PER_SM 1024
#define BLOCKS_NUM 1
#define TOTAL_THREADS (THREADS_PER_BLOCK*BLOCKS_NUM)
#define WARP_SIZE 32
#define REPEAT_TIMES 256
#define ARRAY_SIZE 32800
//#define L1_SIZE 32768
#define L1_SIZE 30976
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void l1_sector(uint32_t *startClk, uint32_t *stopClk, float *dsink, float *posArray){
// thread index
uint32_t tid = threadIdx.x;
uint32_t uid = blockIdx.x * blockDim.x + tid;
// a register to avoid compiler optimization
float sink0 = 0;
float sink1 = 0;
float sink2 = 0;
float sink3 = 0;
// populate l1 cache to warm up
for (uint32_t i = tid; i<L1_SIZE; i+=THREADS_PER_BLOCK) {
float* ptr = posArray + i;
// use ca modifier to cache the load in L1
asm volatile ("{\t\n"
".reg .f32 data;\n\t"
"ld.global.ca.f32 data, [%1];\n\t"
"add.f32 %0, data, %0;\n\t"
"}" : "+f"(sink0) : "l"(ptr) : "memory"
);
}
// synchronize all threads
asm volatile ("bar.sync 0;");
//kicks out one of the cache line and read a sector
if(uid == 0)
{
sink0 += posArray[L1_SIZE+1];
}
asm volatile ("bar.sync 0;");
// start timing
uint32_t start = 0;
uint32_t stop = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
// load data from l1 cache and accumulate
float* ptr = posArray + tid*8;
asm volatile ("{\t\n"
".reg .f32 data;\n\t"
"ld.global.ca.f32 data, [%1];\n\t"
"add.f32 %0, data, %0;\n\t"
"}" : "+f"(sink0) : "l"(ptr) : "memory"
);
// stop timing
asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// synchronize all threads
asm volatile("bar.sync 0;");
// write time and data back to memory
startClk[uid] = start;
stopClk[uid] = stop;
dsink[uid] = sink0+sink1+sink2+sink3;
}
int main(){
uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
float *posArray = (float*) malloc(ARRAY_SIZE*sizeof(float));
float *dsink = (float*) malloc(TOTAL_THREADS*sizeof(float));
uint32_t *startClk_g;
uint32_t *stopClk_g;
float *posArray_g;
float *dsink_g;
for (uint32_t i=0; i<ARRAY_SIZE; i++)
posArray[i] = (float)i;
gpuErrchk( cudaMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( cudaMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
gpuErrchk( cudaMalloc(&posArray_g, ARRAY_SIZE*sizeof(float)) );
gpuErrchk( cudaMalloc(&dsink_g, TOTAL_THREADS*sizeof(float)) );
gpuErrchk( cudaMemcpy(posArray_g, posArray, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice) );
l1_sector<<<BLOCKS_NUM,THREADS_PER_BLOCK>>>(startClk_g, stopClk_g, dsink_g, posArray_g);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(dsink, dsink_g, TOTAL_THREADS*sizeof(float), cudaMemcpyDeviceToHost) );
ofstream myfile;
myfile.open ("data.csv");
myfile << "sectror_id, lat"<<endl;
for(unsigned i=0; i< TOTAL_THREADS; i++){
myfile << i << "," << stopClk[i]-startClk[i]<<endl;
}
myfile.close();
return 0;
}
|
6611c922a45d3c5305e112ecb4ca100a6fd4ed94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by chujie on 1/17/19.
//
#include <algorithm>
#include <cfloat>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#define CAFFE_MAX_POOLING_THRESHOLD 1e-8f
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride, Dtype* top_data) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
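// Illustrative decomposition of the flattened index: with pooled_width = 3,
// pooled_height = 3 and channels = 2, index 11 maps to pw = 11 % 3 = 2,
// ph = (11 / 3) % 3 = 0, c = (11 / 9) % 2 = 1 and n = 0.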
int hstart = ph * stride;
int hend = min(hstart + ksize, height);
int wstart = pw * stride;
int wend = min(wstart + ksize, width);
Dtype maxval = -FLT_MAX;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
maxval = max(maxval, bottom_data[h * width + w]);
}
}
top_data[index] = maxval;
} // (if index < nthreads)
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride, Dtype* top_data) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + ksize, height);
int wstart = pw * stride;
int wend = min(wstart + ksize, width);
Dtype aveval = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_data[h * width + w];
}
}
top_data[index] = aveval / ksize / ksize;
} // (if index < nthreads)
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
int count = (*top)[0]->count();
switch (this->layer_param_.pool()) {
case LayerParameter_PoolMethod_MAX:
hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), CHANNELS_,
HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
top_data);
break;
case LayerParameter_PoolMethod_AVE:
hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), CHANNELS_,
HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
top_data);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* bottom_data,
const Dtype* top_data, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride, Dtype* bottom_diff) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < ksize) ? 0 : (h - ksize) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < ksize) ? 0 : (w - ksize) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
Dtype bottom_datum =
bottom_data[((n * channels + c) * height + h) * width + w];
top_data += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(bottom_datum >= top_data[ph * pooled_width + pw] -
CAFFE_MAX_POOLING_THRESHOLD);
}
}
bottom_diff[index] = gradient;
} // (if index < nthreads)
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride, Dtype* bottom_diff) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < ksize) ? 0 : (h - ksize) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < ksize) ? 0 : (w - ksize) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw];
}
}
bottom_diff[index] = gradient / ksize / ksize;
} // (if index < nthreads)
}
template <typename Dtype>
Dtype PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
if (!propagate_down) {
return Dtype(0.);
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
int count = (*bottom)[0]->count();
switch (this->layer_param_.pool()) {
case LayerParameter_PoolMethod_MAX:
hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), top_diff,
top[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_,
POOLED_WIDTH_, KSIZE_, STRIDE_, bottom_diff);
break;
case LayerParameter_PoolMethod_AVE:
hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), CHANNELS_,
HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
return Dtype(0.);
}
INSTANTIATE_CLASS(PoolingLayer);
} // namespace caffe
| 6611c922a45d3c5305e112ecb4ca100a6fd4ed94.cu | //
// Created by chujie on 1/17/19.
//
#include <algorithm>
#include <cfloat>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#define CAFFE_MAX_POOLING_THRESHOLD 1e-8f
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride, Dtype* top_data) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + ksize, height);
int wstart = pw * stride;
int wend = min(wstart + ksize, width);
Dtype maxval = -FLT_MAX;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
maxval = max(maxval, bottom_data[h * width + w]);
}
}
top_data[index] = maxval;
} // (if index < nthreads)
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride, Dtype* top_data) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + ksize, height);
int wstart = pw * stride;
int wend = min(wstart + ksize, width);
Dtype aveval = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_data[h * width + w];
}
}
top_data[index] = aveval / ksize / ksize;
} // (if index < nthreads)
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
int count = (*top)[0]->count();
switch (this->layer_param_.pool()) {
case LayerParameter_PoolMethod_MAX:
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), CHANNELS_,
HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
top_data);
break;
case LayerParameter_PoolMethod_AVE:
AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), CHANNELS_,
HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
top_data);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* bottom_data,
const Dtype* top_data, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride, Dtype* bottom_diff) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < ksize) ? 0 : (h - ksize) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < ksize) ? 0 : (w - ksize) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
Dtype bottom_datum =
bottom_data[((n * channels + c) * height + h) * width + w];
top_data += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(bottom_datum >= top_data[ph * pooled_width + pw] -
CAFFE_MAX_POOLING_THRESHOLD);
}
}
bottom_diff[index] = gradient;
} // (if index < nthreads)
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int ksize, const int stride, Dtype* bottom_diff) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < ksize) ? 0 : (h - ksize) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < ksize) ? 0 : (w - ksize) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw];
}
}
bottom_diff[index] = gradient / ksize / ksize;
} // (if index < nthreads)
}
template <typename Dtype>
Dtype PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
if (!propagate_down) {
return Dtype(0.);
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
int count = (*bottom)[0]->count();
switch (this->layer_param_.pool()) {
case LayerParameter_PoolMethod_MAX:
MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), top_diff,
top[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_,
POOLED_WIDTH_, KSIZE_, STRIDE_, bottom_diff);
break;
case LayerParameter_PoolMethod_AVE:
AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), CHANNELS_,
HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
return Dtype(0.);
}
INSTANTIATE_CLASS(PoolingLayer);
} // namespace caffe
|
38e94555bdf6eb0607583cbd0addfffff8364954.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// sset.cu --- Part of the project OPLib 1.0, a high performance pricing library
// based on operator methods, higher level BLAS and multicore architectures
// Author: 2009 Claudio Albanese
// Maintainer: Claudio Albanese <[email protected]>
// Created: April-July 2009
// Version: 1.0.0
// Credits: The CUDA code for SGEMM4, SGEMV4 and SSQMM were inspired by
// Vasily Volkov's implementation of SGEMM
// We use several variations of the multi-threaded Mersenne Twister algorithm of
// period 2203 due to Makoto Matsumoto.
// The Monte Carlo routine in SMC includes code by Victor Podlozhnyuk
// included in the CUDA SDK.
// CPU-side BLAS and random number generators link to primitives in the
// Intel Math Kernel Libraries.
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to
// the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
// Boston, MA 02111-1307, USA.
#ifdef LINUX
#define __declspec(x)
#define __stdcall
#endif
#define NTHREADS 4096
__global__ void global_ssetall(float *x, int n, float c, int incx)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int j;
for(int i=0; i<n; i+= NTHREADS)
{
j = i+tid;
if(j<n) x[j*incx] = c;
}
}
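// The launch below uses 32 blocks x 128 threads = NTHREADS (4096), so for example with
// n = 10000 and incx = 1, thread tid writes x[tid], x[tid + 4096] and x[tid + 8192]
// (the last one only while the index stays below n).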
extern "C" __declspec( dllexport ) void opcuda_ssetall(unsigned int xPtr, int n, float c, int incx)
{
hipLaunchKernelGGL(( global_ssetall), dim3(32), dim3(128) , 0, 0, (float *)xPtr, n, c, incx);
}
__global__ void global_ssetone(float *x, int n, float c, int i)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid==i) x[i] = c;
}
extern "C" __declspec( dllexport ) void opcuda_ssetone(unsigned int xPtr, int n, float c, int i)
{
hipLaunchKernelGGL(( global_ssetone), dim3(1), dim3(128) , 0, 0, (float *)xPtr, n, c, i);
}
| 38e94555bdf6eb0607583cbd0addfffff8364954.cu | // sset.cu --- Part of the project OPLib 1.0, a high performance pricing library
// based on operator methods, higher level BLAS and multicore architectures
// Author: 2009 Claudio Albanese
// Maintainer: Claudio Albanese <[email protected]>
// Created: April-July 2009
// Version: 1.0.0
// Credits: The CUDA code for SGEMM4, SGEMV4 and SSQMM were inspired by
// Vasily Volkov's implementation of SGEMM
// We use several variations of the multi-threaded Mersenne Twister algorithm of
// period 2203 due to Makoto Matsumoto.
// The Monte Carlo routine in SMC includes code by Victor Podlozhnyuk
// included in the CUDA SDK.
// CPU-side BLAS and random number generators link to primitives in the
// Intel Math Kernel Libraries.
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to
// the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
// Boston, MA 02111-1307, USA.
#ifdef LINUX
#define __declspec(x)
#define __stdcall
#endif
#define NTHREADS 4096
__global__ void global_ssetall(float *x, int n, float c, int incx)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int j;
for(int i=0; i<n; i+= NTHREADS)
{
j = i+tid;
if(j<n) x[j*incx] = c;
}
}
extern "C" __declspec( dllexport ) void opcuda_ssetall(unsigned int xPtr, int n, float c, int incx)
{
global_ssetall<<< 32, 128 >>>((float *)xPtr, n, c, incx);
}
__global__ void global_ssetone(float *x, int n, float c, int i)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid==i) x[i] = c;
}
extern "C" __declspec( dllexport ) void opcuda_ssetone(unsigned int xPtr, int n, float c, int i)
{
global_ssetone<<< 1, 128 >>>((float *)xPtr, n, c, i);
}
|
b5d5e1414c59411b90063d2e3ff16d1795542f09.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
//
// @author George A. Shulinok <[email protected]>
//
#include <ops/declarable/helpers/image_resize.h>
#include <exceptions/cuda_exception.h>
#include <array/NDArrayFactory.h>
namespace sd {
namespace ops {
namespace helpers {
struct BilinearInterpolationData {
Nd4jLong bottomIndex; // Lower source index used in the interpolation
Nd4jLong topIndex; // Upper source index used in the interpolation
// 1-D linear iterpolation scale (see:
// https://en.wikipedia.org/wiki/Bilinear_interpolation)
double interpolarValue;
};
// Older incorrect scaling method that causes all resizes to have a slight
// translation leading to inconsistent results. For example, a flip then a
// resize gives different results than a resize then a flip.
struct LegacyScaler {
_CUDA_HD LegacyScaler(){};
inline _CUDA_HD float operator()(const int x, const float scale) const {
return static_cast<float>(x) * scale;
}
};
// Half pixel scaler scales assuming that the pixel centers are at 0.5, i.e. the
// floating point coordinates of the top,left pixel is 0.5,0.5.
struct HalfPixelScaler {
_CUDA_HD HalfPixelScaler(){};
inline _CUDA_HD float operator()(const int x, const float scale) const {
// Note that we subtract 0.5 from the return value, as the existing bilinear
// sampling code etc assumes pixels are in the old coordinate system.
return (static_cast<float>(x) + 0.5f) * scale - 0.5f;
}
};
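// For example, with scale = 0.5 the two scalers differ by a quarter pixel:
// LegacyScaler maps x = 2 to 2 * 0.5 = 1.0, while HalfPixelScaler maps x = 2 to
// (2 + 0.5) * 0.5 - 0.5 = 0.75.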
// Utility functions
// calculateResizeScale determines the float scaling factor.
inline float calculateResizeScale(Nd4jLong inSize, Nd4jLong outSize,
bool alignCorners) {
return (alignCorners && outSize > 1)
? (inSize - 1) / static_cast<float>(outSize - 1)
: inSize / static_cast<float>(outSize);
}
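// For example, inSize = 4 and outSize = 8 give scale = 4 / 8 = 0.5 without corner
// alignment, and (4 - 1) / (8 - 1) ~= 0.4286 with alignCorners = true.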
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// computeInterpolationWeights kernel
// outSize - output length
// inSize - input size
// scale - input scale
// interporationData - result
//
template <class Scaler>
static __global__ void computeInterpolationWeights(Nd4jLong outSize,
Nd4jLong inSize,
double scale,
Nd4jLong channels,
BilinearInterpolationData* interpolationData) {
interpolationData[outSize].bottomIndex = 0;
interpolationData[outSize].topIndex = 0;
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
Scaler scaler;
for (Nd4jLong i = outSize - tid; i >= 0; i -= step) {
double in = scaler(i, scale);
// interpolationData[i].bottomIndex = static_cast<Nd4jLong>(in);
// interpolationData[i].topIndex = sd::math::nd4j_min(interpolationData[i].bottomIndex + 1, inSize - 1);
// interpolationData[i].interpolarValue = in - interpolationData[i].bottomIndex;
double const in_f = sd::math::p_floor<double>(in);
double const in_c = sd::math::p_ceil<double>(in);
interpolationData[i].bottomIndex = sd::math::nd4j_max(static_cast<Nd4jLong>(in_f), (Nd4jLong)0LL);//static_cast<Nd4jLong>(in);
interpolationData[i].topIndex = sd::math::nd4j_min(static_cast<Nd4jLong>(in_c), inSize - 1);
interpolationData[i].interpolarValue = in - in_f;
if (channels) {
math::atomics::nd4j_atomicMul(&interpolationData[i].bottomIndex, channels);
math::atomics::nd4j_atomicMul(&interpolationData[i].topIndex, channels);
}
}
}
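// Example of one entry (before the channel scaling above, and assuming inSize > 3):
// an input coordinate in = 2.3 yields bottomIndex = 2, topIndex = 3 and
// interpolarValue = 0.3, i.e. the output pixel blends 70% of input row/column 2
// with 30% of row/column 3.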
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm
//
static void resizeImage(sd::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm kernel
//
template <typename T, typename Z>
static __global__ void resizeImageKernel(T const* input, Nd4jLong const* inputShape, Z* outputYptr,
Nd4jLong const* outputShape, Nd4jLong batchSize, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels,
Nd4jLong inRowSize, Nd4jLong outRowSize, Nd4jLong inBatchNumValues,
BilinearInterpolationData* xs_, BilinearInterpolationData* ys_) {
for (auto batch = blockIdx.x; batch < batchSize; batch += gridDim.x ) { // blockIdx.x as batch index
auto pX = input + batch * inBatchNumValues;
for (Nd4jLong y = threadIdx.x; y < outHeight; y += blockDim.x) {
const T* ys_input_lower_ptr = pX + ys_[y].bottomIndex * inRowSize;
const T* ys_input_upper_ptr = pX + ys_[y].topIndex * inRowSize;
double yVal = ys_[y].interpolarValue;
auto pZ = outputYptr + (batch * outHeight + y) * outRowSize;
for (Nd4jLong x = 0; x < outWidth; x++) {
auto xsBottom = xs_[x].bottomIndex;
auto xsTop = xs_[x].topIndex;
auto xVal = xs_[x].interpolarValue;
// process interpolation for all channels
for (int c = 0; c < channels; c++) {
Z topLeft(ys_input_lower_ptr[xsBottom + c]);
Z topRight(ys_input_lower_ptr[xsTop + c]);
Z bottomLeft(ys_input_upper_ptr[xsBottom + c]);
Z bottomRight(ys_input_upper_ptr[xsTop + c]);
Z top = topLeft + (topRight - topLeft) * xVal;
Z bottom = bottomLeft + (bottomRight - bottomLeft) * xVal;
Z resVal = Z(top + (bottom - top) * yVal);
pZ[x * channels + c] = resVal;
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with
template <typename T, typename F>
static void resizeImage_(sd::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output) {
Nd4jLong inRowSize = inWidth * channels;
Nd4jLong inBatchNumValues = inHeight * inRowSize;
Nd4jLong outRowSize = outWidth * channels;
auto stream = context->getCudaStream();
T const* pInput = images->getDataBuffer()->specialAsT<T>(); //reinterpret_cast<T const *>(images->specialBuffer()); // this works only with 'c' direction
F* pOutput = output->dataBuffer()->specialAsT<F>();//reinterpret_cast<F *>(output->specialBuffer());
dim3 batchSizeBlock(batchSize, 1, 1);
dim3 pictureBlock(outHeight, outWidth, channels);
hipLaunchKernelGGL(( resizeImageKernel<T,F>), dim3(256), dim3(256), 256, *stream, pInput, images->specialShapeInfo(), pOutput,
output->specialShapeInfo(), batchSize, outWidth, outHeight, channels, inRowSize, outRowSize,
inBatchNumValues, xs_, ys_);
auto err = hipStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeImage_: Cannot synchronize kernel execution", err);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T, typename F>
static int resizeBilinearFunctor_(sd::LaunchContext* context, NDArray const* images, int const width,
int const height, bool const alignCorners, bool const halfPixelCenter, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
float heightScale = calculateResizeScale(inHeight, outHeight, alignCorners);
float widthScale = calculateResizeScale(inWidth, outWidth, alignCorners);
BilinearInterpolationData* xs_;// = xs.data();
BilinearInterpolationData* ys_;// = xs.data();
hipError_t err = hipMalloc(&xs_, sizeof(BilinearInterpolationData) * (outWidth + 1));
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for vertical parts rectangulars", err);
}
err = hipMalloc(&ys_, sizeof(BilinearInterpolationData) * (outHeight + 1));
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for horizontal parts rectangulars", err);
}
auto stream = context->getCudaStream();
// Compute the cached interpolation weights on the x and y dimensions.
if (halfPixelCenter) {
hipLaunchKernelGGL(( computeInterpolationWeights <
HalfPixelScaler >), dim3(256), dim3(512), 512, *stream, outHeight, inHeight, heightScale, 0, ys_);
hipLaunchKernelGGL(( computeInterpolationWeights <
HalfPixelScaler >), dim3(256), dim3(512), 512, *stream, outWidth, inWidth, widthScale, channels, xs_);
}
else {
hipLaunchKernelGGL(( computeInterpolationWeights <
LegacyScaler >), dim3(256), dim3(512), 512, *stream, outHeight, inHeight, heightScale, 0, ys_);
hipLaunchKernelGGL(( computeInterpolationWeights <
LegacyScaler >), dim3(256), dim3(512), 512, *stream, outWidth, inWidth, widthScale, channels, xs_);
}
printf("Input is %dx%d, Output is %dx%d\n", inHeight, inWidth, outHeight, outWidth);
NDArray::prepareSpecialUse({output}, {images});
resizeImage_<T,F>(context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output);
err = hipStreamSynchronize(*stream);
NDArray::registerSpecialUse({output}, {images});
err = hipFree(xs_);
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for vertical parts rectangulars", err);
}
err = hipFree(ys_);
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for horizontical parts rectangulars", err);
}
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize by interpolation nearest neighbor algorithm kernel
//
template <typename T>
static __global__ void resizeNeighborKernel(T const* input, Nd4jLong const* inputShape, T* output, Nd4jLong const* outputShape,
Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool alignCorners, bool halfPixelCenters) {
//for (int b = blockIdx.x; b < batchSize; b += gridDim.x)
if (blockIdx.x < batchSize)
{
auto b = blockIdx.x;
for (int y = threadIdx.x; y < outHeight; y += blockDim.x) {
auto posY = alignCorners ? static_cast<Nd4jLong>(sd::math::p_round<float>(halfPixelCenters?((float)y + 0.5f) * heightScale:(float)y * heightScale)) : static_cast<Nd4jLong>(sd::math::p_floor<float>(
halfPixelCenters?((float)y + 0.5f) * heightScale:(float)y * heightScale));
Nd4jLong inY = sd::math::nd4j_min(posY, inHeight - 1);
if (halfPixelCenters) {
inY = sd::math::nd4j_max(0LL, inY);
}
for (int x = threadIdx.y; x < outWidth; x += blockDim.y) {
auto posX = alignCorners ? static_cast<Nd4jLong>(sd::math::p_round<float>(halfPixelCenters?((float)x + 0.5f) * widthScale:(float)x * widthScale)) : static_cast<Nd4jLong>(sd::math::p_floor<float>(
halfPixelCenters?((float)x + 0.5f) * widthScale:(float)x * widthScale));
Nd4jLong inX = sd::math::nd4j_min(posX, inWidth - 1);
if (halfPixelCenters) {
inX = sd::math::nd4j_max(0LL, inX);
}
auto start = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (Nd4jLong e = start; e < channels; e += step) {
Nd4jLong posX[] = {b, inY, inX, e};
Nd4jLong posZ[] = {b, y, x, e};
auto xIndex = shape::getOffset(inputShape, posX);
auto zIndex = shape::getOffset(outputShape, posZ);
output[zIndex] = input[xIndex];
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeNeighborFunctor - main algorithm by nearest neighbor
//
template <typename T>
int resizeNeighborFunctor_(sd::LaunchContext* context, NDArray const* images, int const width, int const height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
// if ((alignCorners && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (alignCorners && outHeight < 2) ||
// (alignCorners && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// // wrong input data
// nd4j_printf("image.resize_nearest_neighbor: Wrong input or output size to resize\n", "");
// return ND4J_STATUS_BAD_ARGUMENTS;
// }
// float heightScale = alignCorners ? (inHeight - 1.f) / float(outHeight - 1.f) : (inHeight / float(outHeight));
// float widthScale = alignCorners ? (inWidth - 1.f) / float(outWidth - 1.f) : (inWidth / float(outWidth));
float heightScale = calculateResizeScale(inHeight, outHeight, alignCorners);
float widthScale = calculateResizeScale(inWidth, outWidth, alignCorners);
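// Illustrative mapping: with inWidth = 4, outWidth = 8 and alignCorners = false,
// widthScale = 0.5, so (without half-pixel centers) output column x samples input
// column floor(x * 0.5) -- columns 0..7 read input columns 0,0,1,1,2,2,3,3.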
auto imagesBuffer = images->getDataBuffer()->specialAsT<T>();//reinterpret_cast<T const*>(images->specialBuffer());
auto outputBuffer = output->dataBuffer()->specialAsT<T>();//reinterpret_cast<T*>(output->specialBuffer());
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {images});
hipLaunchKernelGGL(( resizeNeighborKernel<T>), dim3(batchSize), dim3(outHeight * outWidth), 512, *stream, imagesBuffer, images->specialShapeInfo(), outputBuffer, output->specialShapeInfo(),
batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, alignCorners, halfPixelCenters);
NDArray::registerSpecialUse({output}, {images});
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeImage - resize bilinear algorithm caller
//
void resizeImage(sd::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight,
Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth, Nd4jLong channels, BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_, NDArray* output) {
BUILD_DOUBLE_SELECTOR(images->dataType(), output->dataType(),
resizeImage_, (context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels,
xs_, ys_, output), NUMERIC_TYPES, FLOAT_TYPES);
}
BUILD_DOUBLE_TEMPLATE(template void resizeImage_,(sd::LaunchContext* context, NDArray const* images,
Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth,
Nd4jLong channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output),
NUMERIC_TYPES, FLOAT_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeBilinearFunctor(sd::LaunchContext* context, NDArray const* images, int width, int height,
bool const alignCorners, bool const halfPixelCenter, NDArray* output) {
BUILD_DOUBLE_SELECTOR(images->dataType(), output->dataType(), return resizeBilinearFunctor_, (context, images,
width, height, alignCorners, halfPixelCenter, output), NUMERIC_TYPES, FLOAT_TYPES);
}
// BUILD_SINGLE_TEMPLATE(template int resizeBilinearFunctor_, (sd::LaunchContext* context,
// NDArray const* images, int const width, int const height, bool const alignCorners,
// bool const halfPixelCenter, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeNeighborFunctor(sd::LaunchContext* context, NDArray const* images, int const width, int const height,
bool const alignCorners, bool const halfPixelCenter, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeNeighborFunctor_,
(context, images, width, height, alignCorners, halfPixelCenter, output), LIBND4J_TYPES);
}
// BUILD_SINGLE_TEMPLATE(template int resizeNeighborFunctor_, (sd::LaunchContext* context, NDArray const* images,
// int width, int height, bool const alignCorners, bool const halfPixelCenter, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bicubic interpolation
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct ImageResizerState {
explicit ImageResizerState(bool alignCorners, bool halfPixelCenters)
: _alignCorners(alignCorners),
_halfPixelCenters(halfPixelCenters) {}
// ValidateAndCalculateOutputSize checks the bounds on the input tensors
// and requested size, sets up some of the resizing state such as the
// heightScale and widthScale, and calculates the output size.
// If any of these operations fails, it sets an error status in
// the context, which the caller must check.
int validateAndCalculateOutputSize(NDArray const* input, int const width, int const height) {
//
batchSize = input->sizeAt(0);//.dim_size(0);
outHeight = height;
outWidth = width; //internal::SubtleMustCopy(Svec(1));
inHeight = static_cast<int32_t>(input->sizeAt(1));
inWidth = static_cast<int32_t>(input->sizeAt(2));
channels = input->sizeAt(3); //.dim_size(3);
heightScale = calculateResizeScale(inHeight, outHeight, _alignCorners);
widthScale = calculateResizeScale(inWidth, outWidth, _alignCorners);
// Guard against overflows
if (ceilf((outHeight - 1) * heightScale) > static_cast<float>(DataTypeUtils::max<int>())) {
nd4j_printf("resize_bicubic: Upper overflow occurs for resize height (%f)\n", ceilf((outHeight - 1) * heightScale));
return Status::CODE(ND4J_STATUS_BAD_INPUT, "resize_bicubic: Upper overflow occurs for resize height");
}
if (ceilf((outWidth - 1) * widthScale) > static_cast<float>(DataTypeUtils::max<int>())) {
nd4j_printf("resize_bicubic: Upper overflow occurs for resize width (%f)\n", ceilf((outWidth - 1) * widthScale));
return Status::CODE(ND4J_STATUS_BAD_INPUT, "resize_bicubic: Upper overflow occurs for resize width");
}
return Status::OK();
}
// Calculates all the required variables, and allocates the output.
int validateAndCreateOutput(NDArray const* input, int const width, int const height) {
return validateAndCalculateOutputSize(input, width, height);
}
Nd4jLong batchSize;
Nd4jLong outHeight;
Nd4jLong outWidth;
Nd4jLong inHeight;
Nd4jLong inWidth;
Nd4jLong channels;
float heightScale;
float widthScale;
NDArray* output = nullptr;
hipStream_t* stream;
private:
bool _alignCorners;
bool _halfPixelCenters;
};
struct WeightsAndIndices {
float _weight0;
float _weight1;
float _weight2;
float _weight3;
Nd4jLong _index0;
Nd4jLong _index1;
Nd4jLong _index2;
Nd4jLong _index3;
int _advance; // advance value.
};
class CachedInterpolationCalculator {
public:
_CUDA_HD CachedInterpolationCalculator() : _indexes{-1, -1, -1, -1} {}
// Advances iteration. Returns the number of values that should be copied from
// the current point to the next point. The copying should always be done by
// copying the last <retval> values from the old point to the first <retval>
// values of the new point.
inline _CUDA_HD int Advance(const Nd4jLong x0, const Nd4jLong x1, const Nd4jLong x2,
const Nd4jLong x3) {
// We use 2 hands and walk through, copying from one to another where
// we already have values.
// Invariant, new_indicies_hand <= cached_values_hand
const Nd4jLong new_x_indices[4] = {x0, x1, x2, x3};
int cachedValuesHand = 0;
int newIndiciesHand = 0;
while (cachedValuesHand < 4) {
if (_indexes[cachedValuesHand] == new_x_indices[newIndiciesHand]) {
if (newIndiciesHand < cachedValuesHand) {
_indexes[newIndiciesHand] = _indexes[cachedValuesHand];
}
newIndiciesHand++;
}
cachedValuesHand++;
}
switch (newIndiciesHand) {
case 0:
_indexes[0] = x0;
case 1:
_indexes[1] = x1;
case 2:
_indexes[2] = x2;
case 3:
_indexes[3] = x3;
break;
}
return newIndiciesHand;
}
private:
Nd4jLong _indexes[4];
};
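// For instance, if the cached indices are {1, 2, 3, 4} and the next pixel needs
// {2, 3, 4, 5}, Advance(2, 3, 4, 5) keeps the three overlapping values, stores
// {2, 3, 4, 5} and returns 3, so only one new y-interpolation has to be computed
// for that pixel.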
static __global__ void initCoefTableKernel(const double a, float* table, Nd4jLong tableSize) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i <= tableSize; i += step) {
float x = i * 1.0 / tableSize;
table[i * 2] = ((a + 2) * x - (a + 3)) * x * x + 1;
x += 1.0;
table[i * 2 + 1] = ((a * x - 5 * a) * x + 8 * a) * x - 4 * a;
}
}
static const Nd4jLong kTableSize = (1 << 10);
float* initCoeffsTable(const double a, hipStream_t* stream) {
// Allocate and initialize coefficients table using Bicubic
// convolution algorithm.
// https://en.wikipedia.org/wiki/Bicubic_interpolation
float* coeffs_table; // = new float[(kTableSize + 1) * 2];
auto err = hipMalloc(&coeffs_table, sizeof(float) * ((kTableSize + 1) * 2));
if (err != 0) {
throw cuda_exception::build("helpers::initCoeffsTable: Cannot allocate memory for vertical parts rectangulars", err);
}
hipLaunchKernelGGL(( initCoefTableKernel), dim3(128),dim3(128),128, *stream, a, coeffs_table, kTableSize);
err = hipStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::initCoeffsTable: Cannot syncronize kernel", err);
}
return coeffs_table;
}
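// Sanity check for the Keys cubic case (a = -0.5): at delta = 0.5 the four taps evaluate
// to (-0.0625, 0.5625, 0.5625, -0.0625), which sum to 1 as expected for an interpolating
// kernel.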
// _CUDA_HD const float* getCoeffsTable(const bool use_keys_cubic) {
// // Static so that we initialize it on first use
// if (use_keys_cubic) {
// // http://ieeexplore.ieee.org/document/1163711/
// // R. G. Keys. Cubic convolution interpolation for digital image
// // processing. IEEE Transactions on Acoustics, Speech, and Signal
// // Processing, 29(6):11531160, 1981.
// //static const float* coeffs_table = initCoeffsTable(-0.5f, stream);
// return sCoeffsTableHalf;
// } else {
// //static const float* coeffs_table = initCoeffsTable(-0.75f, stream);
// return sCoeffsTableThreeFourth;
// }
// }
inline _CUDA_HD Nd4jLong bound(Nd4jLong val, Nd4jLong limit) {
return math::nd4j_min(limit - 1ll, math::nd4j_max(Nd4jLong{0}, val));
}
template <typename T>
inline _CUDA_HD float interpolate1D(const float weight0, const float weight1, const float weight2, const float weight3,
const T value0, const T value1, const T value2, const T value3) {
return static_cast<float>(value0) * weight0 +
static_cast<float>(value1) * weight1 +
static_cast<float>(value2) * weight2 +
static_cast<float>(value3) * weight3;
}
// Compute the 1D interpolation for a given X index using the y_weights
static _CUDA_HD float compute(float values[4], const float xW0, const float xW1, const float xW2, const float xW3) {
return interpolate1D(xW0, xW1, xW2, xW3, values[0], values[1],values[2], values[3]);
}
template <typename Scaler, bool use_keys_cubic>
inline _CUDA_HD void getWeightsAndIndices(float const* coeffs_table, const float scale, const Nd4jLong out_loc, const Nd4jLong limit, WeightsAndIndices* out) {
const Scaler scaler;
const float in_loc_f = scaler(out_loc, scale);
const Nd4jLong in_loc = math::nd4j_floor<float, Nd4jLong>(in_loc_f);
const float delta = in_loc_f - in_loc;
const Nd4jLong offset = math::nd4j_round<float, Nd4jLong>(delta * kTableSize);
//const float* coeffs_table = getCoeffsTable(use_keys_cubic);
if (use_keys_cubic) {
// The legacy code placed more weight on the edge pixels, since bounding
// the set of inputs to sample could cause an edge pixel to be repeated.
// Here we change the behavior at borders to match that used by the
// scale_and_translate_op, where sampling locations outside the image have
// their weight set to 0, and the weights are renormalized so that their sum
// is 1.0.
out->_index0 = bound(in_loc - 1, limit);
out->_weight0 =
(out->_index0 == in_loc - 1 ? coeffs_table[offset * 2 + 1] : 0.0f);
out->_index1 = bound(in_loc, limit);
out->_weight1 = (out->_index1 == in_loc ? coeffs_table[offset * 2] : 0.0f);
out->_index2 = bound(in_loc + 1, limit);
out->_weight2 =
(out->_index2 == in_loc + 1 ? coeffs_table[(kTableSize - offset) * 2]
: 0.0f);
out->_index3 = bound(in_loc + 2, limit);
out->_weight3 = (out->_index3 == in_loc + 2
? coeffs_table[(kTableSize - offset) * 2 + 1]
: 0.0f);
const float weight_sum =
out->_weight0 + out->_weight1 + out->_weight2 + out->_weight3;
if (math::nd4j_abs(weight_sum) >= 1000.0f * DataTypeUtils::min<float>()) {
const float one_over_weight_sum = 1.0f / weight_sum;
out->_weight0 *= one_over_weight_sum;
out->_weight1 *= one_over_weight_sum;
out->_weight2 *= one_over_weight_sum;
out->_weight3 *= one_over_weight_sum;
}
} else {
out->_weight0 = coeffs_table[offset * 2 + 1];
out->_weight1 = coeffs_table[offset * 2];
out->_weight2 = coeffs_table[(kTableSize - offset) * 2];
out->_weight3 = coeffs_table[(kTableSize - offset) * 2 + 1];
out->_index0 = bound(in_loc - 1, limit);
out->_index1 = bound(in_loc, limit);
out->_index2 = bound(in_loc + 1, limit);
out->_index3 = bound(in_loc + 2, limit);
}
}
static __global__ void accumulateChannelsKernel(WeightsAndIndices* pXWais, Nd4jLong outWidth, Nd4jLong channels) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto x = start; x < outWidth; x += step) {
pXWais[x]._index0 *= channels;
pXWais[x]._index1 *= channels;
pXWais[x]._index2 *= channels;
pXWais[x]._index3 *= channels;
}
}
static __global__ void advaceWeightsAndIndicesKernel(float const* cacheTable, CachedInterpolationCalculator* calc, WeightsAndIndices* pXWais, Nd4jLong inWidth, float widthScale,
Nd4jLong outWidth, Nd4jLong channels, bool halfPixelCenters) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto x = start; x < outWidth; x += step) {
if (halfPixelCenters)
getWeightsAndIndices<HalfPixelScaler, true>(cacheTable, widthScale, x, inWidth, &pXWais[x]);
else
getWeightsAndIndices<LegacyScaler, false>(cacheTable, widthScale, x, inWidth, &pXWais[x]);
pXWais[x]._advance = calc->Advance(pXWais[x]._index0, pXWais[x]._index1, pXWais[x]._index2, pXWais[x]._index3);
}
}
// resizerState and xWais are device allocated
static void computeXWeightsAndIndices(float const* coeffsTable, const ImageResizerState& resizerState,
const bool halfPixelCenters,
WeightsAndIndices* pXWais) {
auto stream = resizerState.stream;
auto outWidth = resizerState.outWidth;
CachedInterpolationCalculator calc; // = new CachedInterpolationCalculator;
CachedInterpolationCalculator* pCalcD;
auto err = hipMalloc(&pCalcD, sizeof(CachedInterpolationCalculator));
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot allocated device memory for interpolate calculator", err);
}
err = hipMemcpyAsync(pCalcD, &calc, sizeof(CachedInterpolationCalculator), hipMemcpyHostToDevice, *stream);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot set up device memory for interpolate calculator", err);
}
hipLaunchKernelGGL(( advaceWeightsAndIndicesKernel), dim3(128), dim3(128), 128, *stream, coeffsTable, pCalcD, pXWais, resizerState.inWidth, resizerState.widthScale, outWidth, resizerState.channels, halfPixelCenters);
err = hipFree(pCalcD);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot deallocated device memory for interpolate calculator", err);
}
err = hipStreamSynchronize(*stream);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot synchronize stream after advance weights and indicers", err);
}
// Scale the values so they can be used as offsets into buffers.
hipLaunchKernelGGL(( accumulateChannelsKernel), dim3(128), dim3(128), 512, *stream, pXWais, outWidth, resizerState.channels);
err = hipStreamSynchronize(*stream);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot synchronize stream after accumulate channels", err);
}
}
template <typename T>
static _CUDA_HD FORCEINLINE float computeYInterpolation(
int which, int channelNum, const WeightsAndIndices& yWai,
const T* pY0, const T* pY1, const T* pY2, const T* pY3,
const WeightsAndIndices& xWai) {
int xIndex;
switch (which) {
case 0:
xIndex = xWai._index0;
break;
case 1:
xIndex = xWai._index1;
break;
case 2:
xIndex = xWai._index2;
break;
default:
xIndex = xWai._index3;
break;
}
const Nd4jLong pt_index = xIndex + channelNum;
return interpolate1D<T>(yWai._weight0, yWai._weight1, yWai._weight2,
yWai._weight3, pY0[pt_index], pY1[pt_index],
pY2[pt_index], pY3[pt_index]);
}
template <typename T>
static __global__ void bicubicInterpolateWithCachingKernel(float const* cachedTable, T const* inputPtr, ImageResizerState* pResizerState, WeightsAndIndices* xWais, bool halfPixelCenters, Nd4jLong inBatchWidth, Nd4jLong inRowWidth, float* outputPtr) {
// auto numChannels = pResizerState->channels;
for (Nd4jLong b = blockIdx.x; b < pResizerState->batchSize; b += gridDim.x) {
auto pInput = inputPtr + b * inBatchWidth;
float* cachedValue;
for (Nd4jLong y = threadIdx.x; y < pResizerState->outHeight; y += blockDim.x) {
if (threadIdx.x == 0) {
extern __shared__ char sharedChar[];
cachedValue = reinterpret_cast<float*>(sharedChar);
}
auto pos = (b * pResizerState->outHeight + y) * pResizerState->outWidth * pResizerState->channels;
auto pOutput = &outputPtr[pos];
struct WeightsAndIndices yWai;
if (halfPixelCenters) {
getWeightsAndIndices<HalfPixelScaler, true>(cachedTable, pResizerState->heightScale, y, pResizerState->inHeight, &yWai);
} else {
getWeightsAndIndices<LegacyScaler, false>(cachedTable, pResizerState->heightScale, y, pResizerState->inHeight, &yWai);
}
// Make pointers represent offsets of data in inputBPtr.
const T* y_ptr_0 = pInput + yWai._index0 * inRowWidth;
const T* y_ptr_1 = pInput + yWai._index1 * inRowWidth;
const T* y_ptr_2 = pInput + yWai._index2 * inRowWidth;
const T* y_ptr_3 = pInput + yWai._index3 * inRowWidth;
if (pResizerState->channels == 3) {
// Manually unroll case of 3 channels.
float cached_value_0[4] = {0};
float cached_value_1[4] = {0};
float cached_value_2[4] = {0};
for (Nd4jLong x = 0; x < pResizerState->outWidth; ++x) {
const WeightsAndIndices& xWai = xWais[x];
// Shift values in cached_value_* to fill first '_advance' values.
switch (xWai._advance) {
case 3:
cached_value_0[0] = cached_value_0[1];
cached_value_0[1] = cached_value_0[2];
cached_value_0[2] = cached_value_0[3];
cached_value_1[0] = cached_value_1[1];
cached_value_1[1] = cached_value_1[2];
cached_value_1[2] = cached_value_1[3];
cached_value_2[0] = cached_value_2[1];
cached_value_2[1] = cached_value_2[2];
cached_value_2[2] = cached_value_2[3];
break;
case 2:
cached_value_0[0] = cached_value_0[2];
cached_value_0[1] = cached_value_0[3];
cached_value_1[0] = cached_value_1[2];
cached_value_1[1] = cached_value_1[3];
cached_value_2[0] = cached_value_2[2];
cached_value_2[1] = cached_value_2[3];
break;
case 1: {
cached_value_0[0] = cached_value_0[3];
cached_value_1[0] = cached_value_1[3];
cached_value_2[0] = cached_value_2[3];
break;
}
}
// Set the remaining '4-_advance' values by computing.
switch (xWai._advance) {
case 0:
cached_value_0[0] = computeYInterpolation(0, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[0] = computeYInterpolation(0, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[0] = computeYInterpolation(0, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
case 1:
cached_value_0[1] = computeYInterpolation(1, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[1] = computeYInterpolation(1, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[1] = computeYInterpolation(1, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
case 2:
cached_value_0[2] = computeYInterpolation(2, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[2] = computeYInterpolation(2, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[2] = computeYInterpolation(2, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
case 3:
cached_value_0[3] = computeYInterpolation(3, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[3] = computeYInterpolation(3, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[3] = computeYInterpolation(3, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
// break;
}
pOutput[x * pResizerState->channels + 0] = compute(cached_value_0, xWai._weight0, xWai._weight1,
xWai._weight2, xWai._weight3);
pOutput[x * pResizerState->channels + 1] = compute(cached_value_1, xWai._weight0, xWai._weight1,
xWai._weight2, xWai._weight3);
pOutput[x * pResizerState->channels + 2] = compute(cached_value_2, xWai._weight0, xWai._weight1,
xWai._weight2, xWai._weight3);
}
} else {
for (Nd4jLong x = 0; x < pResizerState->outWidth; ++x) {
const WeightsAndIndices& xWai = xWais[x];
// Shift values in cachedValue to fill first '_advance' values.
switch (xWai._advance) {
case 3:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = cachedValue[4 * c + 1];
cachedValue[4 * c + 1] = cachedValue[4 * c + 2];
cachedValue[4 * c + 2] = cachedValue[4 * c + 3];
}
break;
case 2:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = cachedValue[4 * c + 2];
cachedValue[4 * c + 1] = cachedValue[4 * c + 3];
}
break;
case 1: {
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = cachedValue[4 * c + 3];
}
break;
}
}
// Set the remaining '4-_advance' values by computing.
switch (xWai._advance) {
case 0:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = computeYInterpolation(0, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
case 1:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 1] = computeYInterpolation(1, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
case 2:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 2] = computeYInterpolation(2, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
case 3:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 3] = computeYInterpolation(3, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
// break;
}
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
pOutput[x * pResizerState->channels + c] = compute(&cachedValue[4 * c], xWai._weight0, xWai._weight1, xWai._weight2, xWai._weight3);
}
}
}
}
}
}
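// bicubicInterpolateWithCaching (host): copies the resizer state to the device, builds the cubic
// coefficients table and the per-column WeightsAndIndices cache, launches the caching kernel above
// and releases all temporary device buffers.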
template <typename T>
static void
bicubicInterpolateWithCaching(NDArray const* image, ImageResizerState const& resizerState, bool const halfPixelCenters, NDArray* output) {
const auto numChannels = resizerState.channels;
const Nd4jLong inRowWidth = resizerState.inWidth * numChannels;
const Nd4jLong inBatchWidth = resizerState.inHeight * inRowWidth;
auto stream = resizerState.stream; //output->getContext()->getCudaStream();
ImageResizerState* resizerStateD;
auto err = hipMalloc(&resizerStateD, sizeof(ImageResizerState));
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot allocate memory for resizerState", err);
}
err = hipMemcpyAsync(resizerStateD, &resizerState, sizeof(ImageResizerState), hipMemcpyHostToDevice, *stream);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot set up memory for resizerState", err);
}
// float* cachedValue = nullptr;
// size_t cachedSize = sizeof(float) * (numChannels == 3 ? 0 : 4 * numChannels);
// if (cachedSize) {
// err = hipMalloc(reinterpret_cast<void**>(&cachedValue), cachedSize);
// if (err != 0) {
// throw cuda_exception::build(
// "helpers::bicubicInterpolateWithCaching: Cannot allocate memory for cached values", err);
// }
// err = hipMemset(cachedValue, 0, cachedSize);
// if (err != 0) {
// throw cuda_exception::build(
// "helpers::bicubicInterpolateWithCaching: Cannot set up memory for cached values", err);
// }
// }
WeightsAndIndices* xWais; //(resizerState.outWidth);
err = hipMalloc(&xWais, sizeof(WeightsAndIndices) * resizerState.outWidth);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot allocate memory for weights and indices", err);
}
auto coeffsTable = halfPixelCenters?initCoeffsTable(-0.5, stream): initCoeffsTable(-0.75, stream);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: initCoeffsTable finished with error", err);
}
computeXWeightsAndIndices(coeffsTable, resizerState, halfPixelCenters, xWais);
err = hipStreamQuery(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: computeXWeightsAndIndices finished with error", err);
}
const T* pInput = image->getDataBuffer()->specialAsT<T>();
float* pOutput = output->dataBuffer()->specialAsT<float>(); //_data.data();
hipLaunchKernelGGL(( bicubicInterpolateWithCachingKernel<T>), dim3(128), dim3(1), 512, *stream, coeffsTable, pInput,
resizerStateD, xWais, halfPixelCenters, inBatchWidth, inRowWidth, pOutput);
err = hipStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Kernels finished with error", err);
}
err = hipFree(resizerStateD);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for resizerState", err);
}
// if (cachedSize)
// err = hipFree(cachedValue);
// if (err != 0) {
// throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for cached values", err);
// }
err = hipFree(xWais);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for weights and indices", err);
}
err = hipFree(coeffsTable);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for coefficients table", err);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
int resizeBicubicFunctor_(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output) {
return Status::OK();
}
int resizeBicubicFunctor(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctor_, (context, image,
width, height, preserveAspectRatio, antialias, output), NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBicubicFunctor_, (sd::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output), NUMERIC_TYPES);
// ------------------------------------------------------------------------------------------------------------------ //
struct CachedInterpolation {
Nd4jLong start;
Nd4jLong end;
float startScale;
float endMinusOneScale;
bool needsBounding;
};
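// fillInterpolationCache: for every output column x, precomputes the range [start, end) of contributing
// input columns under widthScale, the partial scales of the first and last contributing columns, and
// whether the indices have to be clamped to the input width.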
static __global__ void fillInterpolationCache(CachedInterpolation* xCached, Nd4jLong cacheLen, Nd4jLong inWidth, float widthScale) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto increment = blockDim.x * gridDim.x;
for (auto x = start; x < cacheLen; x += increment) {
auto& xCache = xCached[x];
const float inX = x * widthScale;
const float inX1 = (x + 1) * widthScale;
Nd4jLong v = math::nd4j_floor<float, Nd4jLong>(inX);
xCache.start = v;
xCache.startScale = v < inX ? (v + 1 > inX1 ? widthScale : v + 1 - inX) : (v + 1 > inX1 ? inX1 - v : 1.f);
v = math::nd4j_ceil<float, Nd4jLong>(inX1);
xCache.end = v--;
xCache.endMinusOneScale = v < inX ? (v + 1 > inX1 ? widthScale : v + 1 - inX) : (v + 1 > inX1 ? inX1 - v : 1.f);
xCache.needsBounding = bound(xCache.start, inWidth) != xCache.start || bound(xCache.end - 1, inWidth) != (xCache.end - 1);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
template <typename T>
struct ScaleCache {
float yScale;
T const* yPtr;
};
// Computes the sum of all x values defined by xCache taken across
// the y offsets and scales defined by yScaleCache, for the three channels.
//
// The x-bounding decision is read once from xCache.needsBounding and applied
// through the boundIfNeeded lambda in the loops below.
template <typename T>
static __device__ void computePatchSumOf3Channels(float scale,
const ImageResizerState& st,
ScaleCache<T> const* yScaleCache,
Nd4jLong ptrsLen,
const CachedInterpolation& xCache,
float* outputPtr) {
bool const needsXBounding = xCache.needsBounding;
auto boundIfNeeded = [needsXBounding](Nd4jLong x, Nd4jLong y) -> Nd4jLong {
return (needsXBounding ? bound(x, y) : (x));
};
float sum_0 = 0;
float sum_1 = 0;
float sum_2 = 0;
for (int i = 0; i < ptrsLen; ++i) {
const T* ptr = yScaleCache[i].yPtr;
float scaleX = xCache.startScale;
Nd4jLong offset = 3 * boundIfNeeded(xCache.start, st.inWidth);
float sum_y_0 = static_cast<float>(ptr[offset + 0]) * scaleX;
float sum_y_1 = static_cast<float>(ptr[offset + 1]) * scaleX;
float sum_y_2 = static_cast<float>(ptr[offset + 2]) * scaleX;
if (xCache.start + 1 != xCache.end) {
for (Nd4jLong x = xCache.start + 1; x < xCache.end - 1; ++x) {
Nd4jLong offset = 3 * boundIfNeeded(x, st.inWidth);
sum_y_0 += static_cast<float>(ptr[offset + 0]);
sum_y_1 += static_cast<float>(ptr[offset + 1]);
sum_y_2 += static_cast<float>(ptr[offset + 2]);
}
scaleX = xCache.endMinusOneScale;
offset = st.channels * boundIfNeeded(xCache.end - 1, st.inWidth);
sum_y_0 += static_cast<float>(ptr[offset + 0]) * scaleX;
sum_y_1 += static_cast<float>(ptr[offset + 1]) * scaleX;
sum_y_2 += static_cast<float>(ptr[offset + 2]) * scaleX;
}
sum_0 += sum_y_0 * yScaleCache[i].yScale;
sum_1 += sum_y_1 * yScaleCache[i].yScale;
sum_2 += sum_y_2 * yScaleCache[i].yScale;
}
outputPtr[0] = sum_0 * scale;
outputPtr[1] = sum_1 * scale;
outputPtr[2] = sum_2 * scale;
}
// Computes the sum of all x values defined by xCache taken across
// the y offsets and scales defined by yScaleCache, for each channel c.
//
// The x-bounding decision is read once from xCache.needsBounding and applied
// through the boundIfNeeded lambda in the loops below.
template <typename T>
static __device__ void computePatchSum(float scale, const ImageResizerState& st,
ScaleCache<T> const* yScaleCache, Nd4jLong ptrsLen,
const CachedInterpolation& xCache,
float* outputPtr) {
bool const needsXBounding = xCache.needsBounding;
auto boundIfNeeded = [needsXBounding](Nd4jLong x, Nd4jLong y) -> Nd4jLong {
return (needsXBounding ? bound(x, y) : (x));
};
const auto numChannels = st.channels;
for (Nd4jLong c = 0; c < numChannels; ++c) {
float sum = 0;
for (int i = 0; i < ptrsLen; ++i) {
T const* ptr = yScaleCache[i].yPtr;
float scaleX = xCache.startScale;
float sumY = static_cast<float>(ptr[numChannels * boundIfNeeded(xCache.start, st.inWidth) + c]) * scaleX;
if (xCache.start + 1 != xCache.end) {
for (Nd4jLong x = xCache.start + 1; x < xCache.end - 1; ++x) {
sumY += static_cast<float>(
ptr[numChannels * boundIfNeeded(x, st.inWidth) + c]);
}
scaleX = xCache.endMinusOneScale;
sumY += static_cast<float>(ptr[numChannels * boundIfNeeded(xCache.end - 1, st.inWidth) + c]) * scaleX;
}
sum += sumY * yScaleCache[i].yScale;
}
outputPtr[c] = sum * scale;
}
}
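// resizeAreaKernel: blocks iterate over batch items and threads over output rows. For each output row the
// contributing input rows and their vertical scales are collected into a ScaleCache slice, then every
// output column is reduced with computePatchSumOf3Channels (3-channel fast path) or computePatchSum.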
template <typename T>
static __global__ void resizeAreaKernel(ImageResizerState const* pSt, CachedInterpolation const* caches, float scale,
T const* inputPtr, Nd4jLong const* inputShape, float* outputPtr, Nd4jLong const* outputShape, ScaleCache<T>* cachePool) { //batch * outWidth * outHeight
for (auto batch = blockIdx.x; batch < pSt->batchSize; batch += gridDim.x) {
for (auto y = threadIdx.x; y < pSt->outHeight; y += blockDim.x) {
const float inY = y * pSt->heightScale;
const float inY1 = (y + 1) * pSt->heightScale;
// The start and end height indices of all the cells that could
// contribute to the target cell.
const Nd4jLong yStart = math::nd4j_floor<float, Nd4jLong>(inY);
const Nd4jLong yEnd = math::nd4j_ceil<float, Nd4jLong>(inY1);
auto scalesDim = yEnd - yStart;
auto yScaleCache = cachePool + (batch * pSt->outHeight + y) * pSt->outWidth;
//auto startPtr = sharedPtr + y * scalesDim * sizeof(float);
//float* yScales = yScalesShare + y * sizeof(float) * scalesDim;//reinterpret_cast<float*>(startPtr); //shared + y * scalesDim * y + scalesDim * sizeof(T const *) [scalesDim];
//T const** yPtrs = yPtrsShare + y * sizeof(T const*) * scalesDim; //[scalesDim];
//yPtrs = reinterpret_cast<T const**>(sharedBuf);
float* output = outputPtr + (batch * pSt->outHeight + y) * pSt->channels * pSt->outWidth;
//int k = 0;
for (Nd4jLong i = yStart, k = 0; i < yEnd; ++i, ++k) {
float scaleY;
if (i < inY) {
scaleY = (i + 1 > inY1 ? pSt->heightScale : i + 1 - inY);
} else {
scaleY = (i + 1 > inY1 ? inY1 - i : 1.0);
}
yScaleCache[k].yScale = scaleY;
yScaleCache[k].yPtr = inputPtr + (batch * pSt->inHeight * pSt->inWidth * pSt->channels + bound(i, pSt->inHeight) * pSt->inWidth * pSt->channels);
}
if (pSt->channels == 3) {
for (Nd4jLong x = 0; x < pSt->outWidth; ++x) {
const CachedInterpolation& xCache = caches[x];
computePatchSumOf3Channels<T>(scale, *pSt, yScaleCache, scalesDim, xCache, output);
output += pSt->channels;
}
} else {
for (Nd4jLong x = 0; x < pSt->outWidth; ++x) {
const CachedInterpolation &xCache = caches[x];
computePatchSum<T>(scale, *pSt, yScaleCache, scalesDim, xCache, output);
output += pSt->channels;
}
}
}
}
}
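// resizeArea (host): copies the resizer state to the device, allocates a ScaleCache pool with one slice
// per output row of every batch item, launches resizeAreaKernel and frees the temporary buffers.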
template <typename T>
static void resizeArea(hipStream_t* stream, ImageResizerState const& st, CachedInterpolation* cache,
NDArray const* input, NDArray* output) {
T const* inputPtr = reinterpret_cast<T const*>(input->specialBuffer());
// float* yScales;
// T const** yPtrs;
float scale = 1.f / (st.heightScale * st.widthScale);
auto outputPtr = reinterpret_cast<float*>(output->specialBuffer()); // output is always float. TO DO: provide another float types also with template <typename X, typename Z> declaration
ImageResizerState* pSt;
auto err = hipMalloc(&pSt, sizeof(ImageResizerState));
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot allocate memory for ImageResizerState", err);
}
err = hipMemcpyAsync(pSt, &st, sizeof(ImageResizerState), hipMemcpyHostToDevice, *stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot copy to device memory", err);
}
ScaleCache<T>* cachePool;
auto cachePoolSize = sizeof(ScaleCache<T>) * st.batchSize * st.outWidth * st.outHeight;
err = hipMalloc(&cachePool, cachePoolSize);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot allocate memory for cache", err);
}
hipLaunchKernelGGL(( resizeAreaKernel<T>), dim3(128), dim3(128), 2048, *stream, pSt, cache, scale, inputPtr, input->specialShapeInfo(), outputPtr,
output->specialShapeInfo(), cachePool);
err = hipStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: An error occurred while running the kernel", err);
}
err = hipFree(cachePool);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot deallocate memory for cache", err);
}
err = hipFree(pSt);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot deallocate memory for ImageResizerState", err);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
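// resizeAreaFunctor_: validates the requested output size, fills the per-column CachedInterpolation data
// on the device and then delegates to resizeArea for the actual reduction.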
template <typename T>
int resizeAreaFunctor_(sd::LaunchContext* context, NDArray const* image, int const width, int const height,
bool const alignCorners, NDArray* output) {
ImageResizerState st(alignCorners, false); // Create resize info
auto res = st.validateAndCalculateOutputSize(image, width, height);
auto stream = context->getCudaStream();
if (Status::OK() == res) {
CachedInterpolation* xCached;
//(st.outWidth);
auto err = hipMalloc(&xCached, sizeof(CachedInterpolation) * st.outWidth);
if (err != 0) {
throw cuda_exception::build("helpers::resizeAreaFunctor_: Cannot allocate memory for cached interpolations", err);
}
NDArray::prepareSpecialUse({output}, {image});
hipLaunchKernelGGL(( fillInterpolationCache), dim3(128), dim3(128), 256, *stream, xCached, st.outWidth, st.inWidth, st.widthScale);
resizeArea<T>(stream, st, xCached, image, output);
err = hipStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeAreaFunctor_: An error occurred while the kernel was running", err);
}
err = hipFree(xCached);
if (err != 0) {
throw cuda_exception::build("helpers::resizeAreaFunctor_: Cannot deallocate memory for cached interpolations", err);
}
NDArray::registerSpecialUse({output}, {image});
}
return res;
}
int resizeAreaFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
bool const alignCorners, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeAreaFunctor_, (context, image, width, height, alignCorners, output), NUMERIC_TYPES);
}
// ------------------------------------------------------------------------------------------------------------------ //
// simplified bicubic resize without antialiasing
//
template <typename T>
int resizeBicubicFunctorA_(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
ImageResizerState st(alignCorners, halfPixelCenters); // align_corners, half_pixel_align
st.stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {image});
int res = st.validateAndCreateOutput(image, width, height);
if (res == Status::OK())
bicubicInterpolateWithCaching<T>(image, st, halfPixelCenters, output);
NDArray::registerSpecialUse({output}, {image});
return res;
}
int resizeBicubicFunctorA(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctorA_, (context,
image, width, height, alignCorners, halfPixelCenters, output), NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBicubicFunctorA_, (sd::LaunchContext * context,
NDArray const* image, int width, int height, bool const alignCorners, bool const halfPixelCenters, NDArray* output), NUMERIC_TYPES);
// ------------------------------------------------------------------------------------------------------------------ //
int resizeImagesFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
ImageResizeMethods method, bool alignCorners, NDArray* output) {
switch (method) {
case kResizeBilinear:
return resizeBilinearFunctor(context, image, width, height, alignCorners, false, output);
case kResizeNearest:
return resizeNeighborFunctor(context, image, width, height, alignCorners, false, output);
case kResizeBicubic:
return resizeBicubicFunctor(context, image, width, height, alignCorners, false, output);
case kResizeArea:
return resizeAreaFunctor(context, image, width, height, alignCorners, output);
default:
throw std::runtime_error("helper::resizeImagesFunctor: Wrong resize method.");
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// --------------------------------------------------------------------------------------------------------------- //
// Crop and Resize helper implementation
// -------------------------------------------------------------------------------------------------------------- //
// cropAndResize kernel: the types of the input (images) and the output should be the same
//
template <typename T, typename Z, typename I>
static __global__ void cropAndResizeKernel(T const *images, Nd4jLong const* imagesShape, Z const* boxes, Nd4jLong const* boxesShape,
I const* indices, Nd4jLong const* indexShape, I const* cropSize, Nd4jLong const* cropShape, int method,
double extrapolationVal, T* output, Nd4jLong const* outputShape, int numBoxes, int cropHeight, int cropWidth,
int batchSize, int imageHeight, int imageWidth, int depth) {
for (int b = blockIdx.x; b < numBoxes; b += gridDim.x)
{
Nd4jLong x1Pos[] = {b, 1};
Nd4jLong y1Pos[] = {b, 0};
Nd4jLong y2Pos[] = {b, 2};
Nd4jLong x2Pos[] = {b, 3};
Z y1 = boxes[shape::getOffset(boxesShape, y1Pos)];//->t<T>(b, 0)];
Z x1 = boxes[shape::getOffset(boxesShape, x1Pos)];
Z y2 = boxes[shape::getOffset(boxesShape, y2Pos)];
Z x2 = boxes[shape::getOffset(boxesShape, x2Pos)];
int bIn = indices[b];
if (bIn >= batchSize) {
continue;
}
Z heightScale = (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / Z(cropHeight - 1) : Z(0);
Z widthScale = (cropWidth > 1) ? (x2 - x1) * (imageWidth - 1) / Z(cropWidth - 1) : Z(0);
for (int y = threadIdx.x; y < cropHeight; y += blockDim.x) {
const float inY = (cropHeight > 1)
? y1 * (imageHeight - 1) + y * heightScale
: 0.5 * (y1 + y2) * (imageHeight - 1);
if (inY < 0 || inY > imageHeight - 1) {
for (int x = threadIdx.y; x < cropWidth; x += blockDim.y) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
//crops->p(b, y, x, d, extrapolationVal);
}
}
continue;
}
if (method == 0 /* bilinear */) {
const int topYIndex = sd::math::p_floor(inY);
const int bottomYIndex = sd::math::p_ceil(inY);
const float y_lerp = inY - topYIndex;
for (int x = 0; x < cropWidth; ++x) {
const float in_x = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (in_x < 0 || in_x > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
// crops->p(b, y, x, d, extrapolationVal);
}
continue;
}
int left_x_index = math::p_floor(in_x);
int right_x_index = math::p_ceil(in_x);
T x_lerp = in_x - left_x_index;
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong topLeftPos[] = {bIn, topYIndex, left_x_index, d};
Nd4jLong topRightPos[] = {bIn, topYIndex, right_x_index, d};
Nd4jLong bottomLeftPos[] = {bIn, bottomYIndex, left_x_index, d};
Nd4jLong bottomRightPos[] = {bIn, bottomYIndex, right_x_index, d};
const T topLeft(images[shape::getOffset(imagesShape, topLeftPos)]); //->e<float>(bIn, topYIndex, left_x_index, d));
const T topRight(images[shape::getOffset(imagesShape, topRightPos)]); //->e<float>(bIn, topYIndex, right_x_index, d));
const T bottomLeft(images[shape::getOffset(imagesShape, bottomLeftPos)]);//->e<float>(bIn, bottomYIndex, left_x_index, d));
const T bottomRight(images[shape::getOffset(imagesShape, bottomRightPos)]); //->e<float>(bIn, bottomYIndex, right_x_index, d));
const T top = topLeft + (topRight - topLeft) * x_lerp;
const T bottom = bottomLeft + (bottomRight - bottomLeft) * x_lerp;
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = Z(top + (bottom - top) * y_lerp);
}
}
} else { // method is "nearest neighbor"
for (int x = 0; x < cropWidth; ++x) {
const float inX = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (inX < 0 || inX > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
}
continue;
}
const int closestXIndex = roundf(inX);
const int closestYIndex = roundf(inY);
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
Nd4jLong xPos[] = {bIn, closestYIndex, closestXIndex, d};
auto zIndex = shape::getOffset(outputShape, zPos);
auto xIndex = shape::getOffset(imagesShape, xPos);
output[zIndex] = images[xIndex];
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// cropAndResizeFunctor main algorithm
// context - launch context
// images - batch of images (4D tensor - [batch, height, width, channels])
// boxes - 2D tensor with boxes for crop
// indices - 2D int tensor with indices of boxes to crop
// cropSize - 2D int tensor with crop box sizes
// method - (one of 0 - bilinear, 1 - nearest)
// extrapolationVal - double value of extrapolation
// crops - output (4D tensor - [numBoxes, cropHeight, cropWidth, channels])
//
template <typename T, typename Z, typename I>
void cropAndResizeFunctor_(sd::LaunchContext* context, NDArray const *images, NDArray const *boxes, NDArray const *indices,
NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
const int batchSize = images->sizeAt(0);
const int imageHeight = images->sizeAt(1);
const int imageWidth = images->sizeAt(2);
const int numBoxes = crops->sizeAt(0);
const int cropHeight = crops->sizeAt(1);
const int cropWidth = crops->sizeAt(2);
const int depth = crops->sizeAt(3);
auto stream = context->getCudaStream();
T const* imagesBuf = reinterpret_cast<T const*>(images->specialBuffer());
Z const* boxesBuf = reinterpret_cast<Z const*>(boxes->specialBuffer());
I const* indexBuf = reinterpret_cast<I const*>(indices->specialBuffer());
I const* cropSizes = reinterpret_cast<I const*>(cropSize->specialBuffer());
T* outBuf = reinterpret_cast<T*>(crops->specialBuffer());
int threadsPerBlock = math::nd4j_max(imageHeight * imageWidth, cropHeight * cropWidth);
if(threadsPerBlock > MAX_NUM_THREADS/4)
threadsPerBlock = MAX_NUM_THREADS/4;
NDArray::prepareSpecialUse({crops}, {images, boxes, indices, cropSize});
hipLaunchKernelGGL(( cropAndResizeKernel<T,Z,I>), dim3(batchSize), dim3(threadsPerBlock), 256, *stream, imagesBuf, images->specialShapeInfo(), boxesBuf, boxes->specialShapeInfo(), indexBuf, indices->specialShapeInfo(),
cropSizes, cropSize->specialShapeInfo(), method, extrapolationVal, outBuf, crops->specialShapeInfo(), numBoxes, cropHeight, cropWidth, batchSize, imageHeight, imageWidth, depth);
NDArray::registerSpecialUse({crops}, {images, boxes, indices, cropSize});
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void cropAndResizeFunctor(sd::LaunchContext * context, NDArray const *images, NDArray const *boxes, NDArray const *indices, NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
BUILD_TRIPLE_SELECTOR(images->dataType(), boxes->dataType(), indices->dataType(), cropAndResizeFunctor_,
(context, images, boxes, indices, cropSize, method, extrapolationVal, crops), NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
//
}
BUILD_TRIPLE_TEMPLATE(template void cropAndResizeFunctor_,
(sd::LaunchContext * context, NDArray const* images, NDArray const* boxes, NDArray const* indices, NDArray const* cropSize, int method, double extrapolationVal, NDArray* crops),
NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
}
}
} | b5d5e1414c59411b90063d2e3ff16d1795542f09.cu | /*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
//
// @author George A. Shulinok <[email protected]>
//
#include <ops/declarable/helpers/image_resize.h>
#include <exceptions/cuda_exception.h>
#include <array/NDArrayFactory.h>
namespace sd {
namespace ops {
namespace helpers {
struct BilinearInterpolationData {
Nd4jLong bottomIndex; // Lower source index used in the interpolation
Nd4jLong topIndex; // Upper source index used in the interpolation
// 1-D linear iterpolation scale (see:
// https://en.wikipedia.org/wiki/Bilinear_interpolation)
double interpolarValue;
};
// Older incorrect scaling method that causes all resizes to have a slight
// translation leading to inconsistent results. For example, a flip then a
// resize gives different results than a resize then a flip.
struct LegacyScaler {
_CUDA_HD LegacyScaler(){};
inline _CUDA_HD float operator()(const int x, const float scale) const {
return static_cast<float>(x) * scale;
}
};
// Half pixel scaler scales assuming that the pixel centers are at 0.5, i.e. the
// floating point coordinates of the top,left pixel is 0.5,0.5.
struct HalfPixelScaler {
_CUDA_HD HalfPixelScaler(){};
inline _CUDA_HD float operator()(const int x, const float scale) const {
// Note that we subtract 0.5 from the return value, as the existing bilinear
// sampling code etc assumes pixels are in the old coordinate system.
return (static_cast<float>(x) + 0.5f) * scale - 0.5f;
}
};
// Utility functions
// calculateResizeScale determines the float scaling factor.
inline float calculateResizeScale(Nd4jLong inSize, Nd4jLong outSize,
bool alignCorners) {
return (alignCorners && outSize > 1)
? (inSize - 1) / static_cast<float>(outSize - 1)
: inSize / static_cast<float>(outSize);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// computeInterpolationWeights kernel
// outSize - output length
// inSize - input size
// scale - input scale
// interporationData - result
//
template <class Scaler>
static __global__ void computeInterpolationWeights(Nd4jLong outSize,
Nd4jLong inSize,
double scale,
Nd4jLong channels,
BilinearInterpolationData* interpolationData) {
interpolationData[outSize].bottomIndex = 0;
interpolationData[outSize].topIndex = 0;
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
Scaler scaler;
for (Nd4jLong i = outSize - tid; i >= 0; i -= step) {
double in = scaler(i, scale);
// interpolationData[i].bottomIndex = static_cast<Nd4jLong>(in);
// interpolationData[i].topIndex = sd::math::nd4j_min(interpolationData[i].bottomIndex + 1, inSize - 1);
// interpolationData[i].interpolarValue = in - interpolationData[i].bottomIndex;
double const in_f = sd::math::p_floor<double>(in);
double const in_c = sd::math::p_ceil<double>(in);
interpolationData[i].bottomIndex = sd::math::nd4j_max(static_cast<Nd4jLong>(in_f), (Nd4jLong)0LL);//static_cast<Nd4jLong>(in);
interpolationData[i].topIndex = sd::math::nd4j_min(static_cast<Nd4jLong>(in_c), inSize - 1);
interpolationData[i].interpolarValue = in - in_f;
if (channels) {
math::atomics::nd4j_atomicMul(&interpolationData[i].bottomIndex, channels);
math::atomics::nd4j_atomicMul(&interpolationData[i].topIndex, channels);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm
//
static void resizeImage(sd::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm kernel
//
template <typename T, typename Z>
static __global__ void resizeImageKernel(T const* input, Nd4jLong const* inputShape, Z* outputYptr,
Nd4jLong const* outputShape, Nd4jLong batchSize, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels,
Nd4jLong inRowSize, Nd4jLong outRowSize, Nd4jLong inBatchNumValues,
BilinearInterpolationData* xs_, BilinearInterpolationData* ys_) {
for (auto batch = blockIdx.x; batch < batchSize; batch += gridDim.x ) { // blockIdx.x as batch index
auto pX = input + batch * inBatchNumValues;
for (Nd4jLong y = threadIdx.x; y < outHeight; y += blockDim.x) {
const T* ys_input_lower_ptr = pX + ys_[y].bottomIndex * inRowSize;
const T* ys_input_upper_ptr = pX + ys_[y].topIndex * inRowSize;
double yVal = ys_[y].interpolarValue;
auto pZ = outputYptr + (batch * outHeight + y) * outRowSize;
for (Nd4jLong x = 0; x < outWidth; x++) {
auto xsBottom = xs_[x].bottomIndex;
auto xsTop = xs_[x].topIndex;
auto xVal = xs_[x].interpolarValue;
// process interpolation for all channels
for (int c = 0; c < channels; c++) {
Z topLeft(ys_input_lower_ptr[xsBottom + c]);
Z topRight(ys_input_lower_ptr[xsTop + c]);
Z bottomLeft(ys_input_upper_ptr[xsBottom + c]);
Z bottomRight(ys_input_upper_ptr[xsTop + c]);
Z top = topLeft + (topRight - topLeft) * xVal;
Z bottom = bottomLeft + (bottomRight - bottomLeft) * xVal;
Z resVal = Z(top + (bottom - top) * yVal);
pZ[x * channels + c] = resVal;
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with
template <typename T, typename F>
static void resizeImage_(sd::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output) {
Nd4jLong inRowSize = inWidth * channels;
Nd4jLong inBatchNumValues = inHeight * inRowSize;
Nd4jLong outRowSize = outWidth * channels;
auto stream = context->getCudaStream();
T const* pInput = images->getDataBuffer()->specialAsT<T>(); //reinterpret_cast<T const *>(images->specialBuffer()); // this works only with 'c' direction
F* pOutput = output->dataBuffer()->specialAsT<F>();//reinterpret_cast<F *>(output->specialBuffer());
dim3 batchSizeBlock(batchSize, 1, 1);
dim3 pictureBlock(outHeight, outWidth, channels);
resizeImageKernel<T,F><<<256, 256, 256, *stream>>>(pInput, images->specialShapeInfo(), pOutput,
output->specialShapeInfo(), batchSize, outWidth, outHeight, channels, inRowSize, outRowSize,
inBatchNumValues, xs_, ys_);
auto err = cudaStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeImage_: Cannot synchronize kernel execution", err);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
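// resizeBilinearFunctor_ - computes the height/width scales, builds the x/y interpolation weight tables on
// the device (half-pixel or legacy coordinate mapping) and then resizes the batch with resizeImage_.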
template <typename T, typename F>
static int resizeBilinearFunctor_(sd::LaunchContext* context, NDArray const* images, int const width,
int const height, bool const alignCorners, bool const halfPixelCenter, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
float heightScale = calculateResizeScale(inHeight, outHeight, alignCorners);
float widthScale = calculateResizeScale(inWidth, outWidth, alignCorners);
BilinearInterpolationData* xs_;// = xs.data();
BilinearInterpolationData* ys_;// = xs.data();
cudaError_t err = cudaMalloc(&xs_, sizeof(BilinearInterpolationData) * (outWidth + 1));
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for vertical parts rectangles", err);
}
err = cudaMalloc(&ys_, sizeof(BilinearInterpolationData) * (outHeight + 1));
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for horizontal parts rectangles", err);
}
auto stream = context->getCudaStream();
// Compute the cached interpolation weights on the x and y dimensions.
if (halfPixelCenter) {
computeInterpolationWeights <
HalfPixelScaler ><<<256, 512, 512, *stream>>>(outHeight, inHeight, heightScale, 0, ys_);
computeInterpolationWeights <
HalfPixelScaler ><<<256, 512, 512, *stream>>>(outWidth, inWidth, widthScale, channels, xs_);
}
else {
computeInterpolationWeights <
LegacyScaler ><<<256, 512, 512, *stream>>>(outHeight, inHeight, heightScale, 0, ys_);
computeInterpolationWeights <
LegacyScaler ><<<256, 512, 512, *stream>>>(outWidth, inWidth, widthScale, channels, xs_);
}
printf("Input is %dx%d, Output is %dx%d\n", inHeight, inWidth, outHeight, outWidth);
NDArray::prepareSpecialUse({output}, {images});
resizeImage_<T,F>(context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output);
err = cudaStreamSynchronize(*stream);
NDArray::registerSpecialUse({output}, {images});
err = cudaFree(xs_);
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for vertical parts rectangles", err);
}
err = cudaFree(ys_);
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for horizontal parts rectangles", err);
}
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize by interpolation nearest neighbor algorithm kernel
//
template <typename T>
static __global__ void resizeNeighborKernel(T const* input, Nd4jLong const* inputShape, T* output, Nd4jLong const* outputShape,
Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool alignCorners, bool halfPixelCenters) {
//for (int b = blockIdx.x; b < batchSize; b += gridDim.x)
if (blockIdx.x < batchSize)
{
auto b = blockIdx.x;
for (int y = threadIdx.x; y < outHeight; y += blockDim.x) {
auto posY = alignCorners ? static_cast<Nd4jLong>(sd::math::p_round<float>(halfPixelCenters?((float)y + 0.5f) * heightScale:(float)y * heightScale)) : static_cast<Nd4jLong>(sd::math::p_floor<float>(
halfPixelCenters?((float)y + 0.5f) * heightScale:(float)y * heightScale));
Nd4jLong inY = sd::math::nd4j_min(posY, inHeight - 1);
if (halfPixelCenters) {
inY = sd::math::nd4j_max(0LL, inY);
}
for (int x = threadIdx.y; x < outWidth; x += blockDim.y) {
auto posX = alignCorners ? static_cast<Nd4jLong>(sd::math::p_round<float>(halfPixelCenters?((float)x + 0.5f) * widthScale:(float)x * widthScale)) : static_cast<Nd4jLong>(sd::math::p_floor<float>(
halfPixelCenters?((float)x + 0.5f) * widthScale:(float)x * widthScale));
Nd4jLong inX = sd::math::nd4j_min(posX, inWidth - 1);
if (halfPixelCenters) {
inX = sd::math::nd4j_max(0LL, inX);
}
auto start = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (Nd4jLong e = start; e < channels; e += step) {
Nd4jLong posX[] = {b, inY, inX, e};
Nd4jLong posZ[] = {b, y, x, e};
auto xIndex = shape::getOffset(inputShape, posX);
auto zIndex = shape::getOffset(outputShape, posZ);
output[zIndex] = input[xIndex];
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeNeighborFunctor - main algorithm by nearest neighbor
//
template <typename T>
int resizeNeighborFunctor_(sd::LaunchContext* context, NDArray const* images, int const width, int const height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
// if ((alignCorners && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (alignCorners && outHeight < 2) ||
// (alignCorners && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// // wrong input data
// nd4j_printf("image.resize_nearest_neighbor: Wrong input or output size to resize\n", "");
// return ND4J_STATUS_BAD_ARGUMENTS;
// }
// float heightScale = alignCorners ? (inHeight - 1.f) / float(outHeight - 1.f) : (inHeight / float(outHeight));
// float widthScale = alignCorners ? (inWidth - 1.f) / float(outWidth - 1.f) : (inWidth / float(outWidth));
float heightScale = calculateResizeScale(inHeight, outHeight, alignCorners);
float widthScale = calculateResizeScale(inWidth, outWidth, alignCorners);
auto imagesBuffer = images->getDataBuffer()->specialAsT<T>();//reinterpret_cast<T const*>(images->specialBuffer());
auto outputBuffer = output->dataBuffer()->specialAsT<T>();//reinterpret_cast<T*>(output->specialBuffer());
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {images});
resizeNeighborKernel<T><<<batchSize, outHeight * outWidth, 512, *stream>>>(imagesBuffer, images->specialShapeInfo(), outputBuffer, output->specialShapeInfo(),
batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, alignCorners, halfPixelCenters);
NDArray::registerSpecialUse({output}, {images});
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeImage - resize bilinear algorithm caller
//
void resizeImage(sd::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight,
Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth, Nd4jLong channels, BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_, NDArray* output) {
BUILD_DOUBLE_SELECTOR(images->dataType(), output->dataType(),
resizeImage_, (context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels,
xs_, ys_, output), NUMERIC_TYPES, FLOAT_TYPES);
}
BUILD_DOUBLE_TEMPLATE(template void resizeImage_,(sd::LaunchContext* context, NDArray const* images,
Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth,
Nd4jLong channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output),
NUMERIC_TYPES, FLOAT_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeBilinearFunctor(sd::LaunchContext* context, NDArray const* images, int width, int height,
bool const alignCorners, bool const halfPixelCenter, NDArray* output) {
BUILD_DOUBLE_SELECTOR(images->dataType(), output->dataType(), return resizeBilinearFunctor_, (context, images,
width, height, alignCorners, halfPixelCenter, output), NUMERIC_TYPES, FLOAT_TYPES);
}
// BUILD_SINGLE_TEMPLATE(template int resizeBilinearFunctor_, (sd::LaunchContext* context,
// NDArray const* images, int const width, int const height, bool const alignCorners,
// bool const halfPixelCenter, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeNeighborFunctor(sd::LaunchContext* context, NDArray const* images, int const width, int const height,
bool const alignCorners, bool const halfPixelCenter, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeNeighborFunctor_,
(context, images, width, height, alignCorners, halfPixelCenter, output), LIBND4J_TYPES);
}
// BUILD_SINGLE_TEMPLATE(template int resizeNeighborFunctor_, (sd::LaunchContext* context, NDArray const* images,
// int width, int height, bool const alignCorners, bool const halfPixelCenter, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bicubic interpolation
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct ImageResizerState {
explicit ImageResizerState(bool alignCorners, bool halfPixelCenters)
: _alignCorners(alignCorners),
_halfPixelCenters(halfPixelCenters) {}
// ValidateAndCalculateOutputSize checks the bounds on the input tensors
// and requested size, sets up some of the resizing state such as the
// heightScale and widthScale, and calculates the output size.
// If any of these operations fails, it sets an error status in
// the context, which the caller must check.
int validateAndCalculateOutputSize(NDArray const* input, int const width, int const height) {
//
batchSize = input->sizeAt(0);//.dim_size(0);
outHeight = height;
outWidth = width; //internal::SubtleMustCopy(Svec(1));
inHeight = static_cast<int32_t>(input->sizeAt(1));
inWidth = static_cast<int32_t>(input->sizeAt(2));
channels = input->sizeAt(3); //.dim_size(3);
heightScale = calculateResizeScale(inHeight, outHeight, _alignCorners);
widthScale = calculateResizeScale(inWidth, outWidth, _alignCorners);
// Guard against overflows
if (ceilf((outHeight - 1) * heightScale) > static_cast<float>(DataTypeUtils::max<int>())) {
nd4j_printf("resize_bicubic: Upper overflow occurs for resize height (%f)\n", ceilf((outHeight - 1) * heightScale));
return Status::CODE(ND4J_STATUS_BAD_INPUT, "resize_bicubic: Upper overflow occurs for resize height");
}
if (ceilf((outWidth - 1) * widthScale) > static_cast<float>(DataTypeUtils::max<int>())) {
nd4j_printf("resize_bicubic: Upper overflow occurs for resize width (%f)\n", ceilf((outWidth - 1) * widthScale));
return Status::CODE(ND4J_STATUS_BAD_INPUT, "resize_bicubic: Upper overflow occurs for resize width");
}
return Status::OK();
}
// Calculates all the required variables, and allocates the output.
int validateAndCreateOutput(NDArray const* input, int const width, int const height) {
return validateAndCalculateOutputSize(input, width, height);
}
Nd4jLong batchSize;
Nd4jLong outHeight;
Nd4jLong outWidth;
Nd4jLong inHeight;
Nd4jLong inWidth;
Nd4jLong channels;
float heightScale;
float widthScale;
NDArray* output = nullptr;
cudaStream_t* stream;
private:
bool _alignCorners;
bool _halfPixelCenters;
};
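// WeightsAndIndices holds the four taps of the cubic kernel for a single output coordinate: the clamped
// input indices, their weights and the number of cached values (_advance) reusable from the previous column.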
struct WeightsAndIndices {
float _weight0;
float _weight1;
float _weight2;
float _weight3;
Nd4jLong _index0;
Nd4jLong _index1;
Nd4jLong _index2;
Nd4jLong _index3;
int _advance; // advance value.
};
class CachedInterpolationCalculator {
public:
_CUDA_HD CachedInterpolationCalculator() : _indexes{-1, -1, -1, -1} {}
// Advances iteration. Returns the number of values that should be copied from
// the current point to the next point. The copying should always be done by
// copying the last <retval> values from the old point to the first <retval>
// values of the new point.
inline _CUDA_HD int Advance(const Nd4jLong x0, const Nd4jLong x1, const Nd4jLong x2,
const Nd4jLong x3) {
// We use 2 hands and walk through, copying from one to another where
// we already have values.
// Invariant: newIndiciesHand <= cachedValuesHand
const Nd4jLong new_x_indices[4] = {x0, x1, x2, x3};
int cachedValuesHand = 0;
int newIndiciesHand = 0;
while (cachedValuesHand < 4) {
if (_indexes[cachedValuesHand] == new_x_indices[newIndiciesHand]) {
if (newIndiciesHand < cachedValuesHand) {
_indexes[newIndiciesHand] = _indexes[cachedValuesHand];
}
newIndiciesHand++;
}
cachedValuesHand++;
}
switch (newIndiciesHand) {
case 0:
_indexes[0] = x0;
case 1:
_indexes[1] = x1;
case 2:
_indexes[2] = x2;
case 3:
_indexes[3] = x3;
break;
}
return newIndiciesHand;
}
private:
Nd4jLong _indexes[4];
};
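// initCoefTableKernel fills the lookup table with both halves of the cubic convolution kernel: entry 2*i
// holds the weight for a tap at distance x = i / tableSize and entry 2*i + 1 the weight for distance x + 1.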
static __global__ void initCoefTableKernel(const double a, float* table, Nd4jLong tableSize) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i <= tableSize; i += step) {
float x = i * 1.0 / tableSize;
table[i * 2] = ((a + 2) * x - (a + 3)) * x * x + 1;
x += 1.0;
table[i * 2 + 1] = ((a * x - 5 * a) * x + 8 * a) * x - 4 * a;
}
}
static const Nd4jLong kTableSize = (1 << 10);
float* initCoeffsTable(const double a, cudaStream_t* stream) {
// Allocate and initialize coefficients table using Bicubic
// convolution algorithm.
// https://en.wikipedia.org/wiki/Bicubic_interpolation
float* coeffs_table; // = new float[(kTableSize + 1) * 2];
auto err = cudaMalloc(&coeffs_table, sizeof(float) * ((kTableSize + 1) * 2));
if (err != 0) {
throw cuda_exception::build("helpers::initCoeffsTable: Cannot allocate memory for coefficients table", err);
}
initCoefTableKernel<<<128,128,128, *stream>>>(a, coeffs_table, kTableSize);
err = cudaStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::initCoeffsTable: Cannot synchronize kernel", err);
}
return coeffs_table;
}
// _CUDA_HD const float* getCoeffsTable(const bool use_keys_cubic) {
// // Static so that we initialize it on first use
// if (use_keys_cubic) {
// // http://ieeexplore.ieee.org/document/1163711/
// // R. G. Keys. Cubic convolution interpolation for digital image
// // processing. IEEE Transactions on Acoustics, Speech, and Signal
// // Processing, 29(6):1153–1160, 1981.
// //static const float* coeffs_table = initCoeffsTable(-0.5f, stream);
// return sCoeffsTableHalf;
// } else {
// //static const float* coeffs_table = initCoeffsTable(-0.75f, stream);
// return sCoeffsTableThreeFourth;
// }
// }
inline _CUDA_HD Nd4jLong bound(Nd4jLong val, Nd4jLong limit) {
return math::nd4j_min(limit - 1ll, math::nd4j_max(Nd4jLong{0}, val));
}
template <typename T>
inline _CUDA_HD float interpolate1D(const float weight0, const float weight1, const float weight2, const float weight3,
const T value0, const T value1, const T value2, const T value3) {
return static_cast<float>(value0) * weight0 +
static_cast<float>(value1) * weight1 +
static_cast<float>(value2) * weight2 +
static_cast<float>(value3) * weight3;
}
// Compute the 1D interpolation for a given X index using the y_weights
static _CUDA_HD float compute(float values[4], const float xW0, const float xW1, const float xW2, const float xW3) {
return interpolate1D(xW0, xW1, xW2, xW3, values[0], values[1],values[2], values[3]);
}
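// getWeightsAndIndices computes the four clamped tap indices and their weights for one output coordinate
// from the precomputed coefficients table; with use_keys_cubic the weights of out-of-range taps are zeroed
// and the remaining weights are renormalized to sum to 1.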
template <typename Scaler, bool use_keys_cubic>
inline _CUDA_HD void getWeightsAndIndices(float const* coeffs_table, const float scale, const Nd4jLong out_loc, const Nd4jLong limit, WeightsAndIndices* out) {
const Scaler scaler;
const float in_loc_f = scaler(out_loc, scale);
const Nd4jLong in_loc = math::nd4j_floor<float, Nd4jLong>(in_loc_f);
const float delta = in_loc_f - in_loc;
const Nd4jLong offset = math::nd4j_round<float, Nd4jLong>(delta * kTableSize);
//const float* coeffs_table = getCoeffsTable(use_keys_cubic);
if (use_keys_cubic) {
// The legacy code placed more weight on the edge pixels, since bounding
// the set of inputs to sample could cause an edge pixel to be repeated.
// Here we change the behavior at borders to match that used by the
// scale_and_translate_op, where sampling locations outside the image have
// their weight set to 0, and the weights are renormalized so that their sum
// is 1.0.
out->_index0 = bound(in_loc - 1, limit);
out->_weight0 =
(out->_index0 == in_loc - 1 ? coeffs_table[offset * 2 + 1] : 0.0f);
out->_index1 = bound(in_loc, limit);
out->_weight1 = (out->_index1 == in_loc ? coeffs_table[offset * 2] : 0.0f);
out->_index2 = bound(in_loc + 1, limit);
out->_weight2 =
(out->_index2 == in_loc + 1 ? coeffs_table[(kTableSize - offset) * 2]
: 0.0f);
out->_index3 = bound(in_loc + 2, limit);
out->_weight3 = (out->_index3 == in_loc + 2
? coeffs_table[(kTableSize - offset) * 2 + 1]
: 0.0f);
const float weight_sum =
out->_weight0 + out->_weight1 + out->_weight2 + out->_weight3;
if (math::nd4j_abs(weight_sum) >= 1000.0f * DataTypeUtils::min<float>()) {
const float one_over_weight_sum = 1.0f / weight_sum;
out->_weight0 *= one_over_weight_sum;
out->_weight1 *= one_over_weight_sum;
out->_weight2 *= one_over_weight_sum;
out->_weight3 *= one_over_weight_sum;
}
} else {
out->_weight0 = coeffs_table[offset * 2 + 1];
out->_weight1 = coeffs_table[offset * 2];
out->_weight2 = coeffs_table[(kTableSize - offset) * 2];
out->_weight3 = coeffs_table[(kTableSize - offset) * 2 + 1];
out->_index0 = bound(in_loc - 1, limit);
out->_index1 = bound(in_loc, limit);
out->_index2 = bound(in_loc + 1, limit);
out->_index3 = bound(in_loc + 2, limit);
}
}
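// accumulateChannelsKernel scales the cached x indices by the channel count so that they can be used
// directly as offsets into the interleaved input rows.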
static __global__ void accumulateChannelsKernel(WeightsAndIndices* pXWais, Nd4jLong outWidth, Nd4jLong channels) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto x = start; x < outWidth; x += step) {
pXWais[x]._index0 *= channels;
pXWais[x]._index1 *= channels;
pXWais[x]._index2 *= channels;
pXWais[x]._index3 *= channels;
}
}
static __global__ void advaceWeightsAndIndicesKernel(float const* cacheTable, CachedInterpolationCalculator* calc, WeightsAndIndices* pXWais, Nd4jLong inWidth, float widthScale,
Nd4jLong outWidth, Nd4jLong channels, bool halfPixelCenters) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto x = start; x < outWidth; x += step) {
if (halfPixelCenters)
getWeightsAndIndices<HalfPixelScaler, true>(cacheTable, widthScale, x, inWidth, &pXWais[x]);
else
getWeightsAndIndices<LegacyScaler, false>(cacheTable, widthScale, x, inWidth, &pXWais[x]);
pXWais[x]._advance = calc->Advance(pXWais[x]._index0, pXWais[x]._index1, pXWais[x]._index2, pXWais[x]._index3);
}
}
// resizerState and xWais are device allocated
static void computeXWeightsAndIndices(float const* coeffsTable, const ImageResizerState& resizerState,
const bool halfPixelCenters,
WeightsAndIndices* pXWais) {
auto stream = resizerState.stream;
auto outWidth = resizerState.outWidth;
CachedInterpolationCalculator calc; // = new CachedInterpolationCalculator;
CachedInterpolationCalculator* pCalcD;
auto err = cudaMalloc(&pCalcD, sizeof(CachedInterpolationCalculator));
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot allocated device memory for interpolate calculator", err);
}
err = cudaMemcpyAsync(pCalcD, &calc, sizeof(CachedInterpolationCalculator), cudaMemcpyHostToDevice, *stream);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot set up device memory for interpolate calculator", err);
}
advaceWeightsAndIndicesKernel<<<128, 128, 128, *stream>>>(coeffsTable, pCalcD, pXWais, resizerState.inWidth, resizerState.widthScale, outWidth, resizerState.channels, halfPixelCenters);
err = cudaFree(pCalcD);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot deallocated device memory for interpolate calculator", err);
}
err = cudaStreamSynchronize(*stream);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot synchronize stream after advance weights and indicers", err);
}
// Scale the values so they can be used as offsets into buffers.
accumulateChannelsKernel<<<128, 128, 512, *stream>>>(pXWais, outWidth, resizerState.channels);
err = cudaStreamSynchronize(*stream);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot synchronize stream after accumulate channels", err);
}
}
template <typename T>
static _CUDA_HD FORCEINLINE float computeYInterpolation(
int which, int channelNum, const WeightsAndIndices& yWai,
const T* pY0, const T* pY1, const T* pY2, const T* pY3,
const WeightsAndIndices& xWai) {
int xIndex;
switch (which) {
case 0:
xIndex = xWai._index0;
break;
case 1:
xIndex = xWai._index1;
break;
case 2:
xIndex = xWai._index2;
break;
default:
xIndex = xWai._index3;
break;
}
const Nd4jLong pt_index = xIndex + channelNum;
return interpolate1D<T>(yWai._weight0, yWai._weight1, yWai._weight2,
yWai._weight3, pY0[pt_index], pY1[pt_index],
pY2[pt_index], pY3[pt_index]);
}
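    // bicubicInterpolateWithCachingKernel: blocks stride over batch images and threads over output rows
    // (the launch in bicubicInterpolateWithCaching uses a single thread per block, so one thread walks
    // all rows of its image). For each output row the four contributing input rows are interpolated
    // column by column, reusing previously computed column values according to xWais[x]._advance.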
template <typename T>
static __global__ void bicubicInterpolateWithCachingKernel(float const* cachedTable, T const* inputPtr, ImageResizerState* pResizerState, WeightsAndIndices* xWais, bool halfPixelCenters, Nd4jLong inBatchWidth, Nd4jLong inRowWidth, float* outputPtr) {
// auto numChannels = pResizerState->channels;
for (Nd4jLong b = blockIdx.x; b < pResizerState->batchSize; b += gridDim.x) {
auto pInput = inputPtr + b * inBatchWidth;
float* cachedValue;
for (Nd4jLong y = threadIdx.x; y < pResizerState->outHeight; y += blockDim.x) {
if (threadIdx.x == 0) {
extern __shared__ char sharedChar[];
cachedValue = reinterpret_cast<float*>(sharedChar);
}
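                // cachedValue points into the dynamic shared-memory buffer; with the one-thread-per-block
                // launch used by bicubicInterpolateWithCaching only threadIdx.x == 0 executes, so the
                // assignment above always runs before cachedValue is read below.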
auto pos = (b * pResizerState->outHeight + y) * pResizerState->outWidth * pResizerState->channels;
auto pOutput = &outputPtr[pos];
struct WeightsAndIndices yWai;
if (halfPixelCenters) {
getWeightsAndIndices<HalfPixelScaler, true>(cachedTable, pResizerState->heightScale, y, pResizerState->inHeight, &yWai);
} else {
getWeightsAndIndices<LegacyScaler, false>(cachedTable, pResizerState->heightScale, y, pResizerState->inHeight, &yWai);
}
// Make pointers represent offsets of data in inputBPtr.
const T* y_ptr_0 = pInput + yWai._index0 * inRowWidth;
const T* y_ptr_1 = pInput + yWai._index1 * inRowWidth;
const T* y_ptr_2 = pInput + yWai._index2 * inRowWidth;
const T* y_ptr_3 = pInput + yWai._index3 * inRowWidth;
if (pResizerState->channels == 3) {
// Manually unroll case of 3 channels.
float cached_value_0[4] = {0};
float cached_value_1[4] = {0};
float cached_value_2[4] = {0};
for (Nd4jLong x = 0; x < pResizerState->outWidth; ++x) {
const WeightsAndIndices& xWai = xWais[x];
// Shift values in cached_value_* to fill first '_advance' values.
switch (xWai._advance) {
case 3:
cached_value_0[0] = cached_value_0[1];
cached_value_0[1] = cached_value_0[2];
cached_value_0[2] = cached_value_0[3];
cached_value_1[0] = cached_value_1[1];
cached_value_1[1] = cached_value_1[2];
cached_value_1[2] = cached_value_1[3];
cached_value_2[0] = cached_value_2[1];
cached_value_2[1] = cached_value_2[2];
cached_value_2[2] = cached_value_2[3];
break;
case 2:
cached_value_0[0] = cached_value_0[2];
cached_value_0[1] = cached_value_0[3];
cached_value_1[0] = cached_value_1[2];
cached_value_1[1] = cached_value_1[3];
cached_value_2[0] = cached_value_2[2];
cached_value_2[1] = cached_value_2[3];
break;
case 1: {
cached_value_0[0] = cached_value_0[3];
cached_value_1[0] = cached_value_1[3];
cached_value_2[0] = cached_value_2[3];
break;
}
}
// Set the remaining '4-_advance' values by computing.
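                        // (intentional fall-through: starting at case '_advance', every later slot is recomputed)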
switch (xWai._advance) {
case 0:
cached_value_0[0] = computeYInterpolation(0, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[0] = computeYInterpolation(0, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[0] = computeYInterpolation(0, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
case 1:
cached_value_0[1] = computeYInterpolation(1, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[1] = computeYInterpolation(1, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[1] = computeYInterpolation(1, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
case 2:
cached_value_0[2] = computeYInterpolation(2, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[2] = computeYInterpolation(2, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[2] = computeYInterpolation(2, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
case 3:
cached_value_0[3] = computeYInterpolation(3, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[3] = computeYInterpolation(3, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[3] = computeYInterpolation(3, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
// break;
}
pOutput[x * pResizerState->channels + 0] = compute(cached_value_0, xWai._weight0, xWai._weight1,
xWai._weight2, xWai._weight3);
pOutput[x * pResizerState->channels + 1] = compute(cached_value_1, xWai._weight0, xWai._weight1,
xWai._weight2, xWai._weight3);
pOutput[x * pResizerState->channels + 2] = compute(cached_value_2, xWai._weight0, xWai._weight1,
xWai._weight2, xWai._weight3);
}
} else {
for (Nd4jLong x = 0; x < pResizerState->outWidth; ++x) {
const WeightsAndIndices& xWai = xWais[x];
// Shift values in cachedValue to fill first '_advance' values.
switch (xWai._advance) {
case 3:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = cachedValue[4 * c + 1];
cachedValue[4 * c + 1] = cachedValue[4 * c + 2];
cachedValue[4 * c + 2] = cachedValue[4 * c + 3];
}
break;
case 2:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = cachedValue[4 * c + 2];
cachedValue[4 * c + 1] = cachedValue[4 * c + 3];
}
break;
case 1: {
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = cachedValue[4 * c + 3];
}
break;
}
}
// Set the remaining '4-_advance' values by computing.
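                        // (intentional fall-through, as in the three-channel branch above)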
switch (xWai._advance) {
case 0:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = computeYInterpolation(0, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
case 1:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 1] = computeYInterpolation(1, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
case 2:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 2] = computeYInterpolation(2, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
case 3:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 3] = computeYInterpolation(3, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
// break;
}
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
pOutput[x * pResizerState->channels + c] = compute(&cachedValue[4 * c], xWai._weight0, xWai._weight1, xWai._weight2, xWai._weight3);
}
}
}
}
}
}
template <typename T>
static void
bicubicInterpolateWithCaching(NDArray const* image, ImageResizerState const& resizerState, bool const halfPixelCenters, NDArray* output) {
const auto numChannels = resizerState.channels;
const Nd4jLong inRowWidth = resizerState.inWidth * numChannels;
const Nd4jLong inBatchWidth = resizerState.inHeight * inRowWidth;
auto stream = resizerState.stream; //output->getContext()->getCudaStream();
ImageResizerState* resizerStateD;
auto err = cudaMalloc(&resizerStateD, sizeof(ImageResizerState));
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot allocate memory for resizerState", err);
}
err = cudaMemcpyAsync(resizerStateD, &resizerState, sizeof(ImageResizerState), cudaMemcpyHostToDevice, *stream);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot set up memory for resizerState", err);
}
// float* cachedValue = nullptr;
// size_t cachedSize = sizeof(float) * (numChannels == 3 ? 0 : 4 * numChannels);
// if (cachedSize) {
// err = cudaMalloc(reinterpret_cast<void**>(&cachedValue), cachedSize);
// if (err != 0) {
// throw cuda_exception::build(
// "helpers::bicubicInterpolateWithCaching: Cannot allocate memory for cached values", err);
// }
// err = cudaMemset(cachedValue, 0, cachedSize);
// if (err != 0) {
// throw cuda_exception::build(
// "helpers::bicubicInterpolateWithCaching: Cannot set up memory for cached values", err);
// }
// }
WeightsAndIndices* xWais; //(resizerState.outWidth);
err = cudaMalloc(&xWais, sizeof(WeightsAndIndices) * resizerState.outWidth);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot allocate memory for weights and indices", err);
}
auto coeffsTable = halfPixelCenters?initCoeffsTable(-0.5, stream): initCoeffsTable(-0.75, stream);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: computeXWeigtsAndInidces finished with error", err);
}
computeXWeightsAndIndices(coeffsTable, resizerState, halfPixelCenters, xWais);
err = cudaStreamQuery(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: computeXWeigtsAndInidces finished with error", err);
}
const T* pInput = image->getDataBuffer()->specialAsT<T>();
float* pOutput = output->dataBuffer()->specialAsT<float>(); //_data.data();
bicubicInterpolateWithCachingKernel<T><<<128, 1, 512, *stream>>>(coeffsTable, pInput,
resizerStateD, xWais, halfPixelCenters, inBatchWidth, inRowWidth, pOutput);
err = cudaStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Kernels finished with error", err);
}
err = cudaFree(resizerStateD);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for resizerState", err);
}
// if (cachedSize)
// err = cudaFree(cachedValue);
// if (err != 0) {
// throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for cached values", err);
// }
err = cudaFree(xWais);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for weights and indices", err);
}
err = cudaFree(coeffsTable);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for coefficients table", err);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
int resizeBicubicFunctor_(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output) {
return Status::OK();
}
int resizeBicubicFunctor(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctor_, (context, image,
width, height, preserveAspectRatio, antialias, output), NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBicubicFunctor_, (sd::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output), NUMERIC_TYPES);
// ------------------------------------------------------------------------------------------------------------------ //
struct CachedInterpolation {
Nd4jLong start;
Nd4jLong end;
float startScale;
float endMinusOneScale;
bool needsBounding;
};
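    // fillInterpolationCache: for every output column x, precomputes the half-open input range
    // [start, end) that contributes to it, the fractional weights of the first and last contributing
    // columns, and whether those indices need clamping to the image border.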
static __global__ void fillInterpolationCache(CachedInterpolation* xCached, Nd4jLong cacheLen, Nd4jLong inWidth, float widthScale) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto increment = blockDim.x * gridDim.x;
for (auto x = start; x < cacheLen; x += increment) {
auto& xCache = xCached[x];
const float inX = x * widthScale;
const float inX1 = (x + 1) * widthScale;
Nd4jLong v = math::nd4j_floor<float, Nd4jLong>(inX);
xCache.start = v;
xCache.startScale = v < inX ? (v + 1 > inX1 ? widthScale : v + 1 - inX) : (v + 1 > inX1 ? inX1 - v : 1.f);
v = math::nd4j_ceil<float, Nd4jLong>(inX1);
xCache.end = v--;
xCache.endMinusOneScale = v < inX ? (v + 1 > inX1 ? widthScale : v + 1 - inX) : (v + 1 > inX1 ? inX1 - v : 1.f);
xCache.needsBounding = bound(xCache.start, inWidth) != xCache.start || bound(xCache.end - 1, inWidth) != (xCache.end - 1);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
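    // ScaleCache describes one contributing input row for the area average: a pointer to that row's
    // data and its vertical weight.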
template <typename T>
struct ScaleCache {
float yScale;
T const* yPtr;
};
    // Computes, for the three channels, the weighted sum of input values over the x range described
    // by xCache, across the input rows and vertical weights stored in yScaleCache.
    //
    // Note: x index bounding is applied only when xCache.needsBounding is set (checked at run time
    // through the boundIfNeeded lambda).
template <typename T>
static __device__ void computePatchSumOf3Channels(float scale,
const ImageResizerState& st,
ScaleCache<T> const* yScaleCache,
Nd4jLong ptrsLen,
const CachedInterpolation& xCache,
float* outputPtr) {
bool const needsXBounding = xCache.needsBounding;
auto boundIfNeeded = [needsXBounding](Nd4jLong x, Nd4jLong y) -> Nd4jLong {
return (needsXBounding ? bound(x, y) : (x));
};
float sum_0 = 0;
float sum_1 = 0;
float sum_2 = 0;
for (int i = 0; i < ptrsLen; ++i) {
const T* ptr = yScaleCache[i].yPtr;
float scaleX = xCache.startScale;
Nd4jLong offset = 3 * boundIfNeeded(xCache.start, st.inWidth);
float sum_y_0 = static_cast<float>(ptr[offset + 0]) * scaleX;
float sum_y_1 = static_cast<float>(ptr[offset + 1]) * scaleX;
float sum_y_2 = static_cast<float>(ptr[offset + 2]) * scaleX;
if (xCache.start + 1 != xCache.end) {
for (Nd4jLong x = xCache.start + 1; x < xCache.end - 1; ++x) {
Nd4jLong offset = 3 * boundIfNeeded(x, st.inWidth);
sum_y_0 += static_cast<float>(ptr[offset + 0]);
sum_y_1 += static_cast<float>(ptr[offset + 1]);
sum_y_2 += static_cast<float>(ptr[offset + 2]);
}
scaleX = xCache.endMinusOneScale;
offset = st.channels * boundIfNeeded(xCache.end - 1, st.inWidth);
sum_y_0 += static_cast<float>(ptr[offset + 0]) * scaleX;
sum_y_1 += static_cast<float>(ptr[offset + 1]) * scaleX;
sum_y_2 += static_cast<float>(ptr[offset + 2]) * scaleX;
}
sum_0 += sum_y_0 * yScaleCache[i].yScale;
sum_1 += sum_y_1 * yScaleCache[i].yScale;
sum_2 += sum_y_2 * yScaleCache[i].yScale;
}
outputPtr[0] = sum_0 * scale;
outputPtr[1] = sum_1 * scale;
outputPtr[2] = sum_2 * scale;
}
    // Computes, for every channel c, the weighted sum of input values over the x range described
    // by xCache, across the input rows and vertical weights stored in yScaleCache.
    //
    // Note: x index bounding is applied only when xCache.needsBounding is set (checked at run time
    // through the boundIfNeeded lambda).
template <typename T>
static __device__ void computePatchSum(float scale, const ImageResizerState& st,
ScaleCache<T> const* yScaleCache, Nd4jLong ptrsLen,
const CachedInterpolation& xCache,
float* outputPtr) {
bool const needsXBounding = xCache.needsBounding;
auto boundIfNeeded = [needsXBounding](Nd4jLong x, Nd4jLong y) -> Nd4jLong {
return (needsXBounding ? bound(x, y) : (x));
};
const auto numChannels = st.channels;
for (Nd4jLong c = 0; c < numChannels; ++c) {
float sum = 0;
for (int i = 0; i < ptrsLen; ++i) {
T const* ptr = yScaleCache[i].yPtr;
float scaleX = xCache.startScale;
float sumY = static_cast<float>(ptr[numChannels * boundIfNeeded(xCache.start, st.inWidth) + c]) * scaleX;
if (xCache.start + 1 != xCache.end) {
for (Nd4jLong x = xCache.start + 1; x < xCache.end - 1; ++x) {
sumY += static_cast<float>(
ptr[numChannels * boundIfNeeded(x, st.inWidth) + c]);
}
scaleX = xCache.endMinusOneScale;
sumY += static_cast<float>(ptr[numChannels * boundIfNeeded(xCache.end - 1, st.inWidth) + c]) * scaleX;
}
sum += sumY * yScaleCache[i].yScale;
}
outputPtr[c] = sum * scale;
}
}
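    // resizeAreaKernel: blocks stride over batch images and threads over output rows. Each thread fills
    // its slice of the ScaleCache pool with the contributing input rows and their vertical weights, then
    // accumulates every output column via computePatchSumOf3Channels / computePatchSum.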
template <typename T>
static __global__ void resizeAreaKernel(ImageResizerState const* pSt, CachedInterpolation const* caches, float scale,
T const* inputPtr, Nd4jLong const* inputShape, float* outputPtr, Nd4jLong const* outputShape, ScaleCache<T>* cachePool) { //batch * outWidth * outHeight
for (auto batch = blockIdx.x; batch < pSt->batchSize; batch += gridDim.x) {
for (auto y = threadIdx.x; y < pSt->outHeight; y += blockDim.x) {
const float inY = y * pSt->heightScale;
const float inY1 = (y + 1) * pSt->heightScale;
// The start and end height indices of all the cells that could
// contribute to the target cell.
const Nd4jLong yStart = math::nd4j_floor<float, Nd4jLong>(inY);
const Nd4jLong yEnd = math::nd4j_ceil<float, Nd4jLong>(inY1);
auto scalesDim = yEnd - yStart;
auto yScaleCache = cachePool + (batch * pSt->outHeight + y) * pSt->outWidth;
//auto startPtr = sharedPtr + y * scalesDim * sizeof(float);
//float* yScales = yScalesShare + y * sizeof(float) * scalesDim;//reinterpret_cast<float*>(startPtr); //shared + y * scalesDim * y + scalesDim * sizeof(T const *) [scalesDim];
//T const** yPtrs = yPtrsShare + y * sizeof(T const*) * scalesDim; //[scalesDim];
//yPtrs = reinterpret_cast<T const**>(sharedBuf);
float* output = outputPtr + (batch * pSt->outHeight + y) * pSt->channels * pSt->outWidth;
//int k = 0;
for (Nd4jLong i = yStart, k = 0; i < yEnd; ++i, ++k) {
float scaleY;
if (i < inY) {
scaleY = (i + 1 > inY1 ? pSt->heightScale : i + 1 - inY);
} else {
scaleY = (i + 1 > inY1 ? inY1 - i : 1.0);
}
yScaleCache[k].yScale = scaleY;
yScaleCache[k].yPtr = inputPtr + (batch * pSt->inHeight * pSt->inWidth * pSt->channels + bound(i, pSt->inHeight) * pSt->inWidth * pSt->channels);
}
if (pSt->channels == 3) {
for (Nd4jLong x = 0; x < pSt->outWidth; ++x) {
const CachedInterpolation& xCache = caches[x];
computePatchSumOf3Channels<T>(scale, *pSt, yScaleCache, scalesDim, xCache, output);
output += pSt->channels;
}
} else {
for (Nd4jLong x = 0; x < pSt->outWidth; ++x) {
const CachedInterpolation &xCache = caches[x];
computePatchSum<T>(scale, *pSt, yScaleCache, scalesDim, xCache, output);
output += pSt->channels;
}
}
}
}
}
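    // Host wrapper: copies the resizer state to the device, allocates a ScaleCache pool of
    // batchSize * outHeight * outWidth entries, launches resizeAreaKernel and releases the temporaries.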
template <typename T>
static void resizeArea(cudaStream_t* stream, ImageResizerState const& st, CachedInterpolation* cache,
NDArray const* input, NDArray* output) {
T const* inputPtr = reinterpret_cast<T const*>(input->specialBuffer());
// float* yScales;
// T const** yPtrs;
float scale = 1.f / (st.heightScale * st.widthScale);
auto outputPtr = reinterpret_cast<float*>(output->specialBuffer()); // output is always float. TO DO: provide another float types also with template <typename X, typename Z> declaration
ImageResizerState* pSt;
auto err = cudaMalloc(&pSt, sizeof(ImageResizerState));
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot allocate memory for ImageResizerState", err);
}
err = cudaMemcpyAsync(pSt, &st, sizeof(ImageResizerState), cudaMemcpyHostToDevice, *stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot copy to device memory", err);
}
ScaleCache<T>* cachePool;
auto cachePoolSize = sizeof(ScaleCache<T>) * st.batchSize * st.outWidth * st.outHeight;
err = cudaMalloc(&cachePool, cachePoolSize);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot allocate memory for cache", err);
}
resizeAreaKernel<T><<<128, 128, 2048, *stream>>>(pSt, cache, scale, inputPtr, input->specialShapeInfo(), outputPtr,
output->specialShapeInfo(), cachePool);
err = cudaStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: An error occured with kernel running", err);
}
err = cudaFree(cachePool);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot deallocate memory for cache", err);
}
err = cudaFree(pSt);
if (err != 0) {
throw cuda_exception::build("helpers::resizeArea: Cannot deallocate memory for ImageResizeState", err);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
template <typename T>
int resizeAreaFunctor_(sd::LaunchContext* context, NDArray const* image, int const width, int const height,
bool const alignCorners, NDArray* output) {
ImageResizerState st(alignCorners, false); // Create resize info
auto res = st.validateAndCalculateOutputSize(image, width, height);
auto stream = context->getCudaStream();
if (Status::OK() == res) {
CachedInterpolation* xCached;
//(st.outWidth);
auto err = cudaMalloc(&xCached, sizeof(CachedInterpolation) * st.outWidth);
if (err != 0) {
throw cuda_exception::build("helpers::resizeAreaFunctor_: Cannot allocate memory for cached interpolations", err);
}
NDArray::prepareSpecialUse({output}, {image});
fillInterpolationCache<<<128, 128, 256, *stream>>>(xCached, st.outWidth, st.inWidth, st.widthScale);
resizeArea<T>(stream, st, xCached, image, output);
err = cudaStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeAreaFunctor_: Error occured when kernel was running", err);
}
err = cudaFree(xCached);
if (err != 0) {
throw cuda_exception::build("helpers::resizeAreaFunctor_: Cannot deallocate memory for cached interpolations", err);
}
NDArray::registerSpecialUse({output}, {image});
}
return res;
}
int resizeAreaFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
bool const alignCorners, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeAreaFunctor_, (context, image, width, height, alignCorners, output), NUMERIC_TYPES);
}
// ------------------------------------------------------------------------------------------------------------------ //
// simplified bicubic resize without antialiasing
//
template <typename T>
int resizeBicubicFunctorA_(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
ImageResizerState st(alignCorners, halfPixelCenters); // align_corners, half_pixel_align
st.stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {image});
int res = st.validateAndCreateOutput(image, width, height);
if (res == Status::OK())
bicubicInterpolateWithCaching<T>(image, st, halfPixelCenters, output);
NDArray::registerSpecialUse({output}, {image});
return res;
}
int resizeBicubicFunctorA(sd::LaunchContext * context, NDArray const* image, int width, int height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctorA_, (context,
image, width, height, alignCorners, halfPixelCenters, output), NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBicubicFunctorA_, (sd::LaunchContext * context,
NDArray const* image, int width, int height, bool const alignCorners, bool const halfPixelCenters, NDArray* output), NUMERIC_TYPES);
// ------------------------------------------------------------------------------------------------------------------ //
int resizeImagesFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
ImageResizeMethods method, bool alignCorners, NDArray* output) {
switch (method) {
case kResizeBilinear:
return resizeBilinearFunctor(context, image, width, height, alignCorners, false, output);
case kResizeNearest:
return resizeNeighborFunctor(context, image, width, height, alignCorners, false, output);
case kResizeBicubic:
return resizeBicubicFunctor(context, image, width, height, alignCorners, false, output);
case kResizeArea:
return resizeAreaFunctor(context, image, width, height, alignCorners, output);
default:
throw std::runtime_error("helper::resizeImagesFunctor: Wrong resize method.");
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// --------------------------------------------------------------------------------------------------------------- //
// Crop and Resize helper implementation
// -------------------------------------------------------------------------------------------------------------- //
// cropAndResize kernel type of input(images) and output should be the same
//
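    // Blocks stride over crop boxes and threads (x dimension) over output rows; columns and channels are
    // handled in the inner loops (the 1-D launch in cropAndResizeFunctor_ leaves the y/z thread indices at 0).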
template <typename T, typename Z, typename I>
static __global__ void cropAndResizeKernel(T const *images, Nd4jLong const* imagesShape, Z const* boxes, Nd4jLong const* boxesShape,
I const* indices, Nd4jLong const* indexShape, I const* cropSize, Nd4jLong const* cropShape, int method,
double extrapolationVal, T* output, Nd4jLong const* outputShape, int numBoxes, int cropHeight, int cropWidth,
int batchSize, int imageHeight, int imageWidth, int depth) {
for (int b = blockIdx.x; b < numBoxes; b += gridDim.x)
{
Nd4jLong x1Pos[] = {b, 1};
Nd4jLong y1Pos[] = {b, 0};
Nd4jLong y2Pos[] = {b, 2};
Nd4jLong x2Pos[] = {b, 3};
Z y1 = boxes[shape::getOffset(boxesShape, y1Pos)];//->t<T>(b, 0)];
Z x1 = boxes[shape::getOffset(boxesShape, x1Pos)];
Z y2 = boxes[shape::getOffset(boxesShape, y2Pos)];
Z x2 = boxes[shape::getOffset(boxesShape, x2Pos)];
int bIn = indices[b];
if (bIn >= batchSize) {
continue;
}
Z heightScale = (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / Z(cropHeight - 1) : Z(0);
Z widthScale = (cropWidth > 1) ? (x2 - x1) * (imageWidth - 1) / Z(cropWidth - 1) : Z(0);
for (int y = threadIdx.x; y < cropHeight; y += blockDim.x) {
const float inY = (cropHeight > 1)
? y1 * (imageHeight - 1) + y * heightScale
: 0.5 * (y1 + y2) * (imageHeight - 1);
if (inY < 0 || inY > imageHeight - 1) {
for (int x = threadIdx.y; x < cropWidth; x += blockDim.y) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
//crops->p(b, y, x, d, extrapolationVal);
}
}
continue;
}
if (method == 0 /* bilinear */) {
const int topYIndex = sd::math::p_floor(inY);
const int bottomYIndex = sd::math::p_ceil(inY);
const float y_lerp = inY - topYIndex;
for (int x = 0; x < cropWidth; ++x) {
const float in_x = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (in_x < 0 || in_x > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
// crops->p(b, y, x, d, extrapolationVal);
}
continue;
}
int left_x_index = math::p_floor(in_x);
int right_x_index = math::p_ceil(in_x);
T x_lerp = in_x - left_x_index;
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong topLeftPos[] = {bIn, topYIndex, left_x_index, d};
Nd4jLong topRightPos[] = {bIn, topYIndex, right_x_index, d};
Nd4jLong bottomLeftPos[] = {bIn, bottomYIndex, left_x_index, d};
Nd4jLong bottomRightPos[] = {bIn, bottomYIndex, right_x_index, d};
const T topLeft(images[shape::getOffset(imagesShape, topLeftPos)]); //->e<float>(bIn, topYIndex, left_x_index, d));
const T topRight(images[shape::getOffset(imagesShape, topRightPos)]); //->e<float>(bIn, topYIndex, right_x_index, d));
const T bottomLeft(images[shape::getOffset(imagesShape, bottomLeftPos)]);//->e<float>(bIn, bottomYIndex, left_x_index, d));
const T bottomRight(images[shape::getOffset(imagesShape, bottomRightPos)]); //->e<float>(bIn, bottomYIndex, right_x_index, d));
const T top = topLeft + (topRight - topLeft) * x_lerp;
const T bottom = bottomLeft + (bottomRight - bottomLeft) * x_lerp;
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = Z(top + (bottom - top) * y_lerp);
}
}
} else { // method is "nearest neighbor"
for (int x = 0; x < cropWidth; ++x) {
const float inX = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (inX < 0 || inX > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
}
continue;
}
const int closestXIndex = roundf(inX);
const int closestYIndex = roundf(inY);
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
Nd4jLong xPos[] = {bIn, closestYIndex, closestXIndex, d};
auto zIndex = shape::getOffset(outputShape, zPos);
auto xIndex = shape::getOffset(imagesShape, xPos);
output[zIndex] = images[xIndex];
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// cropAndResizeFunctor main algorithm
// context - launch context
    // images - batch of images (4D tensor - [batch, height, width, channels])
// boxes - 2D tensor with boxes for crop
// indices - 2D int tensor with indices of boxes to crop
// cropSize - 2D int tensor with crop box sizes
// method - (one of 0 - bilinear, 1 - nearest)
// extrapolationVal - double value of extrapolation
    // crops - output (4D tensor - [numBoxes, cropHeight, cropWidth, channels])
//
template <typename T, typename Z, typename I>
void cropAndResizeFunctor_(sd::LaunchContext* context, NDArray const *images, NDArray const *boxes, NDArray const *indices,
NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
const int batchSize = images->sizeAt(0);
const int imageHeight = images->sizeAt(1);
const int imageWidth = images->sizeAt(2);
const int numBoxes = crops->sizeAt(0);
const int cropHeight = crops->sizeAt(1);
const int cropWidth = crops->sizeAt(2);
const int depth = crops->sizeAt(3);
auto stream = context->getCudaStream();
T const* imagesBuf = reinterpret_cast<T const*>(images->specialBuffer());
Z const* boxesBuf = reinterpret_cast<Z const*>(boxes->specialBuffer());
I const* indexBuf = reinterpret_cast<I const*>(indices->specialBuffer());
I const* cropSizes = reinterpret_cast<I const*>(cropSize->specialBuffer());
T* outBuf = reinterpret_cast<T*>(crops->specialBuffer());
int threadsPerBlock = math::nd4j_max(imageHeight * imageWidth, cropHeight * cropWidth);
if(threadsPerBlock > MAX_NUM_THREADS/4)
threadsPerBlock = MAX_NUM_THREADS/4;
NDArray::prepareSpecialUse({crops}, {images, boxes, indices, cropSize});
cropAndResizeKernel<T,Z,I><<<batchSize, threadsPerBlock, 256, *stream>>>(imagesBuf, images->specialShapeInfo(), boxesBuf, boxes->specialShapeInfo(), indexBuf, indices->specialShapeInfo(),
cropSizes, cropSize->specialShapeInfo(), method, extrapolationVal, outBuf, crops->specialShapeInfo(), numBoxes, cropHeight, cropWidth, batchSize, imageHeight, imageWidth, depth);
NDArray::registerSpecialUse({crops}, {images, boxes, indices, cropSize});
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void cropAndResizeFunctor(sd::LaunchContext * context, NDArray const *images, NDArray const *boxes, NDArray const *indices, NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
BUILD_TRIPLE_SELECTOR(images->dataType(), boxes->dataType(), indices->dataType(), cropAndResizeFunctor_,
(context, images, boxes, indices, cropSize, method, extrapolationVal, crops), NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
//
}
BUILD_TRIPLE_TEMPLATE(template void cropAndResizeFunctor_,
(sd::LaunchContext * context, NDArray const* images, NDArray const* boxes, NDArray const* indices, NDArray const* cropSize, int method, double extrapolationVal, NDArray* crops),
NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
}
}
} |
57e7d72362f45f68bc68759c9d468d88d6e726be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2) {
comp += (+1.8470E29f * var_1 + var_2);
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3);
hipDeviceSynchronize();
return 0;
}
| 57e7d72362f45f68bc68759c9d468d88d6e726be.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2) {
comp += (+1.8470E29f * var_1 + var_2);
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3);
cudaDeviceSynchronize();
return 0;
}
|
solution.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "wb.h"
float* readFile(char* fileName,int *len){
FILE *fp = fopen(fileName,"r");
fscanf(fp,"%d",len);
float* inp = (float*)malloc(sizeof(float)*(*len));
for(int i=0;i<(*len);i++) fscanf(fp,"%f",&inp[i]);
fclose(fp);
return inp;
}
bool isEqual(float *a,float *b,int n){
for(int i=0;i<n;i++){
if(a[i]!=b[i]) return false;
}
return true;
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostInput, *hostOutput, *deviceOutput, *expectedOutput;
int num_elements;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = readFile(wbArg_getInputFile(args, 0), &num_elements);
expectedOutput = readFile(wbArg_getInputFile(args,1), &num_elements);
hostOutput = (float*)malloc(sizeof(float)*num_elements);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", num_elements);
wbTime_start(GPU, "Allocating GPU memory.");
hipMalloc((void**)&deviceOutput,sizeof(float)*num_elements);
hipMemcpy(deviceOutput,hostInput,sizeof(float)*num_elements,hipMemcpyHostToDevice);
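    // Wrap the raw device pointer so Thrust treats it as device memory and runs the scan on the GPU.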
thrust::device_ptr<float> dev_ptr(deviceOutput);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(Compute, "Doing the computation on the GPU");
thrust::inclusive_scan(dev_ptr,dev_ptr+num_elements,dev_ptr);
wbTime_stop(Compute, "Doing the computation on the GPU");
hipMemcpy(hostOutput,deviceOutput,sizeof(float)*num_elements,hipMemcpyDeviceToHost);
if(isEqual(hostOutput,expectedOutput,num_elements)) printf("Solution Verified\n");
else printf("Wrong Solution\n");
// Free Memory
free(hostInput);
free(hostOutput);
free(expectedOutput);
hipFree(deviceOutput);
return 0;
}
| solution.cu | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "wb.h"
float* readFile(char* fileName,int *len){
FILE *fp = fopen(fileName,"r");
fscanf(fp,"%d",len);
float* inp = (float*)malloc(sizeof(float)*(*len));
for(int i=0;i<(*len);i++) fscanf(fp,"%f",&inp[i]);
fclose(fp);
return inp;
}
bool isEqual(float *a,float *b,int n){
for(int i=0;i<n;i++){
if(a[i]!=b[i]) return false;
}
return true;
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostInput, *hostOutput, *deviceOutput, *expectedOutput;
int num_elements;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = readFile(wbArg_getInputFile(args, 0), &num_elements);
expectedOutput = readFile(wbArg_getInputFile(args,1), &num_elements);
hostOutput = (float*)malloc(sizeof(float)*num_elements);
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", num_elements);
wbTime_start(GPU, "Allocating GPU memory.");
cudaMalloc((void**)&deviceOutput,sizeof(float)*num_elements);
cudaMemcpy(deviceOutput,hostInput,sizeof(float)*num_elements,cudaMemcpyHostToDevice);
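    // Wrap the raw device pointer so Thrust treats it as device memory and runs the scan on the GPU.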
thrust::device_ptr<float> dev_ptr(deviceOutput);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(Compute, "Doing the computation on the GPU");
thrust::inclusive_scan(dev_ptr,dev_ptr+num_elements,dev_ptr);
wbTime_stop(Compute, "Doing the computation on the GPU");
cudaMemcpy(hostOutput,deviceOutput,sizeof(float)*num_elements,cudaMemcpyDeviceToHost);
if(isEqual(hostOutput,expectedOutput,num_elements)) printf("Solution Verified\n");
else printf("Wrong Solution\n");
// Free Memory
free(hostInput);
free(hostOutput);
free(expectedOutput);
cudaFree(deviceOutput);
return 0;
}
|
a5fb7eacd7c90caa3939f65d6a85585c224c6093.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_bfs.cu
*
* @brief Simple test driver program for breadth-first search.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
#include <gunrock/graphio/rmat.cuh>
// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
#include "EvqueueManager.h"
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bfs;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf(
" test_bfs <graph type> <graph type args> [--device=<device_index>]\n"
" [--undirected] [--src=<source_index>] [--idempotence=<0|1>] [--v]\n"
" [--instrumented] [--iteration-num=<num>] [--traversal-mode=<0|1>]\n"
" [--quick=<0|1>] [--mark-pred] [--queue-sizing=<scale factor>] "
"\n"
"Graph types and args:\n"
" market <file>\n"
" Reads a Matrix-Market coordinate-formatted graph of directed / undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the test. [Default: 0].\n"
" --undirected Treat the graph as undirected (symmetric).\n"
" --idempotence=<0 or 1> Enable: 1, Disable: 0 [Default: Enable].\n"
" --instrumented Keep kernels statics [Default: Disable].\n"
" total_queued, search_depth and barrier duty\n"
" (a relative indicator of load imbalance.)\n"
" --src=<source vertex id> Begins BFS from the source [Default: 0].\n"
" If randomize: from a random source vertex.\n"
" If largestdegree: from largest degree vertex.\n"
" --quick=<0 or 1> Skip the CPU validation: 1, or not: 0 [Default: 1].\n"
" --mark-pred Keep both label info and predecessor info.\n"
" --queue-sizing=<factor> Allocates a frontier queue sized at: \n"
" (graph-edges * <scale factor>). [Default: 1.0]\n"
" --v Print verbose per iteration debug info.\n"
" --iteration-num=<number> Number of runs to perform the test [Default: 1].\n"
" --traversal-mode=<0 or 1> Set traversal strategy, 0 for Load-Balanced, \n"
" 1 for Dynamic-Cooperative [Default: dynamic\n"
" determine based on average degree].\n"
);
}
/**
* @brief Displays the BFS result (i.e., distance from source)
*
 * @param[in] labels Search depth from the source for each node.
* @param[in] preds Predecessor node id for each node.
 * @param[in] num_nodes Number of nodes in the graph.
* @param[in] MARK_PREDECESSORS Whether to show predecessor of each node.
* @param[in] ENABLE_IDEMPOTENCE Whether to enable idempotence mode.
*/
template<typename VertexId, typename SizeT>
void DisplaySolution(
VertexId *labels,
VertexId *preds,
SizeT num_nodes,
bool MARK_PREDECESSORS,
bool ENABLE_IDEMPOTENCE)
{
if (num_nodes > 40) num_nodes = 40;
printf("\nFirst %d labels of the GPU result:\n", num_nodes);
PrintFormatArray (labels, num_nodes, "%4d", 10);
if (MARK_PREDECESSORS && !ENABLE_IDEMPOTENCE)
{
printf("\nFirst %d predecessors:\n", num_nodes);
PrintFormatArray (preds, num_nodes, "%4d", 10);
}
/*
printf("[");
for (VertexId i = 0; i < num_nodes; ++i)
{
PrintValue(i);
printf(":");
PrintValue(labels[i]);
if (MARK_PREDECESSORS && !ENABLE_IDEMPOTENCE)
{
printf(",");
PrintValue(preds[i]);
}
printf(" ");
}
printf("]\n");
*/
}
/**
* Performance/Evaluation statistics
*/
struct Stats
{
const char *name;
Statistic rate;
Statistic search_depth;
Statistic redundant_work;
Statistic duty;
Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
Stats(const char *name) :
name(name), rate(), search_depth(), redundant_work(), duty() {}
};
/**
* @brief Displays timing and correctness statistics
*
* @tparam MARK_PREDECESSORS
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] stats Reference to the Stats object defined in RunTests
* @param[in] src Source node where BFS starts
* @param[in] h_labels Host-side vector stores computed labels for validation
* @param[in] graph Reference to the CSR graph we process on
* @param[in] elapsed Total elapsed kernel running time
* @param[in] search_depth Maximum search depth of the BFS algorithm
 * @param[in] total_queued Total number of elements queued while the BFS kernels run
* @param[in] avg_duty Average duty of the BFS kernels
*/
template<
bool MARK_PREDECESSORS,
typename VertexId,
typename Value,
typename SizeT>
void DisplayStats(
Stats &stats,
VertexId src,
VertexId *h_labels,
const Csr<VertexId, Value, SizeT> &graph,
double elapsed,
VertexId search_depth,
long long total_queued,
double avg_duty)
{
// Compute nodes and edges visited
SizeT edges_visited = 0;
SizeT nodes_visited = 0;
for (VertexId i = 0; i < graph.nodes; ++i)
{
if (h_labels[i] > -1)
{
++nodes_visited;
edges_visited += graph.row_offsets[i+1] - graph.row_offsets[i];
}
}
double redundant_work = 0.0;
if (total_queued > 0)
{
// measure duplicate edges put through queue
redundant_work = ((double)total_queued - edges_visited) / edges_visited;
}
redundant_work *= 100;
// Display test name
printf("[%s] finished. ", stats.name);
// Display statistics
if (nodes_visited < 5)
{
printf("Fewer than 5 vertices visited.\n");
}
else
{
// Display the specific sample statistics
double m_teps = (double) edges_visited / (elapsed * 1000.0);
printf("\n elapsed: %.4f ms, rate: %.4f MiEdges/s", elapsed, m_teps);
if (search_depth != 0)
printf(", search_depth: %lld", (long long) search_depth);
if (avg_duty != 0)
{
printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
}
printf("\n src: %lld, nodes_visited: %lld, edges_visited: %lld",
(long long) src, (long long) nodes_visited, (long long) edges_visited);
if (total_queued > 0)
{
printf(", total queued: %lld", total_queued);
}
if (redundant_work > 0)
{
printf(", redundant work: %.2f%%", redundant_work);
}
printf("\n");
}
}
/******************************************************************************
* BFS Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference BFS ranking implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] source_path Host-side vector to store CPU computed labels for each node
* @param[in] predecessor Host-side vector to store CPU computed predecessor for each node
* @param[in] src Source node where BFS starts
*/
template<
typename VertexId,
typename Value,
typename SizeT,
bool MARK_PREDECESSORS>
void SimpleReferenceBfs(
const Csr<VertexId, Value, SizeT> &graph,
VertexId *source_path,
VertexId *predecessor,
VertexId src)
{
// Initialize distances
for (VertexId i = 0; i < graph.nodes; ++i)
{
source_path[i] = -1;
if (MARK_PREDECESSORS)
predecessor[i] = -1;
}
source_path[src] = 0;
VertexId search_depth = 0;
// Initialize queue for managing previously-discovered nodes
std::deque<VertexId> frontier;
frontier.push_back(src);
//
// Perform BFS
//
CpuTimer cpu_timer;
cpu_timer.Start();
while (!frontier.empty())
{
// Dequeue node from frontier
VertexId dequeued_node = frontier.front();
frontier.pop_front();
VertexId neighbor_dist = source_path[dequeued_node] + 1;
// Locate adjacency list
int edges_begin = graph.row_offsets[dequeued_node];
int edges_end = graph.row_offsets[dequeued_node + 1];
for (int edge = edges_begin; edge < edges_end; ++edge)
{
// Lookup neighbor and enqueue if undiscovered
VertexId neighbor = graph.column_indices[edge];
if (source_path[neighbor] == -1)
{
source_path[neighbor] = neighbor_dist;
if (MARK_PREDECESSORS)
predecessor[neighbor] = dequeued_node;
if (search_depth < neighbor_dist)
{
search_depth = neighbor_dist;
}
frontier.push_back(neighbor);
}
}
}
if (MARK_PREDECESSORS)
predecessor[src] = -1;
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
search_depth++;
printf("CPU BFS finished in %lf msec. cpu_search_depth: %d\n",
elapsed, search_depth);
}
/**
* @brief Run BFS tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
* @tparam MARK_PREDECESSORS
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node where BFS starts
* @param[in] max_grid_size Maximum CTA occupancy
* @param[in] num_gpus Number of GPUs
* @param[in] max_queue_sizing Scaling factor used in edge mapping
* @param[in] iterations Number of iterations for running the test
* @param[in] traversal_mode Graph traversal mode: Load-balanced or Dynamic cooperative
* @param[in] context CudaContext pointer for moderngpu APIs
*
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool MARK_PREDECESSORS,
bool ENABLE_IDEMPOTENCE>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph,
VertexId src,
int max_grid_size,
int num_gpus,
double max_queue_sizing,
int iterations,
int traversal_mode,
CudaContext& context)
{
typedef BFSProblem<
VertexId,
SizeT,
Value,
MARK_PREDECESSORS,
ENABLE_IDEMPOTENCE,
(MARK_PREDECESSORS && ENABLE_IDEMPOTENCE)> Problem; // does not use double buffer
// Allocate host-side label array (for both reference and gpu-computed results)
VertexId *reference_labels = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *reference_preds = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *h_labels = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *reference_check_label = (g_quick) ? NULL : reference_labels;
VertexId *reference_check_preds = NULL;
VertexId *h_preds = NULL;
if (MARK_PREDECESSORS)
{
h_preds = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
if (!g_quick)
{
reference_check_preds = reference_preds;
}
}
// Allocate BFS enactor map
BFSEnactor<INSTRUMENT> bfs_enactor(g_verbose);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
graph,
num_gpus),
"Problem BFS Initialization Failed", __FILE__, __LINE__);
//
// Compute reference CPU BFS solution for source-distance
//
if (reference_check_label != NULL)
{
printf("Computing reference value ...\n");
SimpleReferenceBfs<VertexId, Value, SizeT, MARK_PREDECESSORS>(
graph,
reference_check_label,
reference_check_preds,
src);
printf("\n");
}
Stats *stats = new Stats("GPU BFS");
long long total_queued = 0;
VertexId search_depth = 0;
double avg_duty = 0.0;
// Perform BFS
GpuTimer gpu_timer;
float elapsed = 0.0f;
iterations = 100;
struct timeval start, end;
for (int iter = 0; iter < iterations; ++iter)
{
util::GRError(
csr_problem->Reset(src, bfs_enactor.GetFrontierType(),
max_queue_sizing),
"BFS Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
gettimeofday(&start, NULL);
util::GRError(
bfs_enactor.template Enact<Problem>(context, csr_problem, src,
max_grid_size, traversal_mode),
"BFS Problem Enact Failed", __FILE__, __LINE__);
gettimeofday(&end, NULL);
std::cerr << "[BFS] ---- " << (end.tv_sec - start.tv_sec)*1000000+(end.tv_usec - start.tv_usec) << std::endl;
gpu_timer.Stop();
elapsed += gpu_timer.ElapsedMillis();
}
elapsed /= iterations;
bfs_enactor.GetStatistics(total_queued, search_depth, avg_duty);
// Copy out results
util::GRError(
csr_problem->Extract(h_labels, h_preds),
"BFS Problem Data Extraction Failed", __FILE__, __LINE__);
// Verify the result
if (reference_check_label != NULL)
{
if (!ENABLE_IDEMPOTENCE)
{
printf("Label Validity: ");
int error_num = CompareResults(
h_labels, reference_check_label, graph.nodes, true);
if (error_num > 0)
printf("%d errors occurred.\n", error_num);
}
else
{
if (!MARK_PREDECESSORS)
{
printf("Label Validity: ");
int error_num = CompareResults(
h_labels, reference_check_label, graph.nodes, true);
if (error_num > 0)
printf("%d errors occurred.\n", error_num);
}
}
}
// Display Solution
DisplaySolution(
h_labels, h_preds, graph.nodes, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE);
DisplayStats<MARK_PREDECESSORS>(
*stats,
src,
h_labels,
graph,
elapsed,
search_depth,
total_queued,
avg_duty);
// Cleanup
delete stats;
if (csr_problem) delete csr_problem;
if (reference_labels) free(reference_labels);
if (h_labels) free(h_labels);
if (h_preds) free(h_preds);
hipDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] args Reference to the command line arguments
* @param[in] context CudaContext pointer for moderngpu APIs
*/
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> &graph,
CommandLineArgs &args,
CudaContext& context)
{
VertexId src = -1; // Use whatever the specified graph-type's default is
std::string src_str;
bool instrumented = 0; // Whether or not to collect instrumentation from kernels
bool mark_pred = 0; // Whether or not to mark src-distance vs. parent vertices
bool idempotence = 1; // Whether or not to enable idempotence operation
int max_grid_size = 0; // Maximum grid size (0: leave it up to the enactor)
int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
double max_queue_sizing = 1.0; // Maximum size scaling factor for work queues (e.g., 1.0 creates n and m-element vertex and edge frontiers).
int iterations = 1; // Number of runs for testing
    int traversal_mode = -1; // Load-balanced or Dynamic cooperative
g_quick = 1; // Whether or not to skip reference validation
// source vertex
args.GetCmdLineArgument("src", src_str);
if (src_str.empty())
{
src = 0;
}
else if (src_str.compare("randomize") == 0)
{
src = graphio::RandomNode(graph.nodes);
}
else if (src_str.compare("largestdegree") == 0)
{
int max_degree;
src = graph.GetNodeWithHighestDegree(max_degree);
printf("Using highest degree (%d) vertex: %d\n", max_degree, src);
}
else
{
args.GetCmdLineArgument("src", src);
}
// traversal mode
args.GetCmdLineArgument("traversal-mode", traversal_mode);
if (traversal_mode == -1)
{
traversal_mode = graph.GetAverageDegree() > 8 ? 0 : 1;
}
// printf("Display neighbor list of src:\n");
// graph.DisplayNeighborList(src);
mark_pred = args.CheckCmdLineFlag("mark-pred");
g_verbose = args.CheckCmdLineFlag("v");
instrumented = args.CheckCmdLineFlag("instrumented");
args.GetCmdLineArgument("quick", g_quick);
args.GetCmdLineArgument("iteration-num", iterations);
args.GetCmdLineArgument("grid-size", max_grid_size);
args.GetCmdLineArgument("idempotence", idempotence);
args.GetCmdLineArgument("queue-sizing", max_queue_sizing);
if (instrumented)
{
if (mark_pred)
{
if (idempotence)
{
RunTests<VertexId, Value, SizeT, true, true, true>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
else
{
RunTests<VertexId, Value, SizeT, true, true, false>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
}
else
{
if (idempotence)
{
RunTests<VertexId, Value, SizeT, true, false, true>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
else
{
RunTests<VertexId, Value, SizeT, true, false, false>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
}
}
else
{
if (mark_pred)
{
if (idempotence)
{
RunTests<VertexId, Value, SizeT, false, true, true>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
else
{
RunTests<VertexId, Value, SizeT, false, true, false>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
}
else
{
if (idempotence)
{
RunTests<VertexId, Value, SizeT, false, false, true>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
else
{
RunTests<VertexId, Value, SizeT, false, false, false>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
}
}
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
EvqueueCreate(2);
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
//DeviceInit(args);
//hipSetDeviceFlags(hipDeviceMapHost);
int dev = 0;
args.GetCmdLineArgument("device", dev);
ContextPtr context = mgpu::CreateCudaDevice(dev);
//srand(0); // Presently deterministic
//srand(time(NULL));
    // Parse graph-construction params
g_undirected = args.CheckCmdLineFlag("undirected");
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
typedef int VertexId; // Use as the node identifier
typedef int Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
if (graph_args < 1) { Usage(); return 1; }
if (graph_type == "market")
{
// Matrix-market coordinate-formatted graph file
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0) // no inverse graph
{
return 1;
}
csr.PrintHistogram();
RunTests(csr, args, *context);
}
else if (graph_type == "rmat")
{
// parse rmat parameters
SizeT rmat_nodes = 1 << 10;
SizeT rmat_edges = 1 << 10;
double rmat_a = 0.55;
double rmat_b = 0.2;
double rmat_c = 0.2;
double rmat_d = 0.05;
if (graphio::BuildRmatGraph<false>(
rmat_nodes,
rmat_edges,
csr,
g_undirected,
rmat_a,
rmat_b,
rmat_c,
rmat_d) != 0)
{
return 1;
}
csr.PrintHistogram();
RunTests(csr, args, *context);
}
else
{
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
EvqueueDestroy();
return 0;
}
| a5fb7eacd7c90caa3939f65d6a85585c224c6093.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_bfs.cu
*
* @brief Simple test driver program for breadth-first search.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
#include <gunrock/graphio/rmat.cuh>
// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
#include "EvqueueManager.h"
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bfs;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf(
" test_bfs <graph type> <graph type args> [--device=<device_index>]\n"
" [--undirected] [--src=<source_index>] [--idempotence=<0|1>] [--v]\n"
" [--instrumented] [--iteration-num=<num>] [--traversal-mode=<0|1>]\n"
" [--quick=<0|1>] [--mark-pred] [--queue-sizing=<scale factor>] "
"\n"
"Graph types and args:\n"
" market <file>\n"
" Reads a Matrix-Market coordinate-formatted graph of directed / undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the test. [Default: 0].\n"
" --undirected Treat the graph as undirected (symmetric).\n"
" --idempotence=<0 or 1> Enable: 1, Disable: 0 [Default: Enable].\n"
" --instrumented Keep kernels statics [Default: Disable].\n"
" total_queued, search_depth and barrier duty\n"
" (a relative indicator of load imbalance.)\n"
" --src=<source vertex id> Begins BFS from the source [Default: 0].\n"
" If randomize: from a random source vertex.\n"
" If largestdegree: from largest degree vertex.\n"
" --quick=<0 or 1> Skip the CPU validation: 1, or not: 0 [Default: 1].\n"
" --mark-pred Keep both label info and predecessor info.\n"
" --queue-sizing=<factor> Allocates a frontier queue sized at: \n"
" (graph-edges * <scale factor>). [Default: 1.0]\n"
" --v Print verbose per iteration debug info.\n"
" --iteration-num=<number> Number of runs to perform the test [Default: 1].\n"
" --traversal-mode=<0 or 1> Set traversal strategy, 0 for Load-Balanced, \n"
" 1 for Dynamic-Cooperative [Default: dynamic\n"
" determine based on average degree].\n"
);
}
/**
* @brief Displays the BFS result (i.e., distance from source)
*
* @param[in] source_path Search depth from the source for each node.
* @param[in] preds Predecessor node id for each node.
* @param[in] nodes Number of nodes in the graph.
* @param[in] MARK_PREDECESSORS Whether to show predecessor of each node.
* @param[in] ENABLE_IDEMPOTENCE Whether to enable idempotence mode.
*/
template<typename VertexId, typename SizeT>
void DisplaySolution(
VertexId *labels,
VertexId *preds,
SizeT num_nodes,
bool MARK_PREDECESSORS,
bool ENABLE_IDEMPOTENCE)
{
if (num_nodes > 40) num_nodes = 40;
printf("\nFirst %d labels of the GPU result:\n", num_nodes);
PrintFormatArray (labels, num_nodes, "%4d", 10);
if (MARK_PREDECESSORS && !ENABLE_IDEMPOTENCE)
{
printf("\nFirst %d predecessors:\n", num_nodes);
PrintFormatArray (preds, num_nodes, "%4d", 10);
}
/*
printf("[");
for (VertexId i = 0; i < num_nodes; ++i)
{
PrintValue(i);
printf(":");
PrintValue(labels[i]);
if (MARK_PREDECESSORS && !ENABLE_IDEMPOTENCE)
{
printf(",");
PrintValue(preds[i]);
}
printf(" ");
}
printf("]\n");
*/
}
/**
* Performance/Evaluation statistics
*/
struct Stats
{
const char *name;
Statistic rate;
Statistic search_depth;
Statistic redundant_work;
Statistic duty;
Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
Stats(const char *name) :
name(name), rate(), search_depth(), redundant_work(), duty() {}
};
/**
* @brief Displays timing and correctness statistics
*
* @tparam MARK_PREDECESSORS
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] stats Reference to the Stats object defined in RunTests
* @param[in] src Source node where BFS starts
* @param[in] h_labels Host-side vector stores computed labels for validation
* @param[in] graph Reference to the CSR graph we process on
* @param[in] elapsed Total elapsed kernel running time
* @param[in] search_depth Maximum search depth of the BFS algorithm
* @param[in] total_queued Total element queued in BFS kernel running process
* @param[in] avg_duty Average duty of the BFS kernels
*/
template<
bool MARK_PREDECESSORS,
typename VertexId,
typename Value,
typename SizeT>
void DisplayStats(
Stats &stats,
VertexId src,
VertexId *h_labels,
const Csr<VertexId, Value, SizeT> &graph,
double elapsed,
VertexId search_depth,
long long total_queued,
double avg_duty)
{
// Compute nodes and edges visited
SizeT edges_visited = 0;
SizeT nodes_visited = 0;
for (VertexId i = 0; i < graph.nodes; ++i)
{
if (h_labels[i] > -1)
{
++nodes_visited;
edges_visited += graph.row_offsets[i+1] - graph.row_offsets[i];
}
}
double redundant_work = 0.0;
if (total_queued > 0)
{
// measure duplicate edges put through queue
redundant_work = ((double)total_queued - edges_visited) / edges_visited;
}
redundant_work *= 100;
// Display test name
printf("[%s] finished. ", stats.name);
// Display statistics
if (nodes_visited < 5)
{
printf("Fewer than 5 vertices visited.\n");
}
else
{
// Display the specific sample statistics
double m_teps = (double) edges_visited / (elapsed * 1000.0);
printf("\n elapsed: %.4f ms, rate: %.4f MiEdges/s", elapsed, m_teps);
if (search_depth != 0)
printf(", search_depth: %lld", (long long) search_depth);
if (avg_duty != 0)
{
printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
}
printf("\n src: %lld, nodes_visited: %lld, edges_visited: %lld",
(long long) src, (long long) nodes_visited, (long long) edges_visited);
if (total_queued > 0)
{
printf(", total queued: %lld", total_queued);
}
if (redundant_work > 0)
{
printf(", redundant work: %.2f%%", redundant_work);
}
printf("\n");
}
}
/******************************************************************************
* BFS Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference BFS ranking implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] source_path Host-side vector to store CPU computed labels for each node
* @param[in] predecessor Host-side vector to store CPU computed predecessor for each node
* @param[in] src Source node where BFS starts
*/
template<
typename VertexId,
typename Value,
typename SizeT,
bool MARK_PREDECESSORS>
void SimpleReferenceBfs(
const Csr<VertexId, Value, SizeT> &graph,
VertexId *source_path,
VertexId *predecessor,
VertexId src)
{
// Initialize distances
for (VertexId i = 0; i < graph.nodes; ++i)
{
source_path[i] = -1;
if (MARK_PREDECESSORS)
predecessor[i] = -1;
}
source_path[src] = 0;
VertexId search_depth = 0;
// Initialize queue for managing previously-discovered nodes
std::deque<VertexId> frontier;
frontier.push_back(src);
//
// Perform BFS
//
CpuTimer cpu_timer;
cpu_timer.Start();
while (!frontier.empty())
{
// Dequeue node from frontier
VertexId dequeued_node = frontier.front();
frontier.pop_front();
VertexId neighbor_dist = source_path[dequeued_node] + 1;
// Locate adjacency list
int edges_begin = graph.row_offsets[dequeued_node];
int edges_end = graph.row_offsets[dequeued_node + 1];
for (int edge = edges_begin; edge < edges_end; ++edge)
{
// Lookup neighbor and enqueue if undiscovered
VertexId neighbor = graph.column_indices[edge];
if (source_path[neighbor] == -1)
{
source_path[neighbor] = neighbor_dist;
if (MARK_PREDECESSORS)
predecessor[neighbor] = dequeued_node;
if (search_depth < neighbor_dist)
{
search_depth = neighbor_dist;
}
frontier.push_back(neighbor);
}
}
}
if (MARK_PREDECESSORS)
predecessor[src] = -1;
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
search_depth++;
printf("CPU BFS finished in %lf msec. cpu_search_depth: %d\n",
elapsed, search_depth);
}
/**
* @brief Run BFS tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
* @tparam MARK_PREDECESSORS
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node where BFS starts
* @param[in] max_grid_size Maximum CTA occupancy
* @param[in] num_gpus Number of GPUs
* @param[in] max_queue_sizing Scaling factor used in edge mapping
* @param[in] iterations Number of iterations for running the test
* @param[in] traversal_mode Graph traversal mode: Load-balanced or Dynamic cooperative
* @param[in] context CudaContext pointer for moderngpu APIs
*
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool MARK_PREDECESSORS,
bool ENABLE_IDEMPOTENCE>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph,
VertexId src,
int max_grid_size,
int num_gpus,
double max_queue_sizing,
int iterations,
int traversal_mode,
CudaContext& context)
{
typedef BFSProblem<
VertexId,
SizeT,
Value,
MARK_PREDECESSORS,
ENABLE_IDEMPOTENCE,
(MARK_PREDECESSORS && ENABLE_IDEMPOTENCE)> Problem; // does not use double buffer
// Allocate host-side label array (for both reference and gpu-computed results)
VertexId *reference_labels = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *reference_preds = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *h_labels = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *reference_check_label = (g_quick) ? NULL : reference_labels;
VertexId *reference_check_preds = NULL;
VertexId *h_preds = NULL;
if (MARK_PREDECESSORS)
{
h_preds = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
if (!g_quick)
{
reference_check_preds = reference_preds;
}
}
// Allocate BFS enactor map
BFSEnactor<INSTRUMENT> bfs_enactor(g_verbose);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
graph,
num_gpus),
"Problem BFS Initialization Failed", __FILE__, __LINE__);
//
// Compute reference CPU BFS solution for source-distance
//
if (reference_check_label != NULL)
{
printf("Computing reference value ...\n");
SimpleReferenceBfs<VertexId, Value, SizeT, MARK_PREDECESSORS>(
graph,
reference_check_label,
reference_check_preds,
src);
printf("\n");
}
Stats *stats = new Stats("GPU BFS");
long long total_queued = 0;
VertexId search_depth = 0;
double avg_duty = 0.0;
// Perform BFS
GpuTimer gpu_timer;
float elapsed = 0.0f;
iterations = 100;
struct timeval start, end;
for (int iter = 0; iter < iterations; ++iter)
{
util::GRError(
csr_problem->Reset(src, bfs_enactor.GetFrontierType(),
max_queue_sizing),
"BFS Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
gettimeofday(&start, NULL);
util::GRError(
bfs_enactor.template Enact<Problem>(context, csr_problem, src,
max_grid_size, traversal_mode),
"BFS Problem Enact Failed", __FILE__, __LINE__);
gettimeofday(&end, NULL);
std::cerr << "[BFS] ---- " << (end.tv_sec - start.tv_sec)*1000000+(end.tv_usec - start.tv_usec) << std::endl;
gpu_timer.Stop();
elapsed += gpu_timer.ElapsedMillis();
}
elapsed /= iterations;
bfs_enactor.GetStatistics(total_queued, search_depth, avg_duty);
// Copy out results
util::GRError(
csr_problem->Extract(h_labels, h_preds),
"BFS Problem Data Extraction Failed", __FILE__, __LINE__);
// Verify the result
if (reference_check_label != NULL)
{
if (!ENABLE_IDEMPOTENCE)
{
printf("Label Validity: ");
int error_num = CompareResults(
h_labels, reference_check_label, graph.nodes, true);
if (error_num > 0)
printf("%d errors occurred.\n", error_num);
}
else
{
if (!MARK_PREDECESSORS)
{
printf("Label Validity: ");
int error_num = CompareResults(
h_labels, reference_check_label, graph.nodes, true);
if (error_num > 0)
printf("%d errors occurred.\n", error_num);
}
}
}
// Display Solution
DisplaySolution(
h_labels, h_preds, graph.nodes, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE);
DisplayStats<MARK_PREDECESSORS>(
*stats,
src,
h_labels,
graph,
elapsed,
search_depth,
total_queued,
avg_duty);
// Cleanup
delete stats;
if (csr_problem) delete csr_problem;
if (reference_labels) free(reference_labels);
if (h_labels) free(h_labels);
if (h_preds) free(h_preds);
cudaDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] args Reference to the command line arguments
* @param[in] context CudaContext pointer for moderngpu APIs
*/
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> &graph,
CommandLineArgs &args,
CudaContext& context)
{
VertexId src = -1; // Use whatever the specified graph-type's default is
std::string src_str;
bool instrumented = 0; // Whether or not to collect instrumentation from kernels
bool mark_pred = 0; // Whether or not to mark src-distance vs. parent vertices
bool idempotence = 1; // Whether or not to enable idempotence operation
int max_grid_size = 0; // Maximum grid size (0: leave it up to the enactor)
int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
double max_queue_sizing = 1.0; // Maximum size scaling factor for work queues (e.g., 1.0 creates n and m-element vertex and edge frontiers).
int iterations = 1; // Number of runs for testing
    int traversal_mode = -1; // Load-balanced or Dynamic cooperative
g_quick = 1; // Whether or not to skip reference validation
// source vertex
args.GetCmdLineArgument("src", src_str);
if (src_str.empty())
{
src = 0;
}
else if (src_str.compare("randomize") == 0)
{
src = graphio::RandomNode(graph.nodes);
}
else if (src_str.compare("largestdegree") == 0)
{
int max_degree;
src = graph.GetNodeWithHighestDegree(max_degree);
printf("Using highest degree (%d) vertex: %d\n", max_degree, src);
}
else
{
args.GetCmdLineArgument("src", src);
}
// traversal mode
args.GetCmdLineArgument("traversal-mode", traversal_mode);
if (traversal_mode == -1)
{
traversal_mode = graph.GetAverageDegree() > 8 ? 0 : 1;
}
// printf("Display neighbor list of src:\n");
// graph.DisplayNeighborList(src);
mark_pred = args.CheckCmdLineFlag("mark-pred");
g_verbose = args.CheckCmdLineFlag("v");
instrumented = args.CheckCmdLineFlag("instrumented");
args.GetCmdLineArgument("quick", g_quick);
args.GetCmdLineArgument("iteration-num", iterations);
args.GetCmdLineArgument("grid-size", max_grid_size);
args.GetCmdLineArgument("idempotence", idempotence);
args.GetCmdLineArgument("queue-sizing", max_queue_sizing);
if (instrumented)
{
if (mark_pred)
{
if (idempotence)
{
RunTests<VertexId, Value, SizeT, true, true, true>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
else
{
RunTests<VertexId, Value, SizeT, true, true, false>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
}
else
{
if (idempotence)
{
RunTests<VertexId, Value, SizeT, true, false, true>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
else
{
RunTests<VertexId, Value, SizeT, true, false, false>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
}
}
else
{
if (mark_pred)
{
if (idempotence)
{
RunTests<VertexId, Value, SizeT, false, true, true>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
else
{
RunTests<VertexId, Value, SizeT, false, true, false>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
}
else
{
if (idempotence)
{
RunTests<VertexId, Value, SizeT, false, false, true>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
else
{
RunTests<VertexId, Value, SizeT, false, false, false>(
graph,
src,
max_grid_size,
num_gpus,
max_queue_sizing,
iterations,
traversal_mode,
context);
}
}
}
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
EvqueueCreate(2);
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
//DeviceInit(args);
//cudaSetDeviceFlags(cudaDeviceMapHost);
int dev = 0;
args.GetCmdLineArgument("device", dev);
ContextPtr context = mgpu::CreateCudaDevice(dev);
//srand(0); // Presently deterministic
//srand(time(NULL));
    // Parse graph-construction params
g_undirected = args.CheckCmdLineFlag("undirected");
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
typedef int VertexId; // Use as the node identifier
typedef int Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
if (graph_args < 1) { Usage(); return 1; }
if (graph_type == "market")
{
// Matrix-market coordinate-formatted graph file
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0) // no inverse graph
{
return 1;
}
csr.PrintHistogram();
RunTests(csr, args, *context);
}
else if (graph_type == "rmat")
{
// parse rmat parameters
SizeT rmat_nodes = 1 << 10;
SizeT rmat_edges = 1 << 10;
double rmat_a = 0.55;
double rmat_b = 0.2;
double rmat_c = 0.2;
double rmat_d = 0.05;
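        // R-MAT partition probabilities; a, b, c, d should sum to 1.0 (0.55 + 0.2 + 0.2 + 0.05 = 1.0).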
if (graphio::BuildRmatGraph<false>(
rmat_nodes,
rmat_edges,
csr,
g_undirected,
rmat_a,
rmat_b,
rmat_c,
rmat_d) != 0)
{
return 1;
}
csr.PrintHistogram();
RunTests(csr, args, *context);
}
else
{
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
EvqueueDestroy();
return 0;
}
|
9efb85d2b50167319ff69982747fd999d99ab3a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Linear.h"
#include <cassert>
#include <hip/hip_cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>
#include <cooperative_groups/reduce.h>
#include <mma.hpp>
#include <omp.h>
#include <utils.h>
namespace cg = cooperative_groups;
using frag_t = culib::mma::mma_t<16, 16, 16>;
/*
 * It returns the pointer of the top-left corner of a given block in a matrix.
* Assume the matrix is stored in a row-major array.
* It needs the number of columns of the matrix (leading dimension).
*/
template <typename T, int SIZE = 16>
__device__ T *get_blk_start(T *data, const int row_blk, const int col_blk,
const int stride) {
return &data[row_blk * SIZE * stride + SIZE * col_blk];
}
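// Illustrative example (not from the original source): with SIZE = 16 and a row-major
// matrix A of leading dimension `stride`, get_blk_start(A, 1, 2, stride) points at
// &A[1 * 16 * stride + 2 * 16], i.e. the top-left element of block-row 1, block-column 2.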
template <typename TyGroup, typename T>
__device__ void memcpy2D(const TyGroup &g, T *__restrict__ dst,
const T *__restrict__ src, const int size,
const int ldm, const int ld_dst, const int ld_src) {
for (size_t i = g.thread_rank(); i < size; i += g.size()) {
const auto r = i / ldm;
const auto c = i % ldm;
dst[r * ld_dst + c] = src[r * ld_src + c];
}
g.sync();
}
__global__ void __kernel_SPMM1x16(const int *__restrict__ tile_idx,
const int *__restrict__ row_idx,
const half *__restrict__ data,
const int npart,
const half *__restrict__ dense,
half *__restrict__ res, const int size,
const int in_size, const int out_size) {
// (sizeof(half) * 256 + sizeof(int) * (npart+1)) * num_warp
extern __shared__ half smem[];
auto grid = cg::this_grid();
auto cta = cg::this_thread_block();
auto warp = cg::tiled_partition<32>(cta);
// global warp id
const auto warp_id = warp.meta_group_rank() + blockIdx.x * blockDim.x / 32;
// total warp in this grid
const auto num_warp = grid.size() / 32;
auto blk_temp = &smem[256 * warp.meta_group_rank()];
auto idx_temp = reinterpret_cast<int *>(blk_temp + 256);
frag_t::a_t<wmma::row_major> a_frag;
frag_t::b_t<wmma::col_major> b_frag;
frag_t::c_t<half> c_frag;
for (int i = warp_id; i < (out_size * size) / 256; i += num_warp) {
const auto row_set = i / (size / 16);
const auto dense_set = i % (size / 16);
const auto idx_start = &tile_idx[row_set * (npart + 1)];
cg::memcpy_async(warp, idx_temp, idx_start, sizeof(int) * (npart + 1));
wmma::fill_fragment(c_frag, 0);
cg::wait(warp);
for (int part_id = 0; part_id < npart; part_id++) {
auto workgroup = cg::tiled_partition<2>(warp);
const auto n_valid_row = idx_temp[part_id + 1] - idx_temp[part_id];
if (n_valid_row == 0)
continue;
const auto group_id = workgroup.meta_group_rank();
if (group_id < n_valid_row) {
// index for this tile
const auto idx = idx_temp[part_id] + group_id;
// relative row in 16x16 block
const auto row_id = row_idx[idx] % 16;
auto src = &data[16 * idx];
auto dst = &blk_temp[16 * row_id];
cg::memcpy_async(workgroup, dst, src, sizeof(half) * 16);
}
cg::wait(warp);
wmma::load_matrix_sync(a_frag, blk_temp, 16);
auto src = get_blk_start(dense, dense_set, part_id, in_size);
wmma::load_matrix_sync(b_frag, src, in_size);
wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
}
const auto lane_id = warp.thread_rank();
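        // NOTE: lane_id is unused and the accumulated c_frag is never stored back to `res`
        // here; the write-back step of this kernel looks incomplete.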
}
}
void Linear<mat1x16>::forward(half *output, half *input,
hipStream_t stream) {
// int num_thd, num_blk;
// auto get_smem = [](int n) {
// return sizeof(half) * 256 * (1 + n / 32) + sizeof(int) * 16 * (n
// / 32);
// };
// hipOccupancyMaxPotentialBlockSizeVariableSMem(&num_blk, &num_thd,
// __kernel_SPMM1x16,
// get_smem);
// hipStream_t _stream;
// if (stream)
// _stream = *stream;
// else
// _stream = 0;
// __kernel_SPMM1x16<<<dim3(size / 16, weight.npart), num_thd,
// get_smem(num_thd), _stream>>>(
// weight.tile_idx.get(), weight.tile_row_idx.get(),
// weight.data.get(), input, output, size, in_size, out_size);
// auto bias_temp = this->bias.get();
// auto stride = out_size;
// const auto add_bias = [bias_temp, stride] __device__(half * data,
// int i) -> half {
// return data[i] + bias_temp[i % stride];
// };
// culib::cuda_map(output, size * out_size, add_bias, stream);
}
Linear<mat1x16>::Linear(int _in_size, int _out_size, const mat1x16 &w,
const half *b, int _size)
: in_size(_in_size), out_size(_out_size), size(_size), bias(b, _out_size),
weight(w) {}
Linear<mat1x16>::Linear(int _in_size, int _out_size, mat1x16 &&w, const half *b,
int _size)
: in_size(_in_size), out_size(_out_size), size(_size), bias(b, _out_size),
weight(w) {}
| 9efb85d2b50167319ff69982747fd999d99ab3a0.cu | #include "Linear.h"
#include <cassert>
#include <cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>
#include <cooperative_groups/reduce.h>
#include <mma.hpp>
#include <omp.h>
#include <utils.h>
namespace cg = cooperative_groups;
using frag_t = culib::mma::mma_t<16, 16, 16>;
/*
 * It returns the pointer of the top-left corner of a given block in a matrix.
* Assume the matrix is stored in a row-major array.
* It needs the number of columns of the matrix (leading dimension).
*/
template <typename T, int SIZE = 16>
__device__ T *get_blk_start(T *data, const int row_blk, const int col_blk,
const int stride) {
return &data[row_blk * SIZE * stride + SIZE * col_blk];
}
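// Illustrative example (not from the original source): with SIZE = 16 and a row-major
// matrix A of leading dimension `stride`, get_blk_start(A, 1, 2, stride) points at
// &A[1 * 16 * stride + 2 * 16], i.e. the top-left element of block-row 1, block-column 2.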
template <typename TyGroup, typename T>
__device__ void memcpy2D(const TyGroup &g, T *__restrict__ dst,
const T *__restrict__ src, const int size,
const int ldm, const int ld_dst, const int ld_src) {
for (size_t i = g.thread_rank(); i < size; i += g.size()) {
const auto r = i / ldm;
const auto c = i % ldm;
dst[r * ld_dst + c] = src[r * ld_src + c];
}
g.sync();
}
__global__ void __kernel_SPMM1x16(const int *__restrict__ tile_idx,
const int *__restrict__ row_idx,
const half *__restrict__ data,
const int npart,
const half *__restrict__ dense,
half *__restrict__ res, const int size,
const int in_size, const int out_size) {
// (sizeof(half) * 256 + sizeof(int) * (npart+1)) * num_warp
extern __shared__ half smem[];
auto grid = cg::this_grid();
auto cta = cg::this_thread_block();
auto warp = cg::tiled_partition<32>(cta);
// global warp id
const auto warp_id = warp.meta_group_rank() + blockIdx.x * blockDim.x / 32;
// total warp in this grid
const auto num_warp = grid.size() / 32;
auto blk_temp = &smem[256 * warp.meta_group_rank()];
auto idx_temp = reinterpret_cast<int *>(blk_temp + 256);
frag_t::a_t<wmma::row_major> a_frag;
frag_t::b_t<wmma::col_major> b_frag;
frag_t::c_t<half> c_frag;
for (int i = warp_id; i < (out_size * size) / 256; i += num_warp) {
const auto row_set = i / (size / 16);
const auto dense_set = i % (size / 16);
const auto idx_start = &tile_idx[row_set * (npart + 1)];
cg::memcpy_async(warp, idx_temp, idx_start, sizeof(int) * (npart + 1));
wmma::fill_fragment(c_frag, 0);
cg::wait(warp);
for (int part_id = 0; part_id < npart; part_id++) {
auto workgroup = cg::tiled_partition<2>(warp);
const auto n_valid_row = idx_temp[part_id + 1] - idx_temp[part_id];
if (n_valid_row == 0)
continue;
const auto group_id = workgroup.meta_group_rank();
if (group_id < n_valid_row) {
// index for this tile
const auto idx = idx_temp[part_id] + group_id;
// relative row in 16x16 block
const auto row_id = row_idx[idx] % 16;
auto src = &data[16 * idx];
auto dst = &blk_temp[16 * row_id];
cg::memcpy_async(workgroup, dst, src, sizeof(half) * 16);
}
cg::wait(warp);
wmma::load_matrix_sync(a_frag, blk_temp, 16);
auto src = get_blk_start(dense, dense_set, part_id, in_size);
wmma::load_matrix_sync(b_frag, src, in_size);
wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
}
const auto lane_id = warp.thread_rank();
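        // NOTE: lane_id is unused and the accumulated c_frag is never stored back to `res`
        // here; the write-back step of this kernel looks incomplete.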
}
}
void Linear<mat1x16>::forward(half *output, half *input,
cudaStream_t stream) {
// int num_thd, num_blk;
// auto get_smem = [](int n) {
// return sizeof(half) * 256 * (1 + n / 32) + sizeof(int) * 16 * (n
// / 32);
// };
// cudaOccupancyMaxPotentialBlockSizeVariableSMem(&num_blk, &num_thd,
// __kernel_SPMM1x16,
// get_smem);
// cudaStream_t _stream;
// if (stream)
// _stream = *stream;
// else
// _stream = 0;
// __kernel_SPMM1x16<<<dim3(size / 16, weight.npart), num_thd,
// get_smem(num_thd), _stream>>>(
// weight.tile_idx.get(), weight.tile_row_idx.get(),
// weight.data.get(), input, output, size, in_size, out_size);
// auto bias_temp = this->bias.get();
// auto stride = out_size;
// const auto add_bias = [bias_temp, stride] __device__(half * data,
// int i) -> half {
// return data[i] + bias_temp[i % stride];
// };
// culib::cuda_map(output, size * out_size, add_bias, stream);
}
Linear<mat1x16>::Linear(int _in_size, int _out_size, const mat1x16 &w,
const half *b, int _size)
: in_size(_in_size), out_size(_out_size), size(_size), bias(b, _out_size),
weight(w) {}
Linear<mat1x16>::Linear(int _in_size, int _out_size, mat1x16 &&w, const half *b,
int _size)
: in_size(_in_size), out_size(_out_size), size(_size), bias(b, _out_size),
weight(w) {}
|
1ee1168f9065ce3ff76857ef5e26e4d0d94fbbf6.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/merge.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#include <chrono>
#include <iostream>
#include <string>
#include <vector>
#include "advanced_sparse.h"
#include "common_sparse.h"
#include "simple_sparse.h"
#include "../pml/csvwriter.h"
#include "../pml/measurement.h"
int main(void)
{
int iterations = 100;
std::vector<int> vectorCount;
std::vector<MeasurementSeries<std::chrono::microseconds>> oldTimes;
std::vector<MeasurementSeries<std::chrono::microseconds>> newTimes;
for (int number_vectors = 2; number_vectors <= 10; ++number_vectors)
{
vectorCount.push_back(number_vectors);
// generate index vectors
std::vector<thrust::device_vector<int>> indexVectors(number_vectors);
for (size_t i = 0; i < indexVectors.size(); ++i)
{
thrust::device_vector<int> tmpIndex(10000);
fillRandomIndexVector(tmpIndex, 100000);
indexVectors[i] = tmpIndex;
}
// generate value vectors
std::vector<thrust::device_vector<float>> valueVectors(number_vectors);
for (size_t i = 0; i < valueVectors.size(); ++i)
{
thrust::device_vector<float> tmpValue(10000);
fillRandomValueVector(tmpValue, 10);
valueVectors[i] = tmpValue;
}
// use old method
MeasurementSeries<std::chrono::microseconds> measurementOld;
thrust::device_vector<int> result_index_old;
thrust::device_vector<float> result_value_old;
for (int i = 0; i < iterations; ++i)
{
hipDeviceSynchronize();
measurementOld.start();
sum_sparse_vectors(indexVectors, valueVectors, result_index_old, result_value_old);
hipDeviceSynchronize();
measurementOld.stop();
}
oldTimes.push_back(measurementOld);
// use new method
MeasurementSeries<std::chrono::microseconds> measurementNew;
thrust::device_vector<int> result_index_new;
thrust::device_vector<float> result_value_new;
for (int i = 0; i < iterations; ++i)
{
hipDeviceSynchronize();
measurementNew.start();
sum_multiple_sparse_vectors(indexVectors, valueVectors, result_index_new, result_value_new);
hipDeviceSynchronize();
measurementNew.stop();
}
newTimes.push_back(measurementNew);
if (!checkResults(result_index_old, result_value_old, result_index_new, result_value_new))
{
std::cout << "result is wrong" << std::endl;
}
}
CSVWriter csvwriter("sparse.csv");
std::vector<std::string> headerNames {"vector count", "old method", "new method"};
csvwriter.setHeaderNames(std::move(headerNames));
csvwriter.write(vectorCount, oldTimes, newTimes);
}
| 1ee1168f9065ce3ff76857ef5e26e4d0d94fbbf6.cu | #include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/merge.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#include <chrono>
#include <iostream>
#include <string>
#include <vector>
#include "advanced_sparse.h"
#include "common_sparse.h"
#include "simple_sparse.h"
#include "../pml/csvwriter.h"
#include "../pml/measurement.h"
int main(void)
{
int iterations = 100;
std::vector<int> vectorCount;
std::vector<MeasurementSeries<std::chrono::microseconds>> oldTimes;
std::vector<MeasurementSeries<std::chrono::microseconds>> newTimes;
for (int number_vectors = 2; number_vectors <= 10; ++number_vectors)
{
vectorCount.push_back(number_vectors);
// generate index vectors
std::vector<thrust::device_vector<int>> indexVectors(number_vectors);
for (size_t i = 0; i < indexVectors.size(); ++i)
{
thrust::device_vector<int> tmpIndex(10000);
fillRandomIndexVector(tmpIndex, 100000);
indexVectors[i] = tmpIndex;
}
// generate value vectors
std::vector<thrust::device_vector<float>> valueVectors(number_vectors);
for (size_t i = 0; i < valueVectors.size(); ++i)
{
thrust::device_vector<float> tmpValue(10000);
fillRandomValueVector(tmpValue, 10);
valueVectors[i] = tmpValue;
}
// use old method
MeasurementSeries<std::chrono::microseconds> measurementOld;
thrust::device_vector<int> result_index_old;
thrust::device_vector<float> result_value_old;
for (int i = 0; i < iterations; ++i)
{
cudaDeviceSynchronize();
measurementOld.start();
sum_sparse_vectors(indexVectors, valueVectors, result_index_old, result_value_old);
cudaDeviceSynchronize();
measurementOld.stop();
}
oldTimes.push_back(measurementOld);
// use new method
MeasurementSeries<std::chrono::microseconds> measurementNew;
thrust::device_vector<int> result_index_new;
thrust::device_vector<float> result_value_new;
for (int i = 0; i < iterations; ++i)
{
cudaDeviceSynchronize();
measurementNew.start();
sum_multiple_sparse_vectors(indexVectors, valueVectors, result_index_new, result_value_new);
cudaDeviceSynchronize();
measurementNew.stop();
}
newTimes.push_back(measurementNew);
if (!checkResults(result_index_old, result_value_old, result_index_new, result_value_new))
{
std::cout << "result is wrong" << std::endl;
}
}
CSVWriter csvwriter("sparse.csv");
std::vector<std::string> headerNames {"vector count", "old method", "new method"};
csvwriter.setHeaderNames(std::move(headerNames));
csvwriter.write(vectorCount, oldTimes, newTimes);
}
|
a39b7467c0df430a422b22af044337f48192b406.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Cuda error checking
////////////////////////////////////////////////////////////////////////////////
void SAFE_CALL(hipError_t err){
if(err != hipSuccess){
printf("Error: %s \n", hipGetErrorString(err));
}
}
void KERNEL_ERROR_CHECK(){
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess){
printf("\tSync kernel error: %s \n", hipGetErrorString(errSync));
}
if(errAsync != hipSuccess){
printf("\tAsync kernel error: %s \n", hipGetErrorString(errAsync));
}
}
void KERNEL_ERROR_CHECK(char const *message){
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess){
printf("%s\n", message);
printf("\tSync kernel error: %s \n", hipGetErrorString(errSync));
}
if(errAsync != hipSuccess){
printf("%s\n", message);
printf("\tAsync kernel error: %s \n", hipGetErrorString(errAsync));
}
}
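// Illustrative usage sketch (assumes a kernel named my_kernel and buffer d_buf exist elsewhere):
//   SAFE_CALL(hipMalloc((void**)&d_buf, bytes));
//   hipLaunchKernelGGL((my_kernel), dim3(grid), dim3(block), 0, 0, d_buf);
//   KERNEL_ERROR_CHECK("my_kernel launch");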
| a39b7467c0df430a422b22af044337f48192b406.cu | #include <stdio.h>
#include "cuda.h"
#include "cuda_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Cuda error checking
////////////////////////////////////////////////////////////////////////////////
void SAFE_CALL(cudaError_t err){
if(err != cudaSuccess){
printf("Error: %s \n", cudaGetErrorString(err));
}
}
void KERNEL_ERROR_CHECK(){
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess){
printf("\tSync kernel error: %s \n", cudaGetErrorString(errSync));
}
if(errAsync != cudaSuccess){
printf("\tAsync kernel error: %s \n", cudaGetErrorString(errAsync));
}
}
void KERNEL_ERROR_CHECK(char const *message){
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess){
printf("%s\n", message);
printf("\tSync kernel error: %s \n", cudaGetErrorString(errSync));
}
if(errAsync != cudaSuccess){
printf("%s\n", message);
printf("\tAsync kernel error: %s \n", cudaGetErrorString(errAsync));
}
}
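// Illustrative usage sketch (assumes a kernel named my_kernel and buffer d_buf exist elsewhere):
//   SAFE_CALL(cudaMalloc((void**)&d_buf, bytes));
//   my_kernel<<<grid, block>>>(d_buf);
//   KERNEL_ERROR_CHECK("my_kernel launch");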
|
dcd4d456bb78dc67d5e7501e94b6f8fdb9c810cd.hip | // !!! This is a file automatically generated by hipify!!!
// execute by typing nvcc que1.cu
// ./a.out
#include <stdio.h>
#include <hip/hip_runtime.h>
#define N 32
__global__ void initArray(int *arr)
{
int tidx = threadIdx.x + blockDim.x * blockIdx.x;
arr[tidx] = tidx;
}
__global__ void square (int *matrix, int *result, int matrixsize) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int ii = id / matrixsize;
int jj = id % matrixsize;
int index = ii * matrixsize + jj;
for (int kk = 0; kk < matrixsize; ++kk) {
int ix = ii * matrixsize + kk;
int jx = kk * matrixsize + jj;
int r = matrix[ix] * matrix[jx];
printf("Mresult_arr[%d] = %d\n", index, r);
printf("ix = %d; jx = ;\n", ix, jx);
        result[index] += r;
}
}
int main()
{
int *arr;
int *result_arr;
int *d_arr;
int *d_result_arr;
int raw_size = (N * 2);
int size = raw_size * sizeof(int);
arr = (int *)malloc(size);
result_arr = (int *)malloc(size);
hipMalloc((void **)&d_arr, size);
  hipMalloc((void **)&d_result_arr, size);
  hipMemset(d_result_arr, 0, size);
hipLaunchKernelGGL(( initArray), dim3(raw_size),dim3(1), 0, 0, d_arr);
hipLaunchKernelGGL(( square), dim3(raw_size),dim3(1), 0, 0, d_arr, d_result_arr, raw_size);
hipMemcpy(result_arr, d_result_arr, size, hipMemcpyDeviceToHost);
  free(arr);
  free(result_arr);
  hipFree(d_arr);
  hipFree(d_result_arr);
return 0;
}
| dcd4d456bb78dc67d5e7501e94b6f8fdb9c810cd.cu | // execute by typing nvcc que1.cu
// ./a.out
#include <stdio.h>
#include <cuda.h>
#define N 32
__global__ void initArray(int *arr)
{
int tidx = threadIdx.x + blockDim.x * blockIdx.x;
arr[tidx] = tidx;
}
__global__ void square (int *matrix, int *result, int matrixsize) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int ii = id / matrixsize;
int jj = id % matrixsize;
int index = ii * matrixsize + jj;
for (int kk = 0; kk < matrixsize; ++kk) {
int ix = ii * matrixsize + kk;
int jx = kk * matrixsize + jj;
int r = matrix[ix] * matrix[jx];
printf("Mresult_arr[%d] = %d\n", index, r);
printf("ix = %d; jx = ;\n", ix, jx);
        result[index] += r;
}
}
int main()
{
int *arr;
int *result_arr;
int *d_arr;
int *d_result_arr;
int raw_size = (N * 2);
int size = raw_size * sizeof(int);
arr = (int *)malloc(size);
result_arr = (int *)malloc(size);
cudaMalloc((void **)&d_arr, size);
  cudaMalloc((void **)&d_result_arr, size);
  cudaMemset(d_result_arr, 0, size);
initArray<<<raw_size,1>>>(d_arr);
square<<<raw_size,1>>>(d_arr, d_result_arr, raw_size);
cudaMemcpy(result_arr, d_result_arr, size, cudaMemcpyDeviceToHost);
free(arr);
cudaFree(d_arr);
return 0;
}
|
5fa7b7594f0aafc99077203938da2a75111765e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<fstream>
#include<stdlib.h>
#include<string>
#include<stdio.h>
#include<omp.h>
#include<cuda_runtime.h>
#include<hipfft.h>
#include<hipfftXt.h>
#include<math.h>
char** sample_names;
size_t* sample_sizes;
int num_samples = 0;
__global__
void calc_LAD(hipfftComplex* sample, hipfftComplex* partial, int sample_size){
int id = blockDim.x*blockIdx.x + threadIdx.x;
int sample_idx = id % sample_size;
if(id < 4*sample_size ){
float diff = abs(sqrt(sample[sample_idx].x*sample[sample_idx].x + sample[sample_idx].y*sample[sample_idx].y)
- sqrt(partial[id].x*partial[id].x + partial[id].y*partial[id].y));
partial[id].x = diff;
partial[id].y = 0;
__syncthreads();
for(unsigned int s = sample_size/2; s>0; s>>=1){
if(sample_idx < s)
partial[id].x += partial[id+s].x;
__syncthreads();
}
//now the results are in the num_threads first elements of the array so we'll need one memcpy in host code
if(sample_idx ==0)
partial[id/sample_size] = partial[id];
}
}
int get_num_files(std::string files){
int c=0;
for(int i=0; i < files.length() ; i ++)
if(files.at(i) == '\n') c++;
return c;
}
size_t get_data_size(std::string file_path){
std::ifstream in(file_path.c_str(), std::ifstream::ate | std::ifstream::binary);
size_t size = in.tellg();
in.close();
return size;
}
void copy_samples(char* argv[]){
std::string songs_path(argv[2]);
std::string command = "cp ";
command.append(songs_path);
command.append("/* ");
command.append("./Converted");
system(command.c_str());
}
std::string run_command(std::string cmd) {
std::string data;
FILE * stream;
const int max_buffer = 256;
char buffer[max_buffer];
cmd.append(" 2>&1");
stream = popen(cmd.c_str(), "r");
if (stream) {
while (!feof(stream))
if (fgets(buffer, max_buffer, stream) != NULL) data.append(buffer);
pclose(stream);
}
return data;
}
hipfftComplex* read_file(std::string file_path, size_t * size, bool shrink, int downsampling_factor){
*size = get_data_size(file_path);
(*size)/=downsampling_factor;
//shrink the sample into a power of 2 so that transformations are done fast
if(shrink)
*size = (size_t)pow(2, (size_t)log2(*size));
FILE* file;
file = fopen(file_path.c_str(), "r");
if(file == NULL){
printf("Error: Couldn't open file %s\n", file_path.c_str());
exit(EXIT_FAILURE);
}
hipfftComplex* data_cufft = (hipfftComplex*)malloc(*size*sizeof(hipfftComplex));
unsigned char* data = (unsigned char*)malloc((*size*downsampling_factor)*sizeof(char));
fread(data, 1, *size*downsampling_factor,file);
for(int i =0; i < *size; i ++){
data_cufft[i].x = (float) data[i*downsampling_factor];
//we're dealing with real numbers so set phase to 0
data_cufft[i].y = 0;
}
  free(data);
  fclose(file);
return data_cufft;
}
hipfftComplex** bring_samples_data(std::string supdir, std::string files){
num_samples = get_num_files(files);
hipfftComplex** all_data_cufft = (hipfftComplex**)malloc(num_samples*sizeof(hipfftComplex*));
sample_names = (char**)malloc(num_samples*sizeof(char*));
sample_sizes = (size_t*)malloc(num_samples*sizeof(size_t));
std::string delimiter = "\n";
size_t pos = 0;
std::string file;
int c = num_samples;
float start = omp_get_wtime();
while ((pos = files.find(delimiter)) != std::string::npos){
file = files.substr(0, pos);
sample_names[--c] = (char*)malloc(file.length()*sizeof(char));
strcpy(sample_names[c] , file.c_str());
files.erase(0, pos + delimiter.length());
std::string s = supdir;
s.append(file);
//sample_sizes[c] = get_data_size(s);
all_data_cufft[c] = read_file(s, &sample_sizes[c], true, 1);
printf("%s: data read\n", file.c_str());
}
float end = omp_get_wtime();
printf("time elapsed: %f\n", end - start);
return all_data_cufft;
}
void get_cuda_error(hipError_t error, int line){
if(error != hipSuccess){
printf("%s line: %d\n", hipGetErrorString(error), line);
exit(EXIT_FAILURE);
}
}
void get_cufft_result(hipfftResult_t result, int line){
if(result != HIPFFT_SUCCESS){
printf("CUFFT error number %d at line: %d\n", result, line);
exit(EXIT_FAILURE);
}
}
int calc_num_threads(int sample_size){
size_t work_area_size;
size_t free_mem;
hipMemGetInfo(&free_mem, NULL);
hipfftEstimate1d(sample_size, HIPFFT_R2C, 1, &work_area_size);
//(x-1)*work_area_size + x*(sample_size/2+1)*sizeof(hipfftComplex) < free_mem
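  // Rearranged: x <= (free_mem + work_area_size) / (work_area_size + (sample_size/2+1)*sizeof(hipfftComplex)),
  // then capped at 4 host threads below.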
return min((size_t)(4),(free_mem + work_area_size)/(work_area_size + (sample_size/2+1)*sizeof(hipfftComplex)));
}
int main(int argc, char*argv[]){
//copy music to converter music directory
copy_samples(argv);
//run converter
system("python3.6 ./Converter.py");
//bring in sample songs' data to RAM
std::string all_sample_data = run_command("ls ./Data");
hipfftComplex** all_samples_data = bring_samples_data("./Data/", all_sample_data);
//get a list of all complete data files
std::string command = "ls ";
command.append(argv[1]);
std::string all_complete_data = run_command(command);
//traverse through complete data files and compare
std::string delimiter = "\n";
size_t pos = 0;
std::string file;
//This is main loop of the program
while ((pos = all_complete_data.find(delimiter)) != std::string::npos){
file = all_complete_data.substr(0, pos);
all_complete_data.erase(0, pos + delimiter.length());
std::string s = argv[1];
s.append("/");
s.append(file);
    /*There is a trick in which the input should be aligned to hipfftComplex upon memory allocation
    and plan creation, I think for optimization reasons; but when executing the plan you just cast
    the input to hipfftReal*/
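    /* Illustrative sketch of the in-place R2C layout this relies on (names here are
       placeholders, not part of the original flow): for an N-point real transform the
       buffer must hold N/2+1 complex elements, e.g.
         hipfftComplex *buf;
         hipMalloc((void**)&buf, sizeof(hipfftComplex) * (N/2 + 1));
         hipfftExecR2C(plan, (hipfftReal*)buf, buf);   // cast to real input, complex output
    */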
size_t data_size;
hipfftComplex* complete_data = read_file(s, &data_size,true, 1);
printf("%s: data read\n", file.c_str());
hipfftComplex* d_complete_data;
hipError_t error = hipMalloc((void**)&d_complete_data, data_size*sizeof(hipfftComplex));
get_cuda_error(error, __LINE__);
//size_t free_mem;
//hipMemGetInfo(&free_mem, NULL);
//printf("free mem: %zu\n", free_mem);
error = hipMemcpy(d_complete_data, complete_data, data_size*sizeof(hipfftComplex), hipMemcpyHostToDevice);
get_cuda_error(error, __LINE__);
hipfftHandle plan;
hipfftResult_t result;
float min_lad;
for(int sample_no =0; sample_no < num_samples ; sample_no++){
min_lad = -1;
hipfftComplex* d_sample_data;
error = hipMalloc((void**)&d_sample_data, sample_sizes[sample_no]*sizeof(hipfftComplex));
get_cuda_error(error, __LINE__);
error = hipMemcpy(d_sample_data, all_samples_data[sample_no], sample_sizes[sample_no]*sizeof(hipfftComplex), hipMemcpyHostToDevice);
get_cuda_error(error, __LINE__);
result = hipfftPlan1d(&plan, sample_sizes[sample_no], HIPFFT_R2C,1);
get_cufft_result(result, __LINE__);
result = hipfftExecR2C(plan, (hipfftReal*)d_sample_data, d_sample_data);
get_cufft_result(result, __LINE__);
//printf("%s is transformed and ready to check\n", sample_names[sample_no]);
error = hipDeviceSynchronize();
get_cuda_error(error, __LINE__);
//now is time to compare the sample with the complete data
int num_threads = calc_num_threads(sample_sizes[sample_no]);
//create different plans for different host threads, it's a necessity imposed by cuFFT thread safety
      hipfftHandle* plans = (hipfftHandle*)malloc(num_threads*sizeof(hipfftHandle));
plans[0] = plan;
for(int i=1; i < num_threads ; i++){
result = hipfftPlan1d(&plans[i], sample_sizes[sample_no], HIPFFT_R2C, 1);
get_cufft_result(result, __LINE__);
}
hipfftComplex* d_partial_data_transformed;//This contains subsets of data that are being transformed in parallel
error = hipMalloc((void**)&d_partial_data_transformed,num_threads*(sample_sizes[sample_no]/2+1)*sizeof(hipfftComplex));
get_cuda_error(error, __LINE__);
      /*if the time-domain signal is somehow continuous (for sound waves it's not illogical to assume that),
        then windows with little space between them show approximately the same signals with similar FFTs,
        so by taking advantage of that we don't compare the sample with every window in the complete signal
        and instead put a little bit of padding, called space, between the windows.
      */
size_t ss = sample_sizes[sample_no];
int space = ss/32;
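      // With space = ss/32, consecutive windows start 1/32 of a sample-length apart,
      // i.e. 32 candidate offsets per sample-sized span of the track (a heuristic speed/accuracy trade-off).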
hipfftComplex* lad_results = (hipfftComplex*) malloc(num_threads*sizeof(hipfftComplex));
      bool *stopped = (bool*)calloc(num_threads, sizeof(bool));// zero-initialized; this is for controlling when to stop
bool everybody_stopped= false;
int num_stopped_threads = 0;
#pragma omp parallel num_threads(num_threads)
{
int ID = omp_get_thread_num();
for(int i = ID*space ; i < data_size && !everybody_stopped ; i+= num_threads*space){
if(i +ss < data_size){// the last chunk might be small so we have to handle that
//the thread needs to stay so we can pass the barrier
// but it doesn't do any work
hipfftExecR2C(plans[ID], (hipfftReal*)d_complete_data+i, d_partial_data_transformed + ID*(ss/2 +1));
error = hipDeviceSynchronize();
get_cuda_error(error, __LINE__);
}
else{// now this thread has no work to do
if(stopped[ID] == false){
stopped[ID] = true;
num_stopped_threads ++;
}
}
#pragma omp barrier
#pragma omp single
{
if(num_stopped_threads == num_threads)// all threads have reached chunks smaller than sample_size and
everybody_stopped = true; // therefore have stopped working
else{// there are active threads
int block_dim = 1024;
int grid_dim = (num_threads*(ss/2+1)-1)/block_dim +1;
hipLaunchKernelGGL(( calc_LAD), dim3(grid_dim), dim3(block_dim), 0, 0, d_sample_data, d_partial_data_transformed, ss/2+1);
error = hipDeviceSynchronize();
get_cuda_error(error, __LINE__);
hipMemcpy(lad_results, d_partial_data_transformed, num_threads*sizeof(hipfftComplex), hipMemcpyDeviceToHost);
int min_index =0;
for(int j=0; j < num_threads; j++)
if(abs(lad_results[j].x) < abs(lad_results[min_index].x))
min_index = j;
if(min_lad == -1)
min_lad = abs(lad_results[min_index].x);
else if(lad_results[min_index].x<min_lad)
min_lad = lad_results[min_index].x;
}
}
}
}
if(min_lad < 10000000)
printf("%s matched\n", sample_names[sample_no]);
//printf("min_lad=%f\n", min_lad);
for(int i=0; i < num_threads; i++)
hipfftDestroy(plans[i]);
      free(plans);
      free(lad_results);
      free(stopped);
      hipFree(d_sample_data);
      hipFree(d_partial_data_transformed);
}
    hipFree(d_complete_data);
    free(complete_data);
}
return 0;
}
| 5fa7b7594f0aafc99077203938da2a75111765e6.cu | #include<fstream>
#include<stdlib.h>
#include<string>
#include<stdio.h>
#include<omp.h>
#include<cuda_runtime.h>
#include<cufft.h>
#include<cufftXt.h>
#include<math.h>
char** sample_names;
size_t* sample_sizes;
int num_samples = 0;
__global__
void calc_LAD(cufftComplex* sample, cufftComplex* partial, int sample_size){
int id = blockDim.x*blockIdx.x + threadIdx.x;
int sample_idx = id % sample_size;
if(id < 4*sample_size ){
float diff = abs(sqrt(sample[sample_idx].x*sample[sample_idx].x + sample[sample_idx].y*sample[sample_idx].y)
- sqrt(partial[id].x*partial[id].x + partial[id].y*partial[id].y));
partial[id].x = diff;
partial[id].y = 0;
__syncthreads();
for(unsigned int s = sample_size/2; s>0; s>>=1){
if(sample_idx < s)
partial[id].x += partial[id+s].x;
__syncthreads();
}
//now the results are in the num_threads first elements of the array so we'll need one memcpy in host code
if(sample_idx ==0)
partial[id/sample_size] = partial[id];
}
}
int get_num_files(std::string files){
int c=0;
for(int i=0; i < files.length() ; i ++)
if(files.at(i) == '\n') c++;
return c;
}
size_t get_data_size(std::string file_path){
std::ifstream in(file_path.c_str(), std::ifstream::ate | std::ifstream::binary);
size_t size = in.tellg();
in.close();
return size;
}
void copy_samples(char* argv[]){
std::string songs_path(argv[2]);
std::string command = "cp ";
command.append(songs_path);
command.append("/* ");
command.append("./Converted");
system(command.c_str());
}
std::string run_command(std::string cmd) {
std::string data;
FILE * stream;
const int max_buffer = 256;
char buffer[max_buffer];
cmd.append(" 2>&1");
stream = popen(cmd.c_str(), "r");
if (stream) {
while (!feof(stream))
if (fgets(buffer, max_buffer, stream) != NULL) data.append(buffer);
pclose(stream);
}
return data;
}
cufftComplex* read_file(std::string file_path, size_t * size, bool shrink, int downsampling_factor){
*size = get_data_size(file_path);
(*size)/=downsampling_factor;
//shrink the sample into a power of 2 so that transformations are done fast
if(shrink)
*size = (size_t)pow(2, (size_t)log2(*size));
FILE* file;
file = fopen(file_path.c_str(), "r");
if(file == NULL){
printf("Error: Couldn't open file %s\n", file_path.c_str());
exit(EXIT_FAILURE);
}
cufftComplex* data_cufft = (cufftComplex*)malloc(*size*sizeof(cufftComplex));
unsigned char* data = (unsigned char*)malloc((*size*downsampling_factor)*sizeof(char));
fread(data, 1, *size*downsampling_factor,file);
for(int i =0; i < *size; i ++){
data_cufft[i].x = (float) data[i*downsampling_factor];
//we're dealing with real numbers so set phase to 0
data_cufft[i].y = 0;
}
  free(data);
  fclose(file);
return data_cufft;
}
cufftComplex** bring_samples_data(std::string supdir, std::string files){
num_samples = get_num_files(files);
cufftComplex** all_data_cufft = (cufftComplex**)malloc(num_samples*sizeof(cufftComplex*));
sample_names = (char**)malloc(num_samples*sizeof(char*));
sample_sizes = (size_t*)malloc(num_samples*sizeof(size_t));
std::string delimiter = "\n";
size_t pos = 0;
std::string file;
int c = num_samples;
float start = omp_get_wtime();
while ((pos = files.find(delimiter)) != std::string::npos){
file = files.substr(0, pos);
sample_names[--c] = (char*)malloc(file.length()*sizeof(char));
strcpy(sample_names[c] , file.c_str());
files.erase(0, pos + delimiter.length());
std::string s = supdir;
s.append(file);
//sample_sizes[c] = get_data_size(s);
all_data_cufft[c] = read_file(s, &sample_sizes[c], true, 1);
printf("%s: data read\n", file.c_str());
}
float end = omp_get_wtime();
printf("time elapsed: %f\n", end - start);
return all_data_cufft;
}
void get_cuda_error(cudaError_t error, int line){
if(error != cudaSuccess){
printf("%s line: %d\n", cudaGetErrorString(error), line);
exit(EXIT_FAILURE);
}
}
void get_cufft_result(cufftResult_t result, int line){
if(result != CUFFT_SUCCESS){
printf("CUFFT error number %d at line: %d\n", result, line);
exit(EXIT_FAILURE);
}
}
int calc_num_threads(int sample_size){
size_t work_area_size;
size_t free_mem;
cudaMemGetInfo(&free_mem, NULL);
cufftEstimate1d(sample_size, CUFFT_R2C, 1, &work_area_size);
//(x-1)*work_area_size + x*(sample_size/2+1)*sizeof(cufftComplex) < free_mem
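  // Rearranged: x <= (free_mem + work_area_size) / (work_area_size + (sample_size/2+1)*sizeof(cufftComplex)),
  // then capped at 4 host threads below.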
return min((size_t)(4),(free_mem + work_area_size)/(work_area_size + (sample_size/2+1)*sizeof(cufftComplex)));
}
int main(int argc, char*argv[]){
//copy music to converter music directory
copy_samples(argv);
//run converter
system("python3.6 ./Converter.py");
//bring in sample songs' data to RAM
std::string all_sample_data = run_command("ls ./Data");
cufftComplex** all_samples_data = bring_samples_data("./Data/", all_sample_data);
//get a list of all complete data files
std::string command = "ls ";
command.append(argv[1]);
std::string all_complete_data = run_command(command);
//traverse through complete data files and compare
std::string delimiter = "\n";
size_t pos = 0;
std::string file;
//This is main loop of the program
while ((pos = all_complete_data.find(delimiter)) != std::string::npos){
file = all_complete_data.substr(0, pos);
all_complete_data.erase(0, pos + delimiter.length());
std::string s = argv[1];
s.append("/");
s.append(file);
    /*There is a trick in which the input should be aligned to cufftComplex upon memory allocation
    and plan creation, I think for optimization reasons; but when executing the plan you just cast
    the input to cufftReal*/
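    /* Illustrative sketch of the in-place R2C layout this relies on (names here are
       placeholders, not part of the original flow): for an N-point real transform the
       buffer must hold N/2+1 complex elements, e.g.
         cufftComplex *buf;
         cudaMalloc((void**)&buf, sizeof(cufftComplex) * (N/2 + 1));
         cufftExecR2C(plan, (cufftReal*)buf, buf);   // cast to real input, complex output
    */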
size_t data_size;
cufftComplex* complete_data = read_file(s, &data_size,true, 1);
printf("%s: data read\n", file.c_str());
cufftComplex* d_complete_data;
cudaError_t error = cudaMalloc((void**)&d_complete_data, data_size*sizeof(cufftComplex));
get_cuda_error(error, __LINE__);
//size_t free_mem;
//cudaMemGetInfo(&free_mem, NULL);
//printf("free mem: %zu\n", free_mem);
error = cudaMemcpy(d_complete_data, complete_data, data_size*sizeof(cufftComplex), cudaMemcpyHostToDevice);
get_cuda_error(error, __LINE__);
cufftHandle plan;
cufftResult_t result;
float min_lad;
for(int sample_no =0; sample_no < num_samples ; sample_no++){
min_lad = -1;
cufftComplex* d_sample_data;
error = cudaMalloc((void**)&d_sample_data, sample_sizes[sample_no]*sizeof(cufftComplex));
get_cuda_error(error, __LINE__);
error = cudaMemcpy(d_sample_data, all_samples_data[sample_no], sample_sizes[sample_no]*sizeof(cufftComplex), cudaMemcpyHostToDevice);
get_cuda_error(error, __LINE__);
result = cufftPlan1d(&plan, sample_sizes[sample_no], CUFFT_R2C,1);
get_cufft_result(result, __LINE__);
result = cufftExecR2C(plan, (cufftReal*)d_sample_data, d_sample_data);
get_cufft_result(result, __LINE__);
//printf("%s is transformed and ready to check\n", sample_names[sample_no]);
error = cudaDeviceSynchronize();
get_cuda_error(error, __LINE__);
//now is time to compare the sample with the complete data
int num_threads = calc_num_threads(sample_sizes[sample_no]);
//create different plans for different host threads, it's a necessity imposed by cuFFT thread safety
      cufftHandle* plans = (cufftHandle*)malloc(num_threads*sizeof(cufftHandle));
plans[0] = plan;
for(int i=1; i < num_threads ; i++){
result = cufftPlan1d(&plans[i], sample_sizes[sample_no], CUFFT_R2C, 1);
get_cufft_result(result, __LINE__);
}
cufftComplex* d_partial_data_transformed;//This contains subsets of data that are being transformed in parallel
error = cudaMalloc((void**)&d_partial_data_transformed,num_threads*(sample_sizes[sample_no]/2+1)*sizeof(cufftComplex));
get_cuda_error(error, __LINE__);
      /*if the time-domain signal is somehow continuous (for sound waves it's not illogical to assume that),
        then windows with little space between them show approximately the same signals with similar FFTs,
        so by taking advantage of that we don't compare the sample with every window in the complete signal
        and instead put a little bit of padding, called space, between the windows.
      */
size_t ss = sample_sizes[sample_no];
int space = ss/32;
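      // With space = ss/32, consecutive windows start 1/32 of a sample-length apart,
      // i.e. 32 candidate offsets per sample-sized span of the track (a heuristic speed/accuracy trade-off).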
cufftComplex* lad_results = (cufftComplex*) malloc(num_threads*sizeof(cufftComplex));
      bool *stopped = (bool*)calloc(num_threads, sizeof(bool));// zero-initialized; this is for controlling when to stop
bool everybody_stopped= false;
int num_stopped_threads = 0;
#pragma omp parallel num_threads(num_threads)
{
int ID = omp_get_thread_num();
for(int i = ID*space ; i < data_size && !everybody_stopped ; i+= num_threads*space){
if(i +ss < data_size){// the last chunk might be small so we have to handle that
//the thread needs to stay so we can pass the barrier
// but it doesn't do any work
cufftExecR2C(plans[ID], (cufftReal*)d_complete_data+i, d_partial_data_transformed + ID*(ss/2 +1));
error = cudaDeviceSynchronize();
get_cuda_error(error, __LINE__);
}
else{// now this thread has no work to do
if(stopped[ID] == false){
stopped[ID] = true;
num_stopped_threads ++;
}
}
#pragma omp barrier
#pragma omp single
{
if(num_stopped_threads == num_threads)// all threads have reached chunks smaller than sample_size and
everybody_stopped = true; // therefore have stopped working
else{// there are active threads
int block_dim = 1024;
int grid_dim = (num_threads*(ss/2+1)-1)/block_dim +1;
calc_LAD<<<grid_dim, block_dim>>>(d_sample_data, d_partial_data_transformed, ss/2+1);
error = cudaDeviceSynchronize();
get_cuda_error(error, __LINE__);
cudaMemcpy(lad_results, d_partial_data_transformed, num_threads*sizeof(cufftComplex), cudaMemcpyDeviceToHost);
int min_index =0;
for(int j=0; j < num_threads; j++)
if(abs(lad_results[j].x) < abs(lad_results[min_index].x))
min_index = j;
if(min_lad == -1)
min_lad = abs(lad_results[min_index].x);
else if(lad_results[min_index].x<min_lad)
min_lad = lad_results[min_index].x;
}
}
}
}
if(min_lad < 10000000)
printf("%s matched\n", sample_names[sample_no]);
//printf("min_lad=%f\n", min_lad);
for(int i=0; i < num_threads; i++)
cufftDestroy(plans[i]);
      free(plans);
      free(lad_results);
      free(stopped);
      cudaFree(d_sample_data);
      cudaFree(d_partial_data_transformed);
}
    cudaFree(d_complete_data);
    free(complete_data);
}
return 0;
}
|
d3adaaa97cd25d801b0f367f7fda6bf5751f6642.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
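// vec_atan applies atan element-wise; the 2-D launch configuration is flattened
// into one linear index: id = globalY * (gridDim.x * blockDim.x) + globalX, with
// a bounds check against n. (The device_sum warning above refers to a
// reduction-style kernel from the original source and does not apply here.)
// Minimal host-side launch sketch (illustrative only; d_result and d_x are
// assumed to be device buffers holding at least n doubles):
//   dim3 block(16, 16);
//   dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
//   hipLaunchKernelGGL(vec_atan, grid, block, 0, 0, n, d_result, d_x);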
__global__ void vec_atan (int n, double *result, double *x)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = atan(x[id]);
}
} | d3adaaa97cd25d801b0f367f7fda6bf5751f6642.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
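// vec_atan applies atan element-wise over a flattened 2-D launch configuration.
// Minimal CUDA launch sketch (illustrative only; d_result and d_x are assumed
// device buffers holding at least n doubles):
//   dim3 block(16, 16);
//   dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
//   vec_atan<<<grid, block>>>(n, d_result, d_x);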
__global__ void vec_atan (int n, double *result, double *x)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = atan(x[id]);
}
} |
f6152f00f93d1ede428a254b166f3b9452047ac8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 2008 BOROUJERDI Maxime. All rights reserved.
*/
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "../benchmark_common.h"
#include "makebmp.h"
/*#include <GL/glew.h>
#include <GL/glut.h>
#include <cuda_gl_interop.h>*/
#include <cutil.h>
typedef unsigned int uint;
typedef unsigned char uchar;
#define numObj 4
#define PI 3.141592654f
#define Angle(a) ((a * PI) / 180.0)
//#define DEVICE_EMU
//#define DEBUG_RT_CUDA
#define FIXED_CONST_PARSE
#ifdef DEBUG_RT_CUDA
#define DEBUG_NUM 8
float4* d_debug_float4;
uint* d_debug_uint;
float4* h_debug_float4;
uint* h_debug_uint;
#endif
int g_verbose_ray;
#include "rayTracing_kernel.cu"
unsigned width = 64; // 640; //512; //16; //32; //512;
unsigned height = 64; // 480; //512; //16;//512;
dim3 blockSize(16, 8);
dim3 gridSize(width / blockSize.x, height / blockSize.y);
float3 viewRotation;
float3 viewTranslation = make_float3(0.0, 0.0, -4.0f);
float invViewMatrix[12];
// static int fpsCount = 0; // FPS count for averaging
// static int fpsLimit = 1; // FPS limit for sampling
unsigned int timer;
// GLuint pbo = 0; // OpenGL pixel buffer
void initPixelBuffer();
class Observateur {
private:
matrice3x4 M; // U, V, W
  float df;  // focal distance
public:
Observateur();
Observateur(const float3&, const float3&, const float3&, double);
inline const matrice3x4& getMatrice() const { return M; }
inline float getDistance() const { return df; }
};
Observateur::Observateur() {
M.m[0] = make_float4(0.0f, 0.0f, 1.0f, 0.0f);
M.m[1] = make_float4(0.0f, 1.0f, 0.0f, 0.0f);
M.m[2] = make_float4(1.0f, 0.0f, 0.0f, 0.0f);
df = 1.0 / tan(Angle(65) / 2.0);
}
Observateur::Observateur(const float3& p,
const float3& u,
const float3& v,
double a) {
float3 VP, U, V, W;
VP = normalize(v);
U = normalize(u);
V = normalize(VP - dot(U, VP) * U);
W = normalize(cross(U, V));
M.m[0] = make_float4(U.x, U.y, U.z, p.x);
M.m[1] = make_float4(V.x, V.y, V.z, p.y);
M.m[2] = make_float4(W.x, W.y, W.z, p.z);
df = 1.0 / tan(Angle(a) / 2.0);
}
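// For the 65 degree aperture used below, df = 1 / tan(65° / 2) ≈ 1.57: df is the
// distance from the eye to the projection plane that yields that field of view.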
float anim = 0.0f, pas = 0.015f;
Observateur obs = Observateur(
make_float3(0.0f, 0.5f, 2.0f),
normalize(make_float3(0.0f, 0.0f, 0.0f) - make_float3(0.0f, 0.5f, 2.0f)),
make_float3(0.0f, 1.0f, 0.0f),
65.0f);
;
uint *values = NULL, *r_output, *d_temp, NUM;
uint* c_output;
Node node[numObj], *d_node;
Sphere s, s1, s2;
float phi;
uint* nObj;
float* prof;
Rayon* ray;
float3 *A, *u;
int t = 1;
void initObjet(hipStream_t stream_app) {
srand(47);
node->s.r = 1.0f;
node[0].s.C = make_float3(0.0f, -1.5f, -0.0f);
node[0].s.r = 0.5f;
node[1].s.C = make_float3(-1.0f, 0.0f, -1.0f);
node[1].s.r = 0.5f;
node[2].s.C = make_float3(1.0f, -0.f, -1.0f);
node[2].s.r = 0.5f;
node[3].s.C = make_float3(0.0f, -0.f, -2.0f);
node[3].s.r = 0.75f;
for (int i(4); i < numObj; i++) {
float r, v, b;
float tmp1(5.0f * ((r = (float(rand() % 255) / 255.0f))) - 2.5f);
float tmp2(5.0f * ((v = (float(rand() % 255) / 255.0f))) - 2.5f);
float tmp3(-5.0f * ((b = (float(rand() % 255) / 255.0f))));
float tmp4((rand() % 100) / 100.0f);
node[i].s.C = make_float3(tmp1, tmp2, tmp3);
node[i].s.r = tmp4;
node[i].s.R = r;
node[i].s.V = v;
node[i].s.B = b;
node[i].s.A = 1.0f;
node[i].fg = 0;
node[i].fd = 0;
}
node[0].s.R = 0.0f;
node[0].s.V = 1.0f;
node[0].s.B = 1.0f;
node[0].s.A = 1.0f;
node[1].s.R = 1.0f;
node[1].s.V = 0.0f;
node[1].s.B = 0.0f;
node[1].s.A = 1.0f;
node[2].s.R = 0.0f;
node[2].s.V = 0.0f;
node[2].s.B = 1.0f;
node[2].s.A = 1.0f;
node[3].s.R = 0.0f;
node[3].s.V = 1.0f;
node[3].s.B = 0.0f;
node[3].s.A = 1.0f;
// createNode(&node[0], &node[1], &node[2], 1.0f);
node[0].fg = 1;
node[0].fd = 2;
node[1].fg = 0;
node[1].fd = 0;
node[2].fg = 0;
node[2].fd = 0;
node[3].fg = 0;
node[3].fd = 0;
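  // fg / fd appear to be left/right child indices (fils gauche / fils droit):
  // node 0 is the root with children 1 and 2, while nodes 1, 2 and 3 carry no
  // children of their own (both indices 0).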
#ifdef DEBUG_RT_CUDA
h_debug_float4 = (float4*)calloc(DEBUG_NUM, sizeof(float4));
h_debug_uint = (uint*)calloc(DEBUG_NUM, sizeof(uint));
CUDA_SAFE_CALL(
hipMalloc((void**)&d_debug_float4, DEBUG_NUM * sizeof(float4)));
CUDA_SAFE_CALL(hipMalloc((void**)&d_debug_uint, DEBUG_NUM * sizeof(uint)));
CUDA_SAFE_CALL(hipMemcpyAsync(d_debug_float4, h_debug_float4,
DEBUG_NUM * sizeof(float4),
hipMemcpyHostToDevice, stream_app));
CUDA_SAFE_CALL(hipMemcpyAsync(d_debug_uint, h_debug_uint,
DEBUG_NUM * sizeof(uint),
hipMemcpyHostToDevice, stream_app));
#endif
printf("I am in Initobjet");
c_output = (uint*)calloc(width * height, sizeof(uint));
CUDA_SAFE_CALL(hipMalloc((void**)&r_output, width * height * sizeof(uint)));
CUDA_SAFE_CALL(hipMalloc((void**)&d_node, numObj * sizeof(Node)));
CUDA_SAFE_CALL(hipMemcpyAsync(d_node, node, numObj * sizeof(Node),
hipMemcpyHostToDevice, stream_app));
CUDA_SAFE_CALL(hipMemcpyToSymbolAsync(cnode, node, numObj * sizeof(Node), 0,
hipMemcpyHostToDevice, stream_app));
CUDA_SAFE_CALL(hipMemcpyToSymbolAsync(MView, (void*)&obs, 3 * sizeof(float4),
0, hipMemcpyHostToDevice,
stream_app));
CUDA_SAFE_CALL(hipMalloc((void**)&d_temp, width * height * sizeof(uint)));
CUDA_SAFE_CALL(hipMemset(d_temp, 0, width * height * sizeof(uint)));
CUDA_SAFE_CALL(hipMalloc((void**)&nObj, width * height * sizeof(uint)));
CUDA_SAFE_CALL(hipMalloc((void**)&prof, width * height * sizeof(float)));
CUDA_SAFE_CALL(hipMalloc((void**)&ray, width * height * sizeof(Rayon)));
CUDA_SAFE_CALL(hipMalloc((void**)&A, width * height * sizeof(float3)));
CUDA_SAFE_CALL(hipMalloc((void**)&u, width * height * sizeof(float3)));
}
#define PRINT_PIXELS
// Render the image with CUDA
void render(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
// map PBO to get CUDA device pointer <GY: replace with memcpy?>
// CUDA_SAFE_CALL(hipGLMapBufferObject__((void**)&r_output, pbo));
// CUDA_SAFE_CALL( hipMemcpy( r_output, c_output, width*height*sizeof(uint),
// hipMemcpyHostToDevice) );
// call CUDA kernel, writing results to PBO
CUT_SAFE_CALL(cutStartTimer(timer));
#ifdef DEBUG_RT_CUDA
hipLaunchKernelGGL(( render), dim3(gridSize), dim3(blockSize), 0, stream_app,
d_debug_float4, d_debug_uint, r_output, d_node, width, height, anim,
obs.getDistance());
#else
hipLaunchKernelGGL(( render), dim3(gridSize), dim3(blockSize), 0, stream_app,
r_output, d_node, width, height, anim, obs.getDistance());
#endif
// CUDA_SAFE_CALL( hipDeviceSynchronize() );
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(hipStreamSynchronize(stream_app));
else {
cutilSafeCall(hipDeviceSynchronize());
}
CUT_SAFE_CALL(cutStopTimer(timer));
#ifdef DEBUG_RT_CUDA
CUDA_SAFE_CALL(hipMemcpyAsync(h_debug_float4, d_debug_float4,
DEBUG_NUM * sizeof(float4),
hipMemcpyDeviceToHost, stream_app));
CUDA_SAFE_CALL(hipMemcpyAsync(h_debug_uint, d_debug_uint,
DEBUG_NUM * sizeof(uint),
hipMemcpyDeviceToHost, stream_app));
printf("debug_float4\n");
for (int i = 0; i < DEBUG_NUM; i++) {
printf("%e %e %e %e\n", h_debug_float4[i].x, h_debug_float4[i].y,
h_debug_float4[i].z, h_debug_float4[i].w);
}
printf("debug_uint\n");
for (int i = 0; i < DEBUG_NUM; i++) {
printf("0x%x\n", h_debug_uint[i]);
}
#endif
CUDA_SAFE_CALL(hipMemcpyAsync(c_output, r_output,
width * height * sizeof(uint),
hipMemcpyDeviceToHost, stream_app));
unsigned long long int checksum = 0;
for (int y = (height - 1); y >= 0; y--) {
if (g_verbose_ray)
printf("\n");
for (int x = 0; x < width; x++) {
if (g_verbose_ray)
printf("%010u ", (unsigned)c_output[x + y * width]);
checksum += c_output[x + y * width];
}
}
printf("\n");
printf("checksum=%llx\n", checksum);
CUT_CHECK_ERROR("Erreur kernel");
// CUDA_SAFE_CALL(hipGLUnmapBufferObject(pbo)); //<GY: replace with memcpy?>
}
// Display the result with OpenGL
void display(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
render(stream_app, mutexapp, flag);
// CUT_SAFE_CALL(cutStopTimer(timer));
printf("Kernel Time: %f \n", cutGetTimerValue(timer));
if (anim >= 1.0f)
pas = -0.015f;
else if (anim <= -1.0f)
pas = 0.015f;
anim += pas;
t--;
if (!t) {
return;
}
}
int ox, oy;
int buttonState = 0;
int iDivUp(int a, int b) {
return (a % b != 0) ? (a / b + 1) : (a / b);
}
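// Ceiling division, e.g. iDivUp(500, 16) == 32; used below so the launch grid
// always covers every pixel even when width/height are not multiples of the
// block dimensions.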
void initPixelBuffer() {
/*if (pbo) {
// delete old buffer
CUDA_SAFE_CALL(hipGLUnregisterBufferObject(pbo));
glDeleteBuffersARB(1, &pbo);
}*/
NUM = width * height;
phi = 2.0f / (float)min(width, height);
// create pixel buffer object for display
/* glGenBuffersARB(1, &pbo);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB,
width*height*sizeof(GLubyte)*4, 0, GL_STREAM_DRAW_ARB);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL(hipGLRegisterBufferObject(pbo));*/
// calculate new grid size
gridSize = dim3(iDivUp(width, blockSize.x), iDivUp(height, blockSize.y));
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main_ray(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
// initialise card and timer
int deviceCount;
CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&deviceProp, dev));
if (deviceProp.major >= 1)
break;
}
if (dev == deviceCount) {
fprintf(stderr, "There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
} else
CUDA_SAFE_CALL(hipSetDevice(dev));
width = 256;
height = 256;
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutResetTimer(timer));
initialize_bmp(width, height, 32);
printf("initialize bmp is done");
initObjet(stream_app);
initPixelBuffer();
display(stream_app, mutexapp, flag);
create_bmp(c_output);
CUT_SAFE_CALL(cutDeleteTimer(timer));
return 0;
}
| f6152f00f93d1ede428a254b166f3b9452047ac8.cu | /*
 * Copyright 2008 BOROUJERDI Maxime. All rights reserved.
*/
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "../benchmark_common.h"
#include "makebmp.h"
/*#include <GL/glew.h>
#include <GL/glut.h>
#include <cuda_gl_interop.h>*/
#include <cutil.h>
typedef unsigned int uint;
typedef unsigned char uchar;
#define numObj 4
#define PI 3.141592654f
#define Angle(a) ((a * PI) / 180.0)
//#define DEVICE_EMU
//#define DEBUG_RT_CUDA
#define FIXED_CONST_PARSE
#ifdef DEBUG_RT_CUDA
#define DEBUG_NUM 8
float4* d_debug_float4;
uint* d_debug_uint;
float4* h_debug_float4;
uint* h_debug_uint;
#endif
int g_verbose_ray;
#include "rayTracing_kernel.cu"
unsigned width = 64; // 640; //512; //16; //32; //512;
unsigned height = 64; // 480; //512; //16;//512;
dim3 blockSize(16, 8);
dim3 gridSize(width / blockSize.x, height / blockSize.y);
float3 viewRotation;
float3 viewTranslation = make_float3(0.0, 0.0, -4.0f);
float invViewMatrix[12];
// static int fpsCount = 0; // FPS count for averaging
// static int fpsLimit = 1; // FPS limit for sampling
unsigned int timer;
// GLuint pbo = 0; // OpenGL pixel buffer
void initPixelBuffer();
class Observateur {
private:
matrice3x4 M; // U, V, W
  float df;  // focal distance
public:
Observateur();
Observateur(const float3&, const float3&, const float3&, double);
inline const matrice3x4& getMatrice() const { return M; }
inline float getDistance() const { return df; }
};
Observateur::Observateur() {
M.m[0] = make_float4(0.0f, 0.0f, 1.0f, 0.0f);
M.m[1] = make_float4(0.0f, 1.0f, 0.0f, 0.0f);
M.m[2] = make_float4(1.0f, 0.0f, 0.0f, 0.0f);
df = 1.0 / tan(Angle(65) / 2.0);
}
Observateur::Observateur(const float3& p,
const float3& u,
const float3& v,
double a) {
float3 VP, U, V, W;
VP = normalize(v);
U = normalize(u);
V = normalize(VP - dot(U, VP) * U);
W = normalize(cross(U, V));
M.m[0] = make_float4(U.x, U.y, U.z, p.x);
M.m[1] = make_float4(V.x, V.y, V.z, p.y);
M.m[2] = make_float4(W.x, W.y, W.z, p.z);
df = 1.0 / tan(Angle(a) / 2.0);
}
float anim = 0.0f, pas = 0.015f;
Observateur obs = Observateur(
make_float3(0.0f, 0.5f, 2.0f),
normalize(make_float3(0.0f, 0.0f, 0.0f) - make_float3(0.0f, 0.5f, 2.0f)),
make_float3(0.0f, 1.0f, 0.0f),
65.0f);
;
uint *values = NULL, *r_output, *d_temp, NUM;
uint* c_output;
Node node[numObj], *d_node;
Sphere s, s1, s2;
float phi;
uint* nObj;
float* prof;
Rayon* ray;
float3 *A, *u;
int t = 1;
void initObjet(cudaStream_t stream_app) {
srand(47);
node->s.r = 1.0f;
node[0].s.C = make_float3(0.0f, -1.5f, -0.0f);
node[0].s.r = 0.5f;
node[1].s.C = make_float3(-1.0f, 0.0f, -1.0f);
node[1].s.r = 0.5f;
node[2].s.C = make_float3(1.0f, -0.f, -1.0f);
node[2].s.r = 0.5f;
node[3].s.C = make_float3(0.0f, -0.f, -2.0f);
node[3].s.r = 0.75f;
for (int i(4); i < numObj; i++) {
float r, v, b;
float tmp1(5.0f * ((r = (float(rand() % 255) / 255.0f))) - 2.5f);
float tmp2(5.0f * ((v = (float(rand() % 255) / 255.0f))) - 2.5f);
float tmp3(-5.0f * ((b = (float(rand() % 255) / 255.0f))));
float tmp4((rand() % 100) / 100.0f);
node[i].s.C = make_float3(tmp1, tmp2, tmp3);
node[i].s.r = tmp4;
node[i].s.R = r;
node[i].s.V = v;
node[i].s.B = b;
node[i].s.A = 1.0f;
node[i].fg = 0;
node[i].fd = 0;
}
node[0].s.R = 0.0f;
node[0].s.V = 1.0f;
node[0].s.B = 1.0f;
node[0].s.A = 1.0f;
node[1].s.R = 1.0f;
node[1].s.V = 0.0f;
node[1].s.B = 0.0f;
node[1].s.A = 1.0f;
node[2].s.R = 0.0f;
node[2].s.V = 0.0f;
node[2].s.B = 1.0f;
node[2].s.A = 1.0f;
node[3].s.R = 0.0f;
node[3].s.V = 1.0f;
node[3].s.B = 0.0f;
node[3].s.A = 1.0f;
// createNode(&node[0], &node[1], &node[2], 1.0f);
node[0].fg = 1;
node[0].fd = 2;
node[1].fg = 0;
node[1].fd = 0;
node[2].fg = 0;
node[2].fd = 0;
node[3].fg = 0;
node[3].fd = 0;
#ifdef DEBUG_RT_CUDA
h_debug_float4 = (float4*)calloc(DEBUG_NUM, sizeof(float4));
h_debug_uint = (uint*)calloc(DEBUG_NUM, sizeof(uint));
CUDA_SAFE_CALL(
cudaMalloc((void**)&d_debug_float4, DEBUG_NUM * sizeof(float4)));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_debug_uint, DEBUG_NUM * sizeof(uint)));
CUDA_SAFE_CALL(cudaMemcpyAsync(d_debug_float4, h_debug_float4,
DEBUG_NUM * sizeof(float4),
cudaMemcpyHostToDevice, stream_app));
CUDA_SAFE_CALL(cudaMemcpyAsync(d_debug_uint, h_debug_uint,
DEBUG_NUM * sizeof(uint),
cudaMemcpyHostToDevice, stream_app));
#endif
printf("I am in Initobjet");
c_output = (uint*)calloc(width * height, sizeof(uint));
CUDA_SAFE_CALL(cudaMalloc((void**)&r_output, width * height * sizeof(uint)));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_node, numObj * sizeof(Node)));
CUDA_SAFE_CALL(cudaMemcpyAsync(d_node, node, numObj * sizeof(Node),
cudaMemcpyHostToDevice, stream_app));
CUDA_SAFE_CALL(cudaMemcpyToSymbolAsync(cnode, node, numObj * sizeof(Node), 0,
cudaMemcpyHostToDevice, stream_app));
CUDA_SAFE_CALL(cudaMemcpyToSymbolAsync(MView, (void*)&obs, 3 * sizeof(float4),
0, cudaMemcpyHostToDevice,
stream_app));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_temp, width * height * sizeof(uint)));
CUDA_SAFE_CALL(cudaMemset(d_temp, 0, width * height * sizeof(uint)));
CUDA_SAFE_CALL(cudaMalloc((void**)&nObj, width * height * sizeof(uint)));
CUDA_SAFE_CALL(cudaMalloc((void**)&prof, width * height * sizeof(float)));
CUDA_SAFE_CALL(cudaMalloc((void**)&ray, width * height * sizeof(Rayon)));
CUDA_SAFE_CALL(cudaMalloc((void**)&A, width * height * sizeof(float3)));
CUDA_SAFE_CALL(cudaMalloc((void**)&u, width * height * sizeof(float3)));
}
#define PRINT_PIXELS
// Render the image with CUDA
void render(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
// map PBO to get CUDA device pointer <GY: replace with memcpy?>
// CUDA_SAFE_CALL(cudaGLMapBufferObject((void**)&r_output, pbo));
// CUDA_SAFE_CALL( cudaMemcpy( r_output, c_output, width*height*sizeof(uint),
// cudaMemcpyHostToDevice) );
// call CUDA kernel, writing results to PBO
CUT_SAFE_CALL(cutStartTimer(timer));
#ifdef DEBUG_RT_CUDA
render<<<gridSize, blockSize, 0, stream_app>>>(
d_debug_float4, d_debug_uint, r_output, d_node, width, height, anim,
obs.getDistance());
#else
render<<<gridSize, blockSize, 0, stream_app>>>(
r_output, d_node, width, height, anim, obs.getDistance());
#endif
// CUDA_SAFE_CALL( cudaThreadSynchronize() );
pthread_mutex_unlock(mutexapp);
if (flag)
cutilSafeCall(cudaStreamSynchronize(stream_app));
else {
cutilSafeCall(cudaThreadSynchronize());
}
CUT_SAFE_CALL(cutStopTimer(timer));
#ifdef DEBUG_RT_CUDA
CUDA_SAFE_CALL(cudaMemcpyAsync(h_debug_float4, d_debug_float4,
DEBUG_NUM * sizeof(float4),
cudaMemcpyDeviceToHost, stream_app));
CUDA_SAFE_CALL(cudaMemcpyAsync(h_debug_uint, d_debug_uint,
DEBUG_NUM * sizeof(uint),
cudaMemcpyDeviceToHost, stream_app));
printf("debug_float4\n");
for (int i = 0; i < DEBUG_NUM; i++) {
printf("%e %e %e %e\n", h_debug_float4[i].x, h_debug_float4[i].y,
h_debug_float4[i].z, h_debug_float4[i].w);
}
printf("debug_uint\n");
for (int i = 0; i < DEBUG_NUM; i++) {
printf("0x%x\n", h_debug_uint[i]);
}
#endif
CUDA_SAFE_CALL(cudaMemcpyAsync(c_output, r_output,
width * height * sizeof(uint),
cudaMemcpyDeviceToHost, stream_app));
unsigned long long int checksum = 0;
for (int y = (height - 1); y >= 0; y--) {
if (g_verbose_ray)
printf("\n");
for (int x = 0; x < width; x++) {
if (g_verbose_ray)
printf("%010u ", (unsigned)c_output[x + y * width]);
checksum += c_output[x + y * width];
}
}
printf("\n");
printf("checksum=%llx\n", checksum);
CUT_CHECK_ERROR("Erreur kernel");
// CUDA_SAFE_CALL(cudaGLUnmapBufferObject(pbo)); //<GY: replace with memcpy?>
}
// Display the result with OpenGL
void display(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
render(stream_app, mutexapp, flag);
// CUT_SAFE_CALL(cutStopTimer(timer));
printf("Kernel Time: %f \n", cutGetTimerValue(timer));
if (anim >= 1.0f)
pas = -0.015f;
else if (anim <= -1.0f)
pas = 0.015f;
anim += pas;
t--;
if (!t) {
return;
}
}
int ox, oy;
int buttonState = 0;
int iDivUp(int a, int b) {
return (a % b != 0) ? (a / b + 1) : (a / b);
}
void initPixelBuffer() {
/*if (pbo) {
// delete old buffer
CUDA_SAFE_CALL(cudaGLUnregisterBufferObject(pbo));
glDeleteBuffersARB(1, &pbo);
}*/
NUM = width * height;
phi = 2.0f / (float)min(width, height);
// create pixel buffer object for display
/* glGenBuffersARB(1, &pbo);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB,
width*height*sizeof(GLubyte)*4, 0, GL_STREAM_DRAW_ARB);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
CUDA_SAFE_CALL(cudaGLRegisterBufferObject(pbo));*/
// calculate new grid size
gridSize = dim3(iDivUp(width, blockSize.x), iDivUp(height, blockSize.y));
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main_ray(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
// initialise card and timer
int deviceCount;
CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&deviceProp, dev));
if (deviceProp.major >= 1)
break;
}
if (dev == deviceCount) {
fprintf(stderr, "There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
} else
CUDA_SAFE_CALL(cudaSetDevice(dev));
width = 256;
height = 256;
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutResetTimer(timer));
initialize_bmp(width, height, 32);
printf("initialize bmp is done");
initObjet(stream_app);
initPixelBuffer();
display(stream_app, mutexapp, flag);
create_bmp(c_output);
CUT_SAFE_CALL(cutDeleteTimer(timer));
return 0;
}
|
717f20fbb2df44d0036444b5d54b9b91f9cdcfdc.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/operators/conv_transpose_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using DataLayout = platform::DataLayout;
template <typename T, int D>
static void DataTranspose(const framework::ExecutionContext& ctx,
const Tensor* input, Tensor* output,
const std::vector<int>& axis, int flag = 0) {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
math::Transpose<platform::CUDADeviceContext, T, D> transpose;
auto in_dims = input->dims();
std::vector<int64_t> input_transpose_vec;
for (size_t i = 0; i < axis.size(); ++i) {
if (flag == 0)
input_transpose_vec.push_back(in_dims[axis[i]]);
else
input_transpose_vec.push_back(in_dims[i]);
}
framework::DDim input_transpose_dims(
framework::make_ddim(input_transpose_vec));
output->mutable_data<T>(input_transpose_dims, ctx.GetPlace());
transpose(dev_ctx, *input, output, axis);
}
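// Example: with axis = {0, 3, 1, 2} an NHWC tensor of shape [N, H, W, C] is
// rearranged into NCHW [N, C, H, W]; the kernels below transpose channel-last
// inputs this way before handing them to cuDNN in channel-first layout.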
template <typename T>
class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* output = ctx.Output<Tensor>("Output");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
// cudnn v5 does not support dilations
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
const T* filter_data = filter->data<T>();
const std::string data_layout_str = ctx.Attr<std::string>("data_format");
const paddle::operators::DataLayout data_layout =
(data_layout_str != "NHWC" ? DataLayout::kNCHW : DataLayout::kNHWC);
// if channel_last, transpose to channel_first
Tensor input_transpose;
std::vector<int> input_vec = framework::vectorize<int>(input->dims());
std::vector<int> output_vec = framework::vectorize<int>(output->dims());
if (data_layout == DataLayout::kNHWC) {
if (strides.size() == 2U) {
std::vector<int> axis = {0, 3, 1, 2};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output->dims()[axis[i]];
}
DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 4, 1, 2, 3};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output->dims()[axis[i]];
}
DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
}
} else {
input_transpose = *input;
}
// update padding and dilation
auto in_dims = input_transpose.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims;
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = input_transpose.dims()[0];
new_input_shape_vec[1] = input_transpose.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
input_transpose.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = input_transpose.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, input_transpose, pad_value, &transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, input_transpose, pad_value, &transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Op(ConvTranspose) only supports 4-D or 5-D input Tensor."));
}
} else {
transformed_input = input_transpose;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
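    // Worked example (illustrative values): with paddings = {1, 3} on one
    // spatial axis, padding_common = min(1, 3) = 1 and padding_diff = 2, so the
    // input is explicitly padded by the extra 2 on the side whose requested
    // padding was larger, cuDNN then runs with the symmetric padding of 1, and
    // the starts/ends computed next later slice the enlarged output back to the
    // shape this op actually promised.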
std::vector<int64_t> starts(data_dim, 0);
std::vector<int64_t> ends(data_dim, 0);
std::vector<int64_t> axes(data_dim, 0);
for (size_t i = 0; i < data_dim; ++i) {
starts[i] = input_pad[2 * i + 4] * (strides[i] + 1);
ends[i] = starts[i] + output_vec[i + 2];
axes[i] = i + 2;
}
const T* input_data = transformed_input.data<T>();
input_vec = framework::vectorize<int>(transformed_input.dims());
std::vector<int> transformed_output_vec = output_vec;
for (size_t i = 0; i < data_dim; ++i) {
transformed_output_vec[i + 2] =
output_vec[i + 2] +
(input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] -
2 * padding_common[i] + paddings[2 * i] + paddings[2 * i + 1];
}
Tensor transformed_output;
if (!is_sys_pad) {
DDim transformed_output_shape(
framework::make_ddim(transformed_output_vec));
transformed_output.mutable_data<T>(transformed_output_shape,
ctx.GetPlace());
} else {
output->mutable_data<T>(ctx.GetPlace());
transformed_output.ShareDataWith(*output);
transformed_output.Resize(framework::make_ddim(transformed_output_vec));
}
T* transformed_output_data = transformed_output.data<T>();
DataLayout layout;
int iwo_groups = groups;
int c_groups = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
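    // With cuDNN >= 7.0.1 the group count is carried by the convolution
    // descriptor (c_groups), so the per-group loop below runs exactly once; on
    // older cuDNN versions the loop iterates `groups` times over offset slices
    // of the input, filter and output buffers.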
if (strides.size() == 2U) {
layout = DataLayout::kNCHW;
} else {
layout = DataLayout::kNCDHW;
}
size_t workspace_size = 0;
cudnnConvolutionBwdDataAlgo_t algo{};
// ------------------- cudnn conv algorithm ---------------------
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
auto layout_tensor = GetCudnnTensorFormat(layout);
bool deterministic = FLAGS_cudnn_deterministic;
auto dtype = platform::CudnnDataType<T>::type;
// ------------------- cudnn descriptors ---------------------
ConvArgs args{&transformed_output,
filter,
&transformed_input,
strides,
padding_common,
dilations,
dtype};
args.handle = handle;
args.idesc.set(transformed_output, iwo_groups);
args.wdesc.set(*filter, layout_tensor, iwo_groups);
args.odesc.set(transformed_input, iwo_groups);
args.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
algo = search::Find<T>(args, false, deterministic, ctx);
workspace_size =
::max(workspace_size, search::GetWorkspaceSize(args, algo));
// ------------------- cudnn conv transpose forward ---------------------
int input_offset =
transformed_input.numel() / transformed_input.dims()[0] / groups;
int output_offset =
transformed_output.numel() / transformed_output.dims()[0] / groups;
int filter_offset = filter->numel() / groups;
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
for (int g = 0; g < groups; g++) {
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args.wdesc.desc(),
filter_data + filter_offset * g, args.odesc.desc(),
input_data + input_offset * g, args.cdesc.desc(), algo,
cudnn_workspace, workspace_size, &beta, args.idesc.desc(),
transformed_output_data + output_offset * g));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
if (!is_sys_pad && strides.size() == 2U) {
Slice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_output, output, starts, ends, axes);
} else if (!is_sys_pad && strides.size() == 3U) {
Slice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_output, output, starts, ends, axes);
}
if (data_layout == DataLayout::kNHWC) {
Tensor output_transpose;
Tensor output_nchw;
output_nchw.ShareDataWith(*output);
output_nchw.Resize(framework::make_ddim(output_vec));
if (strides.size() == 2U) {
std::vector<int> axis = {0, 2, 3, 1};
DataTranspose<T, 4>(ctx, &output_nchw, &output_transpose, axis);
*output = output_transpose;
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 2, 3, 4, 1};
DataTranspose<T, 5>(ctx, &output_nchw, &output_transpose, axis);
*output = output_transpose;
}
}
}
};
template <typename T>
class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto input = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
const T* filter_data = filter->data<T>();
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
// cudnn v5 does not support dilations
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
int user_workspace_size = ctx.Attr<int>("workspace_size_MB");
const std::string data_layout_str = ctx.Attr<std::string>("data_format");
const paddle::operators::DataLayout data_layout =
(data_layout_str != "NHWC" ? DataLayout::kNCHW : DataLayout::kNHWC);
// if channel_last, transpose to channel_first
Tensor input_transpose;
Tensor output_grad_transpose;
std::vector<int> input_vec = framework::vectorize<int>(input->dims());
std::vector<int> output_vec =
framework::vectorize<int>(output_grad->dims());
if (data_layout == DataLayout::kNHWC) {
if (strides.size() == 2U) {
std::vector<int> axis = {0, 3, 1, 2};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output_grad->dims()[axis[i]];
}
DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
DataTranspose<T, 4>(ctx, output_grad, &output_grad_transpose, axis);
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 4, 1, 2, 3};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output_grad->dims()[axis[i]];
}
DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
DataTranspose<T, 5>(ctx, output_grad, &output_grad_transpose, axis);
}
} else {
input_transpose = *input;
output_grad_transpose = *output_grad;
}
// update padding and dilation
auto in_dims = input_transpose.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims;
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
Tensor transformed_output_grad;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_output_grad_shape_vec(data_dim + 2);
new_output_grad_shape_vec[0] = output_grad_transpose.dims()[0];
new_output_grad_shape_vec[1] = output_grad_transpose.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
new_output_grad_shape_vec[i + 2] =
output_grad_transpose.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_output_grad_shape(
framework::make_ddim(new_output_grad_shape_vec));
transformed_output_grad.Resize(new_output_grad_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_output_grad =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_output_grad_shape, dev_ctx);
const int rank = input_transpose.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, output_grad_transpose, pad_value,
&transformed_output_grad);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, output_grad_transpose, pad_value,
&transformed_output_grad);
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Op(ConvTranspose) only supports 4-D or 5-D input Tensor."));
}
} else {
transformed_output_grad = output_grad_transpose;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = input_transpose.data<T>();
const T* output_grad_data = transformed_output_grad.data<T>();
output_vec = framework::vectorize<int>(transformed_output_grad.dims());
// ------------------- cudnn descriptors ---------------------
DataLayout layout;
if (strides.size() == 2U) {
layout = DataLayout::kNCHW;
} else {
layout = DataLayout::kNCDHW;
}
int iwo_groups = groups;
int c_groups = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
auto dtype = platform::CudnnDataType<T>::type;
ConvArgs args1{&transformed_output_grad,
filter,
&input_transpose,
strides,
padding_common,
dilations,
dtype};
ConvArgs args2{&transformed_output_grad,
filter,
&input_transpose,
strides,
padding_common,
dilations,
dtype};
cudnnConvolutionFwdAlgo_t data_algo{};
cudnnConvolutionBwdFilterAlgo_t filter_algo{};
auto layout_tensor = GetCudnnTensorFormat(layout);
size_t workspace_size = 0;
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
bool deterministic = FLAGS_cudnn_deterministic;
T* input_grad_data = nullptr;
T* filter_grad_data = nullptr;
if (input_grad)
input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
if (filter_grad)
filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace());
if (input_grad) {
input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
args1.handle = handle;
args1.idesc.set(transformed_output_grad, iwo_groups);
args1.wdesc.set(*filter, layout_tensor, iwo_groups);
args1.odesc.set(input_transpose, iwo_groups);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
data_algo = search1::Find<T>(args1, false, deterministic, ctx);
workspace_size =
::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
}
if (filter_grad) {
filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace());
args2.handle = handle;
args2.idesc.set(transformed_output_grad, iwo_groups);
args2.wdesc.set(*filter_grad, layout_tensor, iwo_groups);
args2.odesc.set(input_transpose, iwo_groups);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo = search2::Find<T>(args2, false, deterministic, ctx);
workspace_size = ::max(workspace_size,
search2::GetWorkspaceSize(args2, filter_algo));
}
// ------------------- cudnn conv backward data ---------------------
// FIXME(typhoonzero): template type T may not be the same as cudnn call.
int input_offset = input->numel() / input->dims()[0] / groups;
int output_grad_offset = transformed_output_grad.numel() /
transformed_output_grad.dims()[0] / groups;
int filter_offset = filter->numel() / groups;
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
if (input_grad) {
// Because beta is zero, it is unnecessary to reset input_grad.
for (int g = 0; g < groups; g++) {
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args1.idesc.desc(),
output_grad_data + output_grad_offset * g, args1.wdesc.desc(),
filter_data + filter_offset * g, args1.cdesc.desc(),
data_algo, cudnn_workspace, workspace_size, &beta,
args1.odesc.desc(), input_grad_data + input_offset * g));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
if (data_layout == DataLayout::kNHWC) {
Tensor input_grad_transpose;
Tensor input_grad_nchw;
input_grad_nchw.ShareDataWith(*input_grad);
input_grad_nchw.Resize(framework::make_ddim(input_vec));
if (strides.size() == 2U) {
std::vector<int> axis = {0, 2, 3, 1};
DataTranspose<T, 4>(ctx, &input_grad_nchw, &input_grad_transpose,
axis);
*input_grad = input_grad_transpose;
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 2, 3, 4, 1};
DataTranspose<T, 5>(ctx, &input_grad_nchw, &input_grad_transpose,
axis);
*input_grad = input_grad_transpose;
}
}
}
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
// Because beta is zero, it is unnecessary to reset filter_grad.
// Gradient with respect to the filter
for (int g = 0; g < groups; g++) {
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args2.idesc.desc(),
output_grad_data + output_grad_offset * g, args2.odesc.desc(),
input_data + input_offset * g, args2.cdesc.desc(),
filter_algo, cudnn_workspace, workspace_size, &beta,
args2.wdesc.desc(), filter_grad_data + filter_offset * g));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_KERNEL(conv2d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeOpKernel<plat::float16>,
ops::CUDNNConvTransposeOpKernel<float>,
ops::CUDNNConvTransposeOpKernel<double>);
REGISTER_OP_KERNEL(conv2d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
ops::CUDNNConvTransposeGradOpKernel<float>,
ops::CUDNNConvTransposeGradOpKernel<double>);
REGISTER_OP_KERNEL(conv3d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeOpKernel<plat::float16>,
ops::CUDNNConvTransposeOpKernel<float>,
ops::CUDNNConvTransposeOpKernel<double>);
REGISTER_OP_KERNEL(conv3d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
ops::CUDNNConvTransposeGradOpKernel<float>,
ops::CUDNNConvTransposeGradOpKernel<double>);
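// The cuDNN-backed kernels above are registered for conv2d_transpose,
// conv3d_transpose and their gradients on the CUDA place, each in float16,
// float and double precision.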
| 717f20fbb2df44d0036444b5d54b9b91f9cdcfdc.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/operators/conv_transpose_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using DataLayout = platform::DataLayout;
template <typename T, int D>
static void DataTranspose(const framework::ExecutionContext& ctx,
const Tensor* input, Tensor* output,
const std::vector<int>& axis, int flag = 0) {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
math::Transpose<platform::CUDADeviceContext, T, D> transpose;
auto in_dims = input->dims();
std::vector<int64_t> input_transpose_vec;
for (size_t i = 0; i < axis.size(); ++i) {
if (flag == 0)
input_transpose_vec.push_back(in_dims[axis[i]]);
else
input_transpose_vec.push_back(in_dims[i]);
}
framework::DDim input_transpose_dims(
framework::make_ddim(input_transpose_vec));
output->mutable_data<T>(input_transpose_dims, ctx.GetPlace());
transpose(dev_ctx, *input, output, axis);
}
template <typename T>
class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* output = ctx.Output<Tensor>("Output");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
// cudnn v5 does not support dilations
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
const T* filter_data = filter->data<T>();
const std::string data_layout_str = ctx.Attr<std::string>("data_format");
const paddle::operators::DataLayout data_layout =
(data_layout_str != "NHWC" ? DataLayout::kNCHW : DataLayout::kNHWC);
// if channel_last, transpose to channel_first
Tensor input_transpose;
std::vector<int> input_vec = framework::vectorize<int>(input->dims());
std::vector<int> output_vec = framework::vectorize<int>(output->dims());
if (data_layout == DataLayout::kNHWC) {
if (strides.size() == 2U) {
std::vector<int> axis = {0, 3, 1, 2};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output->dims()[axis[i]];
}
DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 4, 1, 2, 3};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output->dims()[axis[i]];
}
DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
}
} else {
input_transpose = *input;
}
// update padding and dilation
auto in_dims = input_transpose.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims;
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = input_transpose.dims()[0];
new_input_shape_vec[1] = input_transpose.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
input_transpose.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = input_transpose.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, input_transpose, pad_value, &transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, input_transpose, pad_value, &transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Op(ConvTranspose) only supports 4-D or 5-D input Tensor."));
}
} else {
transformed_input = input_transpose;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
std::vector<int64_t> starts(data_dim, 0);
std::vector<int64_t> ends(data_dim, 0);
std::vector<int64_t> axes(data_dim, 0);
for (size_t i = 0; i < data_dim; ++i) {
starts[i] = input_pad[2 * i + 4] * (strides[i] + 1);
ends[i] = starts[i] + output_vec[i + 2];
axes[i] = i + 2;
}
const T* input_data = transformed_input.data<T>();
input_vec = framework::vectorize<int>(transformed_input.dims());
std::vector<int> transformed_output_vec = output_vec;
for (size_t i = 0; i < data_dim; ++i) {
transformed_output_vec[i + 2] =
output_vec[i + 2] +
(input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] -
2 * padding_common[i] + paddings[2 * i] + paddings[2 * i + 1];
}
Tensor transformed_output;
if (!is_sys_pad) {
DDim transformed_output_shape(
framework::make_ddim(transformed_output_vec));
transformed_output.mutable_data<T>(transformed_output_shape,
ctx.GetPlace());
} else {
output->mutable_data<T>(ctx.GetPlace());
transformed_output.ShareDataWith(*output);
transformed_output.Resize(framework::make_ddim(transformed_output_vec));
}
T* transformed_output_data = transformed_output.data<T>();
DataLayout layout;
int iwo_groups = groups;
int c_groups = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
if (strides.size() == 2U) {
layout = DataLayout::kNCHW;
} else {
layout = DataLayout::kNCDHW;
}
size_t workspace_size = 0;
cudnnConvolutionBwdDataAlgo_t algo{};
// ------------------- cudnn conv algorithm ---------------------
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
auto layout_tensor = GetCudnnTensorFormat(layout);
bool deterministic = FLAGS_cudnn_deterministic;
auto dtype = platform::CudnnDataType<T>::type;
// ------------------- cudnn descriptors ---------------------
ConvArgs args{&transformed_output,
filter,
&transformed_input,
strides,
padding_common,
dilations,
dtype};
args.handle = handle;
args.idesc.set(transformed_output, iwo_groups);
args.wdesc.set(*filter, layout_tensor, iwo_groups);
args.odesc.set(transformed_input, iwo_groups);
args.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
algo = search::Find<T>(args, false, deterministic, ctx);
workspace_size =
std::max(workspace_size, search::GetWorkspaceSize(args, algo));
// ------------------- cudnn conv transpose forward ---------------------
int input_offset =
transformed_input.numel() / transformed_input.dims()[0] / groups;
int output_offset =
transformed_output.numel() / transformed_output.dims()[0] / groups;
int filter_offset = filter->numel() / groups;
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
for (int g = 0; g < groups; g++) {
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args.wdesc.desc(),
filter_data + filter_offset * g, args.odesc.desc(),
input_data + input_offset * g, args.cdesc.desc(), algo,
cudnn_workspace, workspace_size, &beta, args.idesc.desc(),
transformed_output_data + output_offset * g));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
if (!is_sys_pad && strides.size() == 2U) {
Slice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_output, output, starts, ends, axes);
} else if (!is_sys_pad && strides.size() == 3U) {
Slice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_output, output, starts, ends, axes);
}
if (data_layout == DataLayout::kNHWC) {
Tensor output_transpose;
Tensor output_nchw;
output_nchw.ShareDataWith(*output);
output_nchw.Resize(framework::make_ddim(output_vec));
if (strides.size() == 2U) {
std::vector<int> axis = {0, 2, 3, 1};
DataTranspose<T, 4>(ctx, &output_nchw, &output_transpose, axis);
*output = output_transpose;
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 2, 3, 4, 1};
DataTranspose<T, 5>(ctx, &output_nchw, &output_transpose, axis);
*output = output_transpose;
}
}
}
};
template <typename T>
class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto input = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
const T* filter_data = filter->data<T>();
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
// cudnn v5 does not support dilations
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
int user_workspace_size = ctx.Attr<int>("workspace_size_MB");
const std::string data_layout_str = ctx.Attr<std::string>("data_format");
const paddle::operators::DataLayout data_layout =
(data_layout_str != "NHWC" ? DataLayout::kNCHW : DataLayout::kNHWC);
// if channel_last, transpose to channel_first
Tensor input_transpose;
Tensor output_grad_transpose;
std::vector<int> input_vec = framework::vectorize<int>(input->dims());
std::vector<int> output_vec =
framework::vectorize<int>(output_grad->dims());
if (data_layout == DataLayout::kNHWC) {
if (strides.size() == 2U) {
std::vector<int> axis = {0, 3, 1, 2};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output_grad->dims()[axis[i]];
}
DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
DataTranspose<T, 4>(ctx, output_grad, &output_grad_transpose, axis);
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 4, 1, 2, 3};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output_grad->dims()[axis[i]];
}
DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
DataTranspose<T, 5>(ctx, output_grad, &output_grad_transpose, axis);
}
} else {
input_transpose = *input;
output_grad_transpose = *output_grad;
}
// update padding and dilation
auto in_dims = input_transpose.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims;
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
Tensor transformed_output_grad;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_output_grad_shape_vec(data_dim + 2);
new_output_grad_shape_vec[0] = output_grad_transpose.dims()[0];
new_output_grad_shape_vec[1] = output_grad_transpose.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_output_grad_shape_vec[i + 2] =
output_grad_transpose.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_output_grad_shape(
framework::make_ddim(new_output_grad_shape_vec));
transformed_output_grad.Resize(new_output_grad_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_output_grad =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_output_grad_shape, dev_ctx);
const int rank = input_transpose.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, output_grad_transpose, pad_value,
&transformed_output_grad);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, output_grad_transpose, pad_value,
&transformed_output_grad);
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Op(ConvTranspose) only supports 4-D or 5-D input Tensor."));
}
} else {
transformed_output_grad = output_grad_transpose;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = input_transpose.data<T>();
const T* output_grad_data = transformed_output_grad.data<T>();
output_vec = framework::vectorize<int>(transformed_output_grad.dims());
// ------------------- cudnn descriptors ---------------------
DataLayout layout;
if (strides.size() == 2U) {
layout = DataLayout::kNCHW;
} else {
layout = DataLayout::kNCDHW;
}
int iwo_groups = groups;
int c_groups = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
auto dtype = platform::CudnnDataType<T>::type;
ConvArgs args1{&transformed_output_grad,
filter,
&input_transpose,
strides,
padding_common,
dilations,
dtype};
ConvArgs args2{&transformed_output_grad,
filter,
&input_transpose,
strides,
padding_common,
dilations,
dtype};
cudnnConvolutionFwdAlgo_t data_algo{};
cudnnConvolutionBwdFilterAlgo_t filter_algo{};
auto layout_tensor = GetCudnnTensorFormat(layout);
size_t workspace_size = 0;
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
bool deterministic = FLAGS_cudnn_deterministic;
T* input_grad_data = nullptr;
T* filter_grad_data = nullptr;
if (input_grad)
input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
if (filter_grad)
filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace());
if (input_grad) {
input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
args1.handle = handle;
args1.idesc.set(transformed_output_grad, iwo_groups);
args1.wdesc.set(*filter, layout_tensor, iwo_groups);
args1.odesc.set(input_transpose, iwo_groups);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
data_algo = search1::Find<T>(args1, false, deterministic, ctx);
workspace_size =
std::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
}
if (filter_grad) {
filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace());
args2.handle = handle;
args2.idesc.set(transformed_output_grad, iwo_groups);
args2.wdesc.set(*filter_grad, layout_tensor, iwo_groups);
args2.odesc.set(input_transpose, iwo_groups);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo = search2::Find<T>(args2, false, deterministic, ctx);
workspace_size = std::max(workspace_size,
search2::GetWorkspaceSize(args2, filter_algo));
}
// ------------------- cudnn conv backward data ---------------------
// FIXME(typhoonzero): template type T may not be the same as cudnn call.
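    // For conv_transpose, the gradient w.r.t. Input is a *forward* convolution of
    // Output@GRAD with the filter (cudnnConvolutionForward below), while the
    // gradient w.r.t. Filter uses cudnnConvolutionBackwardFilter with Output@GRAD
    // acting as the convolution input and Input acting as the output-side gradient.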
int input_offset = input->numel() / input->dims()[0] / groups;
int output_grad_offset = transformed_output_grad.numel() /
transformed_output_grad.dims()[0] / groups;
int filter_offset = filter->numel() / groups;
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
if (input_grad) {
// Because beta is zero, it is unnecessary to reset input_grad.
for (int g = 0; g < groups; g++) {
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args1.idesc.desc(),
output_grad_data + output_grad_offset * g, args1.wdesc.desc(),
filter_data + filter_offset * g, args1.cdesc.desc(),
data_algo, cudnn_workspace, workspace_size, &beta,
args1.odesc.desc(), input_grad_data + input_offset * g));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
if (data_layout == DataLayout::kNHWC) {
Tensor input_grad_transpose;
Tensor input_grad_nchw;
input_grad_nchw.ShareDataWith(*input_grad);
input_grad_nchw.Resize(framework::make_ddim(input_vec));
if (strides.size() == 2U) {
std::vector<int> axis = {0, 2, 3, 1};
DataTranspose<T, 4>(ctx, &input_grad_nchw, &input_grad_transpose,
axis);
*input_grad = input_grad_transpose;
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 2, 3, 4, 1};
DataTranspose<T, 5>(ctx, &input_grad_nchw, &input_grad_transpose,
axis);
*input_grad = input_grad_transpose;
}
}
}
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
// Because beta is zero, it is unnecessary to reset filter_grad.
// Gradient with respect to the filter
for (int g = 0; g < groups; g++) {
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args2.idesc.desc(),
output_grad_data + output_grad_offset * g, args2.odesc.desc(),
input_data + input_offset * g, args2.cdesc.desc(),
filter_algo, cudnn_workspace, workspace_size, &beta,
args2.wdesc.desc(), filter_grad_data + filter_offset * g));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_KERNEL(conv2d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeOpKernel<plat::float16>,
ops::CUDNNConvTransposeOpKernel<float>,
ops::CUDNNConvTransposeOpKernel<double>);
REGISTER_OP_KERNEL(conv2d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
ops::CUDNNConvTransposeGradOpKernel<float>,
ops::CUDNNConvTransposeGradOpKernel<double>);
REGISTER_OP_KERNEL(conv3d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeOpKernel<plat::float16>,
ops::CUDNNConvTransposeOpKernel<float>,
ops::CUDNNConvTransposeOpKernel<double>);
REGISTER_OP_KERNEL(conv3d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
ops::CUDNNConvTransposeGradOpKernel<float>,
ops::CUDNNConvTransposeGradOpKernel<double>);
|
e669be55fd413084ae9c3aa4779f28e1d3bb94ec.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "math.h"
#include "rocblas.h"
#include <hip/hip_fp16.h>
#include <iostream>
#include <sys/time.h>
//nvcc -lcublas cublas.c -o cublas.out
void main2()
{
int i,j,k,index;
// Linear dimension of matrices
int dim = 100;
int dim2 = 1;
int batch_count = 10000;
// Allocate host storage for batch_count A,B,C square matrices
half* h_A = (half*)malloc(sizeof(half) * dim2 * dim * batch_count);
half* h_B = (half*)malloc(sizeof(half) * dim * dim * batch_count);
half* h_C = (half*)malloc(sizeof(half) * dim * dim * batch_count);
for(k=0; k<batch_count; k++) {
for(j=0; j<dim; j++) {
for(i=0; i<dim; i++) {
index = i*dim + j + k*dim*dim;
//h_A[index] = index*index + 0.0f;
h_B[index] = index + 1.0f;
h_C[index] = 0.0f;
}
}
}
for(k=0; k<batch_count; k++) {
for(j=0; j<dim2; j++) {
for(i=0; i<dim; i++) {
            index = i + j*dim + k*dim2*dim;  // stay within the dim2*dim*batch_count elements allocated for h_A
h_A[index] = index*index + 0.0f;
}
}
}
half *d_A, *d_B, *d_C;
hipMalloc(&d_A, sizeof(half) * dim2 * dim * batch_count);
hipMalloc(&d_B, sizeof(half) * dim * dim * batch_count);
hipMalloc(&d_C, sizeof(half) * dim * dim * batch_count);
    // Copy the initialized host buffers to the device before running the GEMMs.
    hipMemcpy(d_A, h_A, sizeof(half) * dim2 * dim * batch_count, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, sizeof(half) * dim * dim * batch_count, hipMemcpyHostToDevice);
    hipMemcpy(d_C, h_C, sizeof(half) * dim * dim * batch_count, hipMemcpyHostToDevice);
hipblasHandle_t handle;
hipblasCreate(&handle);
printf("hi");
// Do the actual multiplication
struct timeval t1, t2;
half alpha = 1.0f; half beta = 1.0f;
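    // Each hipblasHgemmStridedBatched call computes C_i = alpha * A_i * B_i + beta * C_i
    // for i = 0 .. batch_count-1 with m = dim, n = dim2, k = dim; A_i starts at
    // d_A + i*dim2*dim, B_i at d_B + i*dim*dim and C_i at d_C + i*dim*dim.
    // Note: with k = dim and lda = dim, each A_i is read as a dim x dim block, so
    // consecutive A_i overlap in memory given the dim2*dim stride.
    // The outer loop only repeats the batched call to make the run long enough to time.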
for (int za=0 ; za<50000; za++)
{
hipblasHgemmStridedBatched(handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
dim, dim2, dim,
&alpha,
(const half*)d_A, dim,
dim2*dim,
(const half*)d_B, dim,
dim*dim,
&beta,
d_C, dim,
dim*dim,
batch_count);
}
hipMemcpy(h_C,d_C,sizeof(half) * dim * dim * batch_count,hipMemcpyDeviceToHost);
// Destroy the handle
hipblasDestroy(handle);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
}
int main(){
main2();
printf("Success!\n");
return 0;
} | e669be55fd413084ae9c3aa4779f28e1d3bb94ec.cu | #include <stdio.h>
#include <stdlib.h>
#include "math.h"
#include "cublas_v2.h"
#include <cuda_fp16.h>
#include <iostream>
#include <sys/time.h>
//nvcc -lcublas cublas.c -o cublas.out
void main2()
{
int i,j,k,index;
// Linear dimension of matrices
int dim = 100;
int dim2 = 1;
int batch_count = 10000;
// Allocate host storage for batch_count A,B,C square matrices
half* h_A = (half*)malloc(sizeof(half) * dim2 * dim * batch_count);
half* h_B = (half*)malloc(sizeof(half) * dim * dim * batch_count);
half* h_C = (half*)malloc(sizeof(half) * dim * dim * batch_count);
for(k=0; k<batch_count; k++) {
for(j=0; j<dim; j++) {
for(i=0; i<dim; i++) {
index = i*dim + j + k*dim*dim;
//h_A[index] = index*index + 0.0f;
h_B[index] = index + 1.0f;
h_C[index] = 0.0f;
}
}
}
for(k=0; k<batch_count; k++) {
for(j=0; j<dim2; j++) {
for(i=0; i<dim; i++) {
            index = i + j*dim + k*dim2*dim;  // stay within the dim2*dim*batch_count elements allocated for h_A
h_A[index] = index*index + 0.0f;
}
}
}
half *d_A, *d_B, *d_C;
cudaMalloc(&d_A, sizeof(half) * dim2 * dim * batch_count);
cudaMalloc(&d_B, sizeof(half) * dim * dim * batch_count);
cudaMalloc(&d_C, sizeof(half) * dim * dim * batch_count);
    // Copy the initialized host buffers to the device before running the GEMMs.
    cudaMemcpy(d_A, h_A, sizeof(half) * dim2 * dim * batch_count, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizeof(half) * dim * dim * batch_count, cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, sizeof(half) * dim * dim * batch_count, cudaMemcpyHostToDevice);
cublasHandle_t handle;
cublasCreate(&handle);
printf("hi");
// Do the actual multiplication
struct timeval t1, t2;
half alpha = 1.0f; half beta = 1.0f;
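    // Each cublasHgemmStridedBatched call computes C_i = alpha * A_i * B_i + beta * C_i
    // for i = 0 .. batch_count-1 with m = dim, n = dim2, k = dim; A_i starts at
    // d_A + i*dim2*dim, B_i at d_B + i*dim*dim and C_i at d_C + i*dim*dim.
    // Note: with k = dim and lda = dim, each A_i is read as a dim x dim block, so
    // consecutive A_i overlap in memory given the dim2*dim stride.
    // The outer loop only repeats the batched call to make the run long enough to time.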
for (int za=0 ; za<50000; za++)
{
cublasHgemmStridedBatched(handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
dim, dim2, dim,
&alpha,
(const half*)d_A, dim,
dim2*dim,
(const half*)d_B, dim,
dim*dim,
&beta,
d_C, dim,
dim*dim,
batch_count);
}
cudaMemcpy(h_C,d_C,sizeof(half) * dim * dim * batch_count,cudaMemcpyDeviceToHost);
// Destroy the handle
cublasDestroy(handle);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
}
int main(){
main2();
printf("Success!\n");
return 0;
} |
9278e2cbbf98059c78eea7d6822789a300079435.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
csymv.cu is nearly identical to chemv.cu, just change names and drop cuConjf.
csymv_kernel_U (upper) in csymv_upper.cu is very similar to
csymv_kernel_L (lower) in csymv.cu; diff the two files to compare.
Note: [ds] precisions generated from chemv.cu
@generated from zsymv.cu normal z -> c, Fri Sep 11 18:29:22 2015
@author Mark Gates
*/
#include "common_magma.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Lower case, compute block multiply, work = A*x, for any size n:
[ A11*x1 A12*x2 A13*x3 ] [ A11 A12 A13 ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) A23*x3 ] = [ A21 A22 A23 ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ A11*x1 --- ]
work = [ A12*x2 (A21*x1 + A22*x2) --- ]
[ A13*x3 A23*x3 (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
********************************************************************/
__global__ void
csymv_kernel_L(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
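    // (For example, with n = 150 and NB_X = 64 the grid has 3 blocks and the last
    // block has partial = 150 % 64 = 22 valid rows; when n is a multiple of NB_X,
    // n % NB_X == 0 and every block is treated as full.)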
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_C_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = sA32(tx2, j);
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = sA32(tx2, j);
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(ty2*4 + j, tx2) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = rA[j] * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end csymv_kernel_L
/**************************************************************
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
              [ (A31*x1 + A32*x2 + A33*x3)         ]
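    For example, with n = 150 and NB_X = 64 the grid has 3 blocks: block 0 writes
    y[0..63], block 1 writes y[64..127], and block 2 writes y[128..149] (threads
    with ind >= n do nothing).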
********************************************************************/
__global__ void
csymv_kernel_L_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
magmaFloatComplex Ax = MAGMA_C_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/**
Purpose
-------
magmablas_csymv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n complex symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
            It is recommended that ldda be a multiple of 16; otherwise
            performance will degrade because the memory accesses will not be
            fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements csymv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
    magmablas_csymv_work requires users to provide a workspace, while
    magmablas_csymv is a wrapper routine that allocates the workspace inside the
    routine and provides the same interface as cublas.
    If users need to call csymv frequently, we suggest using
    magmablas_csymv_work instead of magmablas_csymv, since the overhead of
    allocating and freeing device memory in magmablas_csymv hurts performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
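    Workspace sizing and call sketch (illustrative only; incx = incy = 1 and the
    variable names are placeholders, mirroring what the magmablas_csymv wrapper
    does internally):

        magma_int_t blocks = magma_ceildiv( n, 64 );   // NB_X = 64
        magma_int_t lwork  = ldda*blocks;
        magmaFloatComplex_ptr dwork;
        magma_cmalloc( &dwork, lwork );
        magmablas_csymv_work( MagmaLower, n, alpha, dA, ldda, dx, 1,
                              beta, dy, 1, dwork, lwork, queue );
        magma_free( dwork );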
@ingroup magma_cblas2
********************************************************************/
extern "C"
magma_int_t
magmablas_csymv_work(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magmaFloatComplex_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
//magma_csymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
//return MAGMA_SUCCESS;
fprintf(stderr, "%s: %s\n", __func__, "not supported on CUDA ARCH 1.x");
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
int upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
hipLaunchKernelGGL(( csymv_kernel_U), dim3(grid), dim3(threads), 0, queue ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( csymv_kernel_U_sum), dim3(grid), dim3(threads_sum), 0, queue ,
n, alpha, ldda, beta, dy, incy, dwork);
}
else {
hipLaunchKernelGGL(( csymv_kernel_L), dim3(grid), dim3(threads), 0, queue ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( csymv_kernel_L_sum), dim3(grid), dim3(threads_sum), 0, queue ,
n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_csymv_work
/**
Purpose
-------
magmablas_csymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n complex symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
            It is recommended that ldda be a multiple of 16; otherwise
            performance will degrade because the memory accesses will not be
            fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@ingroup magma_cblas2
********************************************************************/
extern "C"
magma_int_t
magmablas_csymv(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy)
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; no CUBLAS version of csymv.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
//magma_csymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
//return MAGMA_SUCCESS;
fprintf(stderr, "%s: %s\n", __func__, "not supported on CUDA ARCH 1.x");
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
int upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
magmaFloatComplex_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_cmalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_csymv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, magma_stream );
magma_free( dwork );
return info;
}
// end magmablas_csymv
| 9278e2cbbf98059c78eea7d6822789a300079435.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
csymv.cu is nearly identical to chemv.cu, just change names and drop cuConjf.
csymv_kernel_U (upper) in csymv_upper.cu is very similar to
csymv_kernel_L (lower) in csymv.cu; diff the two files to compare.
Note: [ds] precisions generated from chemv.cu
@generated from zsymv.cu normal z -> c, Fri Sep 11 18:29:22 2015
@author Mark Gates
*/
#include "common_magma.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Lower case, compute block multiply, work = A*x, for any size n:
[ A11*x1 A12*x2 A13*x3 ] [ A11 A12 A13 ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) A23*x3 ] = [ A21 A22 A23 ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ A11*x1 --- ]
work = [ A12*x2 (A21*x1 + A22*x2) --- ]
[ A13*x3 A23*x3 (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
********************************************************************/
__global__ void
csymv_kernel_L(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
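    // (For example, with n = 150 and NB_X = 64 the grid has 3 blocks and the last
    // block has partial = 150 % 64 = 22 valid rows; when n is a multiple of NB_X,
    // n % NB_X == 0 and every block is treated as full.)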
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_C_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = sA32(tx2, j);
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = sA32(tx2, j);
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(ty2*4 + j, tx2) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = rA[j] * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end csymv_kernel_L
/**************************************************************
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
              [ (A31*x1 + A32*x2 + A33*x3)         ]
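    For example, with n = 150 and NB_X = 64 the grid has 3 blocks: block 0 writes
    y[0..63], block 1 writes y[64..127], and block 2 writes y[128..149] (threads
    with ind >= n do nothing).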
********************************************************************/
__global__ void
csymv_kernel_L_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
magmaFloatComplex Ax = MAGMA_C_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/**
Purpose
-------
magmablas_csymv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n complex symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
            It is recommended that ldda be a multiple of 16; otherwise
            performance will degrade because the memory accesses will not be
            fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements csymv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
    magmablas_csymv_work requires users to provide a workspace, while
    magmablas_csymv is a wrapper routine that allocates the workspace inside the
    routine and provides the same interface as cublas.
    If users need to call csymv frequently, we suggest using
    magmablas_csymv_work instead of magmablas_csymv, since the overhead of
    allocating and freeing device memory in magmablas_csymv hurts performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
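    Workspace sizing and call sketch (illustrative only; incx = incy = 1 and the
    variable names are placeholders, mirroring what the magmablas_csymv wrapper
    does internally):

        magma_int_t blocks = magma_ceildiv( n, 64 );   // NB_X = 64
        magma_int_t lwork  = ldda*blocks;
        magmaFloatComplex_ptr dwork;
        magma_cmalloc( &dwork, lwork );
        magmablas_csymv_work( MagmaLower, n, alpha, dA, ldda, dx, 1,
                              beta, dy, 1, dwork, lwork, queue );
        magma_free( dwork );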
@ingroup magma_cblas2
********************************************************************/
extern "C"
magma_int_t
magmablas_csymv_work(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magmaFloatComplex_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
//magma_csymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
//return MAGMA_SUCCESS;
fprintf(stderr, "%s: %s\n", __func__, "not supported on CUDA ARCH 1.x");
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
int upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
csymv_kernel_U<<< grid, threads, 0, queue >>>
(n, dA, ldda, dx, incx, dwork);
csymv_kernel_U_sum<<< grid, threads_sum, 0, queue >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
else {
csymv_kernel_L<<< grid, threads, 0, queue >>>
(n, dA, ldda, dx, incx, dwork);
csymv_kernel_L_sum<<< grid, threads_sum, 0, queue >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_csymv_work
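/*
    Illustrative usage (a sketch, not part of the MAGMA sources): the caller-managed
    workspace pattern described above.  dA, dx, dy, queue and the repeat count niter
    are assumed to be set up elsewhere; error checking is omitted.

        magma_int_t blocks = magma_ceildiv( n, 64 );   // NB_X = 64
        magma_int_t lwork  = ldda * blocks;            // LWORK >= LDDA * ceil( N / NB_X )
        magmaFloatComplex_ptr dwork;
        magma_cmalloc( &dwork, lwork );                // allocate the workspace once ...
        for (int iter = 0; iter < niter; ++iter) {     // ... and reuse it across many calls
            magmablas_csymv_work( MagmaLower, n, alpha, dA, ldda, dx, 1,
                                  beta, dy, 1, dwork, lwork, queue );
        }
        magma_free( dwork );
*/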
/**
Purpose
-------
magmablas_csymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n complex symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda be a multiple of 16; otherwise
performance would deteriorate because the memory accesses
would not be fully coalesced.
@param[in]
dx COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy COMPLEX array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@ingroup magma_cblas2
********************************************************************/
extern "C"
magma_int_t
magmablas_csymv(
magma_uplo_t uplo, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy)
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; no CUBLAS version of csymv.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
//magma_csymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy );
//return MAGMA_SUCCESS;
fprintf(stderr, "%s: %s\n", __func__, "not supported on CUDA ARCH 1.x");
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
int upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) )
return info;
magmaFloatComplex_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_cmalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_csymv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, magma_stream );
magma_free( dwork );
return info;
}
// end magmablas_csymv
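/*
    Illustrative usage (a sketch, not part of the MAGMA sources): the convenience
    wrapper allocates and frees the workspace internally, so a one-off call reduces to

        magmablas_csymv( MagmaLower, n, alpha, dA, ldda, dx, 1, beta, dy, 1 );

    with dA, dx and dy already resident in device memory.
*/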
|
ae783d3d4d14b850b4cb4c5d1469c52d8b90675d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "morphology.h"
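// Separable two-pass morphology: each *TwoStepShared() helper below runs a horizontal
// pass (Step1) followed by a vertical pass (Step2).  Every pass stages a tile of the
// image plus a 'radio'-wide halo in shared memory and takes the min (erode) or max
// (dilate) over a (2*radio + 1) window along one axis.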
template <typename T>
__global__ void ErodeSharedStep1(const int batch_size, const T *src, T *dst, int radio, int width, int height, int tile_w, int tile_h)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * tile_w + tx - radio;
int y = by * tile_h + ty;
smem[ty * blockDim.x + tx] = 255;
__syncthreads();
if( x < 0 || x >= width || y >= height * batch_size) {
return;
}
smem[ty * blockDim.x + tx] = (int)src[y * width + x];
__syncthreads();
if( x < (bx * tile_w) || x >= (bx + 1) * tile_w ) {
return;
}
int *smem_thread = &smem[ty * blockDim.x + tx - radio];
int val = smem_thread[0];
for( int i = 1; i <= 2 * radio; i++ ) {
val = MIN( val, smem_thread[i] );
}
dst[y * width + x] = (uint8_t)val;
}
template <typename T>
__global__ void ErodeSharedStep2(const int batch_size, const T *src, T *dst, int radio, int width, int height, int tile_w, int tile_h)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * tile_w + tx;
int y = by * tile_h + ty - radio;
smem[ty * blockDim.x + tx] = 255;
__syncthreads();
if( x >= width || y < 0 || y >= height * batch_size) {
return;
}
smem[ty * blockDim.x + tx] = (int)src[y * width + x];
__syncthreads();
if( y < (by * tile_h) || y >= (by + 1) * tile_h) {
return;
}
int *smem_thread = &smem[(ty - radio) * blockDim.x + tx];
int val = smem_thread[0];
for( int i = 1; i <= 2 * radio; i++ ) {
val = MIN( val, smem_thread[i * blockDim.x] );
}
dst[y * width + x] = (uint8_t)val;
}
template <typename T>
__global__ void ErodeSharedStep1(const T *src, T *dst, int radio, int width, int height, int tile_w, int tile_h)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * tile_w + tx - radio;
int y = by * tile_h + ty;
smem[ty * blockDim.x + tx] = 255;
__syncthreads();
if( x < 0 || x >= width || y >= height) {
return;
}
smem[ty * blockDim.x + tx] = (int)src[y * width + x];
__syncthreads();
if( x < (bx * tile_w) || x >= (bx + 1) * tile_w ) {
return;
}
int *smem_thread = &smem[ty * blockDim.x + tx - radio];
int val = smem_thread[0];
for( int i = 1; i <= 2 * radio; i++ ) {
val = MIN( val, smem_thread[i] );
}
dst[y * width + x] = (uint8_t)val;
}
template <typename T>
__global__ void ErodeSharedStep2(const T *src, T *dst, int radio, int width, int height, int tile_w, int tile_h)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * tile_w + tx;
int y = by * tile_h + ty - radio;
smem[ty * blockDim.x + tx] = 255;
__syncthreads();
if( x >= width || y < 0 || y >= height) {
return;
}
smem[ty * blockDim.x + tx] = (int)src[y * width + x];
__syncthreads();
if( y < (by * tile_h) || y >= (by + 1) * tile_h ) {
return;
}
int *smem_thread = &smem[(ty - radio) * blockDim.x + tx];
int val = smem_thread[0];
for( int i = 1; i <= 2 * radio; i++ ) {
val = MIN( val, smem_thread[i * blockDim.x] );
}
dst[y * width + x] = (uint8_t)val;
}
template <typename T>
__global__ void DilateSharedStep1(const T *src, T *dst, int radio, int width, int height, int tile_w, int tile_h)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * tile_w + tx - radio;
int y = by * tile_h + ty;
smem[ty * blockDim.x + tx] = 0;
__syncthreads();
if( x < 0 || x >= width || y >= height ) {
return;
}
smem[ty * blockDim.x + tx] = (int)src[y * width + x];
__syncthreads();
if( x < (bx * tile_w) || x >= (bx + 1) * tile_w ) {
return;
}
int *smem_thread = &smem[ty * blockDim.x + tx - radio];
int val = smem_thread[0];
for( int i = 1; i <= 2 * radio; i++ ) {
val = MAX( val, smem_thread[i] );
}
dst[y * width + x] = (uint8_t)val;
}
template <typename T>
__global__ void DilateSharedStep2(const T *src, T *dst, int radio, int width, int height, int tile_w, int tile_h)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * tile_w + tx;
int y = by * tile_h + ty - radio;
smem[ty * blockDim.x + tx] = 0;
__syncthreads();
if( x >= width || y < 0 || y >= height ) {
return;
}
smem[ty * blockDim.x + tx] = (int)src[y * width + x];
__syncthreads();
if( y < (by * tile_h) || y >= (by + 1) * tile_h ) {
return;
}
int *smem_thread = &smem[(ty - radio) * blockDim.x + tx];
int val = smem_thread[0];
for( int i = 1; i <= 2 * radio; i++ ) {
val = MAX( val, smem_thread[i * blockDim.x] );
}
dst[y * width + x] = (uint8_t)val;
}
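// Host-side wrappers: the row pass uses wide, flat tiles (640x1), the column pass uses
// tall, narrow tiles (8x64), and each launch reserves one int of shared memory per
// thread (block.x * block.y * sizeof(int)).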
void ErodeTwoStepShared(const int batch_size, void *src, void *temp, void *dst,
int radio, int width, int height, hipStream_t& stream)
{
int tile_w1 = 640;
int tile_h1 = 1;
dim3 block1( tile_w1 + 2 * radio, tile_h1 );
dim3 grid1( ceil((float)width / tile_w1), ceil(batch_size * (float)height / tile_h1) );
hipLaunchKernelGGL(( ErodeSharedStep1), dim3(grid1), dim3(block1), block1.y * block1.x * sizeof(int), stream,
batch_size,
static_cast<const uint8_t* const>(src),
static_cast<uint8_t*>(temp),
radio, width, height, tile_w1, tile_h1);
// hipDeviceSynchronize();
int tile_w2 = 8;
int tile_h2 = 64;
dim3 block2( tile_w2, tile_h2 + 2 * radio );
dim3 grid2( ceil((float)width / tile_w2), ceil(batch_size * (float)height / tile_h2) );
hipLaunchKernelGGL(( ErodeSharedStep2), dim3(grid2), dim3(block2), block2.y * block2.x * sizeof(int), stream,
batch_size,
static_cast<const uint8_t* const>(temp),
static_cast<uint8_t*>(dst),
radio, width, height, tile_w2, tile_h2);
// hipDeviceSynchronize();
}
void ErodeTwoStepShared(void *src, void *temp, void *dst,
int radio, int width, int height, hipStream_t& stream)
{
int tile_w1 = 640;
int tile_h1 = 1;
dim3 block1( tile_w1 + 2 * radio, tile_h1 );
dim3 grid1( ceil((float)width / tile_w1), ceil((float)height / tile_h1) );
hipLaunchKernelGGL(( ErodeSharedStep1), dim3(grid1), dim3(block1), block1.y * block1.x * sizeof(int), stream,
static_cast<const uint8_t* const>(src),
static_cast<uint8_t*>(temp),
radio, width, height, tile_w1, tile_h1);
// hipDeviceSynchronize();
int tile_w2 = 8;
int tile_h2 = 64;
dim3 block2( tile_w2, tile_h2 + 2 * radio );
dim3 grid2( ceil((float)width / tile_w2), ceil((float)height / tile_h2) );
hipLaunchKernelGGL(( ErodeSharedStep2), dim3(grid2), dim3(block2), block2.y * block2.x * sizeof(int), stream,
static_cast<const uint8_t* const>(temp),
static_cast<uint8_t*>(dst),
radio, width, height, tile_w2, tile_h2);
// hipDeviceSynchronize();
}
void ErodeTwoStepShared(void *src, void *dst,
int radio, int width, int height, hipStream_t& stream)
{
void *temp = NULL;
hipMalloc( &temp, width * height * sizeof(uint8_t) );
int tile_w1 = 640;
int tile_h1 = 1;
dim3 block1( tile_w1 + 2 * radio, tile_h1 );
dim3 grid1( ceil((float)width / tile_w1), ceil((float)height / tile_h1) );
hipLaunchKernelGGL(( ErodeSharedStep1), dim3(grid1), dim3(block1), block1.y * block1.x * sizeof(int), stream,
static_cast<const uint8_t* const>(src),
static_cast<uint8_t*>(temp),
radio, width, height, tile_w1, tile_h1);
// hipDeviceSynchronize();
int tile_w2 = 8;
int tile_h2 = 64;
dim3 block2( tile_w2, tile_h2 + 2 * radio );
dim3 grid2( ceil((float)width / tile_w2), ceil((float)height / tile_h2) );
hipLaunchKernelGGL(( ErodeSharedStep2), dim3(grid2), dim3(block2), block2.y * block2.x * sizeof(int), stream,
static_cast<const uint8_t* const>(temp),
static_cast<uint8_t*>(dst),
radio, width, height, tile_w2, tile_h2);
// hipDeviceSynchronize();
hipFree( temp );
}
void DilateTwoStepShared(void *src, void *temp, void *dst,
int radio, int width, int height, hipStream_t& stream)
{
int tile_w1 = 640;
int tile_h1 = 1;
dim3 block1( tile_w1 + 2 * radio, tile_h1 );
dim3 grid1( ceil((float)width / tile_w1), ceil((float)height / tile_h1) );
hipLaunchKernelGGL(( DilateSharedStep1), dim3(grid1), dim3(block1), block1.y * block1.x * sizeof(int), stream,
static_cast<const uint8_t* const>(src),
static_cast<uint8_t*>(temp),
radio, width, height, tile_w1, tile_h1);
// hipDeviceSynchronize();
int tile_w2 = 8;
int tile_h2 = 64;
dim3 block2( tile_w2, tile_h2 + 2 * radio );
dim3 grid2( ceil((float)width / tile_w2), ceil((float)height / tile_h2) );
hipLaunchKernelGGL(( DilateSharedStep2), dim3(grid2), dim3(block2), block2.y * block2.x * sizeof(int), stream,
static_cast<const uint8_t* const>(temp),
static_cast<uint8_t*>(dst),
radio, width, height, tile_w2, tile_h2);
// hipDeviceSynchronize();
}
void DilateTwoStepShared(void *src, void *dst,
int radio, int width, int height, hipStream_t& stream)
{
void *temp = NULL;
hipMalloc( &temp, width * height * sizeof(uint8_t) );
int tile_w1 = 640;
int tile_h1 = 1;
dim3 block1( tile_w1 + 2 * radio, tile_h1 );
dim3 grid1( ceil((float)width / tile_w1), ceil((float)height / tile_h1) );
hipLaunchKernelGGL(( DilateSharedStep1), dim3(grid1), dim3(block1), block1.y * block1.x * sizeof(int), stream,
static_cast<const uint8_t* const>(src),
static_cast<uint8_t*>(temp),
radio, width, height, tile_w1, tile_h1);
// hipDeviceSynchronize();
int tile_w2 = 8;
int tile_h2 = 64;
dim3 block2( tile_w2, tile_h2 + 2 * radio );
dim3 grid2( ceil((float)width / tile_w2), ceil((float)height / tile_h2) );
hipLaunchKernelGGL(( DilateSharedStep2), dim3(grid2), dim3(block2), block2.y * block2.x * sizeof(int), stream,
static_cast<const uint8_t* const>(temp),
static_cast<uint8_t*>(dst),
radio, width, height, tile_w2, tile_h2);
// hipDeviceSynchronize();
hipFree( temp );
}
| ae783d3d4d14b850b4cb4c5d1469c52d8b90675d.cu | #include "morphology.h"
template <typename T>
__global__ void ErodeSharedStep1(const int batch_size, const T *src, T *dst, int radio, int width, int height, int tile_w, int tile_h)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * tile_w + tx - radio;
int y = by * tile_h + ty;
smem[ty * blockDim.x + tx] = 255;
__syncthreads();
if( x < 0 || x >= width || y >= height * batch_size) {
return;
}
smem[ty * blockDim.x + tx] = (int)src[y * width + x];
__syncthreads();
if( x < (bx * tile_w) || x >= (bx + 1) * tile_w ) {
return;
}
int *smem_thread = &smem[ty * blockDim.x + tx - radio];
int val = smem_thread[0];
for( int i = 1; i <= 2 * radio; i++ ) {
val = MIN( val, smem_thread[i] );
}
dst[y * width + x] = (uint8_t)val;
}
template <typename T>
__global__ void ErodeSharedStep2(const int batch_size, const T *src, T *dst, int radio, int width, int height, int tile_w, int tile_h)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * tile_w + tx;
int y = by * tile_h + ty - radio;
smem[ty * blockDim.x + tx] = 255;
__syncthreads();
if( x >= width || y < 0 || y >= height * batch_size) {
return;
}
smem[ty * blockDim.x + tx] = (int)src[y * width + x];
__syncthreads();
if( y < (by * tile_h) || y >= (by + 1) * tile_h) {
return;
}
int *smem_thread = &smem[(ty - radio) * blockDim.x + tx];
int val = smem_thread[0];
for( int i = 1; i <= 2 * radio; i++ ) {
val = MIN( val, smem_thread[i * blockDim.x] );
}
dst[y * width + x] = (uint8_t)val;
}
template <typename T>
__global__ void ErodeSharedStep1(const T *src, T *dst, int radio, int width, int height, int tile_w, int tile_h)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * tile_w + tx - radio;
int y = by * tile_h + ty;
smem[ty * blockDim.x + tx] = 255;
__syncthreads();
if( x < 0 || x >= width || y >= height) {
return;
}
smem[ty * blockDim.x + tx] = (int)src[y * width + x];
__syncthreads();
if( x < (bx * tile_w) || x >= (bx + 1) * tile_w ) {
return;
}
int *smem_thread = &smem[ty * blockDim.x + tx - radio];
int val = smem_thread[0];
for( int i = 1; i <= 2 * radio; i++ ) {
val = MIN( val, smem_thread[i] );
}
dst[y * width + x] = (uint8_t)val;
}
template <typename T>
__global__ void ErodeSharedStep2(const T *src, T *dst, int radio, int width, int height, int tile_w, int tile_h)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * tile_w + tx;
int y = by * tile_h + ty - radio;
smem[ty * blockDim.x + tx] = 255;
__syncthreads();
if( x >= width || y < 0 || y >= height) {
return;
}
smem[ty * blockDim.x + tx] = (int)src[y * width + x];
__syncthreads();
if( y < (by * tile_h) || y >= (by + 1) * tile_h ) {
return;
}
int *smem_thread = &smem[(ty - radio) * blockDim.x + tx];
int val = smem_thread[0];
for( int i = 1; i <= 2 * radio; i++ ) {
val = MIN( val, smem_thread[i * blockDim.x] );
}
dst[y * width + x] = (uint8_t)val;
}
template <typename T>
__global__ void DilateSharedStep1(const T *src, T *dst, int radio, int width, int height, int tile_w, int tile_h)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * tile_w + tx - radio;
int y = by * tile_h + ty;
smem[ty * blockDim.x + tx] = 0;
__syncthreads();
if( x < 0 || x >= width || y >= height ) {
return;
}
smem[ty * blockDim.x + tx] = (int)src[y * width + x];
__syncthreads();
if( x < (bx * tile_w) || x >= (bx + 1) * tile_w ) {
return;
}
int *smem_thread = &smem[ty * blockDim.x + tx - radio];
int val = smem_thread[0];
for( int i = 1; i <= 2 * radio; i++ ) {
val = MAX( val, smem_thread[i] );
}
dst[y * width + x] = (uint8_t)val;
}
template <typename T>
__global__ void DilateSharedStep2(const T *src, T *dst, int radio, int width, int height, int tile_w, int tile_h)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * tile_w + tx;
int y = by * tile_h + ty - radio;
smem[ty * blockDim.x + tx] = 0;
__syncthreads();
if( x >= width || y < 0 || y >= height ) {
return;
}
smem[ty * blockDim.x + tx] = (int)src[y * width + x];
__syncthreads();
if( y < (by * tile_h) || y >= (by + 1) * tile_h ) {
return;
}
int *smem_thread = &smem[(ty - radio) * blockDim.x + tx];
int val = smem_thread[0];
for( int i = 1; i <= 2 * radio; i++ ) {
val = MAX( val, smem_thread[i * blockDim.x] );
}
dst[y * width + x] = (uint8_t)val;
}
void ErodeTwoStepShared(const int batch_size, void *src, void *temp, void *dst,
int radio, int width, int height, cudaStream_t& stream)
{
int tile_w1 = 640;
int tile_h1 = 1;
dim3 block1( tile_w1 + 2 * radio, tile_h1 );
dim3 grid1( ceil((float)width / tile_w1), ceil(batch_size * (float)height / tile_h1) );
ErodeSharedStep1<<<grid1, block1, block1.y * block1.x * sizeof(int), stream>>>(
batch_size,
static_cast<const uint8_t* const>(src),
static_cast<uint8_t*>(temp),
radio, width, height, tile_w1, tile_h1);
// cudaDeviceSynchronize();
int tile_w2 = 8;
int tile_h2 = 64;
dim3 block2( tile_w2, tile_h2 + 2 * radio );
dim3 grid2( ceil((float)width / tile_w2), ceil(batch_size * (float)height / tile_h2) );
ErodeSharedStep2<<<grid2, block2, block2.y * block2.x * sizeof(int), stream>>>(
batch_size,
static_cast<const uint8_t* const>(temp),
static_cast<uint8_t*>(dst),
radio, width, height, tile_w2, tile_h2);
// cudaDeviceSynchronize();
}
void ErodeTwoStepShared(void *src, void *temp, void *dst,
int radio, int width, int height, cudaStream_t& stream)
{
int tile_w1 = 640;
int tile_h1 = 1;
dim3 block1( tile_w1 + 2 * radio, tile_h1 );
dim3 grid1( ceil((float)width / tile_w1), ceil((float)height / tile_h1) );
ErodeSharedStep1<<<grid1, block1, block1.y * block1.x * sizeof(int), stream>>>(
static_cast<const uint8_t* const>(src),
static_cast<uint8_t*>(temp),
radio, width, height, tile_w1, tile_h1);
// cudaDeviceSynchronize();
int tile_w2 = 8;
int tile_h2 = 64;
dim3 block2( tile_w2, tile_h2 + 2 * radio );
dim3 grid2( ceil((float)width / tile_w2), ceil((float)height / tile_h2) );
ErodeSharedStep2<<<grid2, block2, block2.y * block2.x * sizeof(int), stream>>>(
static_cast<const uint8_t* const>(temp),
static_cast<uint8_t*>(dst),
radio, width, height, tile_w2, tile_h2);
// cudaDeviceSynchronize();
}
void ErodeTwoStepShared(void *src, void *dst,
int radio, int width, int height, cudaStream_t& stream)
{
void *temp = NULL;
cudaMalloc( &temp, width * height * sizeof(uint8_t) );
int tile_w1 = 640;
int tile_h1 = 1;
dim3 block1( tile_w1 + 2 * radio, tile_h1 );
dim3 grid1( ceil((float)width / tile_w1), ceil((float)height / tile_h1) );
ErodeSharedStep1<<<grid1, block1, block1.y * block1.x * sizeof(int), stream>>>(
static_cast<const uint8_t* const>(src),
static_cast<uint8_t*>(temp),
radio, width, height, tile_w1, tile_h1);
// cudaDeviceSynchronize();
int tile_w2 = 8;
int tile_h2 = 64;
dim3 block2( tile_w2, tile_h2 + 2 * radio );
dim3 grid2( ceil((float)width / tile_w2), ceil((float)height / tile_h2) );
ErodeSharedStep2<<<grid2, block2, block2.y * block2.x * sizeof(int), stream>>>(
static_cast<const uint8_t* const>(temp),
static_cast<uint8_t*>(dst),
radio, width, height, tile_w2, tile_h2);
// cudaDeviceSynchronize();
cudaFree( temp );
}
void DilateTwoStepShared(void *src, void *temp, void *dst,
int radio, int width, int height, cudaStream_t& stream)
{
int tile_w1 = 640;
int tile_h1 = 1;
dim3 block1( tile_w1 + 2 * radio, tile_h1 );
dim3 grid1( ceil((float)width / tile_w1), ceil((float)height / tile_h1) );
DilateSharedStep1<<<grid1, block1, block1.y * block1.x * sizeof(int), stream>>>(
static_cast<const uint8_t* const>(src),
static_cast<uint8_t*>(temp),
radio, width, height, tile_w1, tile_h1);
// cudaDeviceSynchronize();
int tile_w2 = 8;
int tile_h2 = 64;
dim3 block2( tile_w2, tile_h2 + 2 * radio );
dim3 grid2( ceil((float)width / tile_w2), ceil((float)height / tile_h2) );
DilateSharedStep2<<<grid2, block2, block2.y * block2.x * sizeof(int), stream>>>(
static_cast<const uint8_t* const>(temp),
static_cast<uint8_t*>(dst),
radio, width, height, tile_w2, tile_h2);
// cudaDeviceSynchronize();
}
void DilateTwoStepShared(void *src, void *dst,
int radio, int width, int height, cudaStream_t& stream)
{
void *temp = NULL;
cudaMalloc( &temp, width * height * sizeof(uint8_t) );
int tile_w1 = 640;
int tile_h1 = 1;
dim3 block1( tile_w1 + 2 * radio, tile_h1 );
dim3 grid1( ceil((float)width / tile_w1), ceil((float)height / tile_h1) );
DilateSharedStep1<<<grid1, block1, block1.y * block1.x * sizeof(int), stream>>>(
static_cast<const uint8_t* const>(src),
static_cast<uint8_t*>(temp),
radio, width, height, tile_w1, tile_h1);
// cudaDeviceSynchronize();
int tile_w2 = 8;
int tile_h2 = 64;
dim3 block2( tile_w2, tile_h2 + 2 * radio );
dim3 grid2( ceil((float)width / tile_w2), ceil((float)height / tile_h2) );
DilateSharedStep2<<<grid2, block2, block2.y * block2.x * sizeof(int), stream>>>(
static_cast<const uint8_t* const>(temp),
static_cast<uint8_t*>(dst),
radio, width, height, tile_w2, tile_h2);
// cudaDeviceSynchronize();
cudaFree( temp );
}
|
bdfe3f4403e8268f1beca43eac8cb4ff3d8d846d.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include "task_data.h"
/**
* \brief CUDA kernel that checks a buffer against the expected counter sequence
*
* \param sharedMemory area for exchanging status information with the host
* \param nbuf buffer number
*
*
*/
__global__ void checkCounterKernel( long *sharedMemory, int nbuf )
{
__shared__ int shFlagIrq;
TaskMonitor *ptrMonitor = (TaskMonitor*)sharedMemory;
TaskBufferStatus *ts=(TaskBufferStatus *)sharedMemory;
ts+=nbuf;
uint64_t step = TaskCounts;
int size=ts->sizeOfKBytes;
int cnt=1024/8*size/step;
uint64_t expect_data=nbuf*1024*size/8;
expect_data += threadIdx.x;
uint64_t *src = (uint64_t*)(ts->ptrCudaIn);
src+=threadIdx.x;
uint64_t *dst;
TaskCheckData* check= &(ts->check[threadIdx.x]);
unsigned int totalErrorForBuf=0;
unsigned int errorCnt=0;
unsigned int block_rd=0;
unsigned int block_ok=0;
unsigned int block_error=0;
unsigned int flagError=0;
TaskHostStatus *ptrHostStatus = ts->ptrHostStatus;
shFlagIrq=0;
//printf( "src=%p x=%d y=%d z=%d expect_data=0x%.8lX\n", src, threadIdx.x, threadIdx.y, threadIdx.z, expect_data );
for( int loop=0; ; loop++ )
{
if( 1==ptrMonitor->flagExit )
{
break;
}
if( 0==threadIdx.x )
shFlagIrq=ts->irqFlag;
if( 1!=shFlagIrq )
{
for( volatile int jj=0; jj<1000; jj++ );
continue;
}
src = (uint64_t*)(ts->ptrCudaIn);
src+=threadIdx.x;
__syncthreads();
flagError=0;
check->flagError=1;
if( 0==threadIdx.x )
{
dst=(uint64_t*)(ts->ptrCudaOut);
dst+= ts->indexWr * cnt;
for( int ii=0; ii<cnt; ii++ )
{
uint64_t val;
val = *src; src+=step;
*dst++ = val;
if( val!=expect_data )
{
if( errorCnt<16 )
{
check->nblock[errorCnt]=block_rd;
check->adr[errorCnt]=ii;
check->expect_data[errorCnt]=expect_data;
check->receive_data[errorCnt]=val;
}
errorCnt++;
flagError++;
}
expect_data+=step;
}
{
int n=ts->indexWr+1;
if( n==ts->indexMax )
n=0;
ts->indexWr=n;
ptrHostStatus->indexWr=n;
}
} else
{
for( int ii=0; ii<cnt; ii++ )
{
uint64_t val;
val = *src; src+=step;
if( val!=expect_data )
{
if( errorCnt<16 )
{
check->nblock[errorCnt]=block_rd;
check->adr[errorCnt]=ii;
check->expect_data[errorCnt]=expect_data;
check->receive_data[errorCnt]=val;
}
errorCnt++;
flagError++;
}
expect_data+=step;
}
}
check->flagError=flagError;
check->cntError=errorCnt;
if( 0==threadIdx.x )
ptrMonitor->block[nbuf].irqFlag=0;
expect_data += 2*1024*size/8;
__syncthreads();
block_rd++;
if( 0==threadIdx.x )
{
// Check the results from all tasks
unsigned int flagErr=0;
for( int ii=0; ii<TaskCounts; ii++ )
{
if( ts->check[ii].flagError )
{
flagErr=1;
}
}
if( 0==flagErr)
{
block_ok++;
} else
{
block_error++;
}
ts->blockRd=block_rd;
ts->blockOk=block_ok;
ts->blockError=block_error;
//printf( "buf: %d expect_data= 0x%.8lX \n", nbuf, expect_data );
}
}
}
/**
* \brief start checkCounterKernel
*
* \param sharedMemory pointer to the shared data in CUDA memory
* \param nbuf buffer number
* \param stream CUDA stream for this kernel
*
*/
int run_checkCounter( long *sharedMemory, int nbuf, hipStream_t& stream )
{
//Kernel configuration, where a two-dimensional grid and
//three-dimensional blocks are configured.
dim3 dimGrid(1, 1);
dim3 dimBlock(TaskCounts, 1, 1);
hipLaunchKernelGGL(( checkCounterKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, sharedMemory, nbuf );
return 0;
}
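/*
    Host-side flow (a sketch only; the buffer count nBuffers is illustrative and the
    TaskMonitor/TaskBufferStatus layout comes from task_data.h):

        hipStream_t stream[nBuffers];
        for (int nbuf = 0; nbuf < nBuffers; ++nbuf) {
            hipStreamCreate( &stream[nbuf] );
            run_checkCounter( sharedMemory, nbuf, stream[nbuf] );  // one persistent kernel per buffer
        }
        // ... DMA fills ptrCudaIn and raises irqFlag; the host sets flagExit = 1 to stop ...
        for (int nbuf = 0; nbuf < nBuffers; ++nbuf)
            hipStreamSynchronize( stream[nbuf] );
*/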
//__global__ void MonitorKernel( long* sharedMemory, int nbuf, unsigned int index_rd )
//{
//
// TaskMonitor *ptrMonitor = (TaskMonitor*)sharedMemory;
// TaskBufferStatus *ts=(TaskBufferStatus *)sharedMemory;
// ts+=nbuf;
//
// for( int loop=0; ; loop++ )
// {
// if( 1==ptrMonitor->flagExit )
// {
// break;
// }
//
// if( index_rd!=ptrMonitor->block[0].indexWr )
// break;
//
// for( volatile int jj=0; jj<10000; jj++ );
// }
//
//
//}
//
//int run_Monitor( long* sharedMemory, int nbuf, unsigned int index_rd, hipStream_t stream )
//{
//
// //Kernel configuration, where a two-dimensional grid and
// //three-dimensional blocks are configured.
// dim3 dimGrid(1, 1);
// dim3 dimBlock(1, 1, 1);
// MonitorKernel<<<dimGrid, dimBlock, 0, stream>>>(sharedMemory, nbuf, index_rd );
//
//
//}
| bdfe3f4403e8268f1beca43eac8cb4ff3d8d846d.cu |
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include "task_data.h"
/**
* \brief CUDA kernel that checks a buffer against the expected counter sequence
*
* \param sharedMemory area for exchanging status information with the host
* \param nbuf buffer number
*
*
*/
__global__ void checkCounterKernel( long *sharedMemory, int nbuf )
{
__shared__ int shFlagIrq;
TaskMonitor *ptrMonitor = (TaskMonitor*)sharedMemory;
TaskBufferStatus *ts=(TaskBufferStatus *)sharedMemory;
ts+=nbuf;
uint64_t step = TaskCounts;
int size=ts->sizeOfKBytes;
int cnt=1024/8*size/step;
uint64_t expect_data=nbuf*1024*size/8;
expect_data += threadIdx.x;
uint64_t *src = (uint64_t*)(ts->ptrCudaIn);
src+=threadIdx.x;
uint64_t *dst;
TaskCheckData* check= &(ts->check[threadIdx.x]);
unsigned int totalErrorForBuf=0;
unsigned int errorCnt=0;
unsigned int block_rd=0;
unsigned int block_ok=0;
unsigned int block_error=0;
unsigned int flagError=0;
TaskHostStatus *ptrHostStatus = ts->ptrHostStatus;
shFlagIrq=0;
//printf( "src=%p x=%d y=%d z=%d expect_data=0x%.8lX\n", src, threadIdx.x, threadIdx.y, threadIdx.z, expect_data );
for( int loop=0; ; loop++ )
{
if( 1==ptrMonitor->flagExit )
{
break;
}
if( 0==threadIdx.x )
shFlagIrq=ts->irqFlag;
if( 1!=shFlagIrq )
{
for( volatile int jj=0; jj<1000; jj++ );
continue;
}
src = (uint64_t*)(ts->ptrCudaIn);
src+=threadIdx.x;
__syncthreads();
flagError=0;
check->flagError=1;
if( 0==threadIdx.x )
{
dst=(uint64_t*)(ts->ptrCudaOut);
dst+= ts->indexWr * cnt;
for( int ii=0; ii<cnt; ii++ )
{
uint64_t val;
val = *src; src+=step;
*dst++ = val;
if( val!=expect_data )
{
if( errorCnt<16 )
{
check->nblock[errorCnt]=block_rd;
check->adr[errorCnt]=ii;
check->expect_data[errorCnt]=expect_data;
check->receive_data[errorCnt]=val;
}
errorCnt++;
flagError++;
}
expect_data+=step;
}
{
int n=ts->indexWr+1;
if( n==ts->indexMax )
n=0;
ts->indexWr=n;
ptrHostStatus->indexWr=n;
}
} else
{
for( int ii=0; ii<cnt; ii++ )
{
uint64_t val;
val = *src; src+=step;
if( val!=expect_data )
{
if( errorCnt<16 )
{
check->nblock[errorCnt]=block_rd;
check->adr[errorCnt]=ii;
check->expect_data[errorCnt]=expect_data;
check->receive_data[errorCnt]=val;
}
errorCnt++;
flagError++;
}
expect_data+=step;
}
}
check->flagError=flagError;
check->cntError=errorCnt;
if( 0==threadIdx.x )
ptrMonitor->block[nbuf].irqFlag=0;
expect_data += 2*1024*size/8;
__syncthreads();
block_rd++;
if( 0==threadIdx.x )
{
// Check the results from all tasks
unsigned int flagErr=0;
for( int ii=0; ii<TaskCounts; ii++ )
{
if( ts->check[ii].flagError )
{
flagErr=1;
}
}
if( 0==flagErr)
{
block_ok++;
} else
{
block_error++;
}
ts->blockRd=block_rd;
ts->blockOk=block_ok;
ts->blockError=block_error;
//printf( "buf: %d expect_data= 0x%.8lX \n", nbuf, expect_data );
}
}
}
/**
* \brief start checkCounterKernel
*
* \param sharedMemory pointer to the shared data in CUDA memory
* \param nbuf buffer number
* \param stream CUDA stream for this kernel
*
*/
int run_checkCounter( long *sharedMemory, int nbuf, cudaStream_t& stream )
{
//Kernel configuration, where a two-dimensional grid and
//three-dimensional blocks are configured.
dim3 dimGrid(1, 1);
dim3 dimBlock(TaskCounts, 1, 1);
checkCounterKernel<<<dimGrid, dimBlock, 0, stream>>>( sharedMemory, nbuf );
return 0;
}
//__global__ void MonitorKernel( long* sharedMemory, int nbuf, unsigned int index_rd )
//{
//
// TaskMonitor *ptrMonitor = (TaskMonitor*)sharedMemory;
// TaskBufferStatus *ts=(TaskBufferStatus *)sharedMemory;
// ts+=nbuf;
//
// for( int loop=0; ; loop++ )
// {
// if( 1==ptrMonitor->flagExit )
// {
// break;
// }
//
// if( index_rd!=ptrMonitor->block[0].indexWr )
// break;
//
// for( volatile int jj=0; jj<10000; jj++ );
// }
//
//
//}
//
//int run_Monitor( long* sharedMemory, int nbuf, unsigned int index_rd, cudaStream_t stream )
//{
//
// //Kernel configuration, where a two-dimensional grid and
// //three-dimensional blocks are configured.
// dim3 dimGrid(1, 1);
// dim3 dimBlock(1, 1, 1);
// MonitorKernel<<<dimGrid, dimBlock, 0, stream>>>(sharedMemory, nbuf, index_rd );
//
//
//}
|
d4ad3bf360321b0e0506f7f92b07d89b1cbf7cd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/kernel/cuda_graph_support.h"
namespace oneflow {
namespace {
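// Concatenates two row-major inputs along their innermost (column) dimension: every
// output element decides from its column index whether it comes from src0 or src1.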
template<typename T, typename IDX>
__global__ void BinaryConcatKernel(const IDX out_elems, const IDX out_cols, const IDX in0_cols,
const IDX in1_cols, const T* src0, const T* src1, T* dst) {
CUDA_1D_KERNEL_LOOP_T(IDX, i, out_elems) {
const IDX row = i / out_cols;
const IDX col = i - row * out_cols;
const T* src_ptr = nullptr;
if (col < in0_cols) {
src_ptr = src0 + row * in0_cols + col;
} else {
src_ptr = src1 + row * in1_cols + (col - in0_cols);
}
dst[i] = *src_ptr;
}
}
template<typename T, typename IDX>
void LaunchBinaryConcatKernel(ep::Stream* stream, const IDX rows, const IDX in0_cols,
const IDX in1_cols, const void* src0, const void* src1, void* dst) {
const IDX out_cols = in0_cols + in1_cols;
const IDX out_elems = rows * out_cols;
RUN_CUDA_KERNEL((BinaryConcatKernel<T, IDX>), stream, out_elems, out_elems, out_cols, in0_cols,
in1_cols, reinterpret_cast<const T*>(src0), reinterpret_cast<const T*>(src1),
reinterpret_cast<T*>(dst));
}
template<typename T>
void DispatchIndexType(ep::Stream* stream, const int64_t rows, const int64_t in0_cols,
const int64_t in1_cols, const void* src0, const void* src1, void* dst) {
if (rows * (in0_cols + in1_cols) >= (1 << 30)) {  // use 64-bit indexing once the element count reaches 2^30
LaunchBinaryConcatKernel<T, int64_t>(stream, rows, in0_cols, in1_cols, src0, src1, dst);
} else {
LaunchBinaryConcatKernel<T, int32_t>(stream, rows, in0_cols, in1_cols, src0, src1, dst);
}
}
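// Choose the widest vector type (16/8/4/2/1 bytes) whose alignment is satisfied by both
// source pointers, the destination pointer and the per-row byte counts, so each element
// moved by the kernel covers as many bytes as possible.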
void DispatchDataType(ep::Stream* stream, const int64_t rows, const int64_t in0_cols,
const int64_t in1_cols, const void* src0, const void* src1, void* dst) {
const uintptr_t src0_ptr = reinterpret_cast<uintptr_t>(src0);
const uintptr_t src1_ptr = reinterpret_cast<uintptr_t>(src1);
const uintptr_t dst_ptr = reinterpret_cast<uintptr_t>(dst);
const auto IsAligned = [&](const size_t alignment) {
return src0_ptr % alignment == 0 && src1_ptr % alignment == 0 && dst_ptr % alignment == 0
&& in0_cols % alignment == 0 && in1_cols % alignment == 0;
};
if (IsAligned(16)) {
DispatchIndexType<uint4>(stream, rows, in0_cols / 16, in1_cols / 16, src0, src1, dst);
} else if (IsAligned(8)) {
DispatchIndexType<uint2>(stream, rows, in0_cols / 8, in1_cols / 8, src0, src1, dst);
} else if (IsAligned(4)) {
DispatchIndexType<uint32_t>(stream, rows, in0_cols / 4, in1_cols / 4, src0, src1, dst);
} else if (IsAligned(2)) {
DispatchIndexType<uint16_t>(stream, rows, in0_cols / 2, in1_cols / 2, src0, src1, dst);
} else {
DispatchIndexType<uint8_t>(stream, rows, in0_cols, in1_cols, src0, src1, dst);
}
}
void DispatchBinaryConcat(ep::Stream* stream, const int64_t elem_size, const int64_t rows,
const int64_t in0_cols, const int64_t in1_cols, const void* src0,
const void* src1, void* dst) {
DispatchDataType(stream, rows, in0_cols * elem_size, in1_cols * elem_size, src0, src1, dst);
}
class ConcatKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport {
public:
ConcatKernel() = default;
~ConcatKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
user_op::Tensor* out_tensor = ctx->Tensor4ArgNameAndIndex("out", 0);
const DataType data_type = out_tensor->data_type();
if (out_tensor->shape_view().elem_cnt() == 0) { return; }
const int64_t axis = ctx->Attr<int64_t>("axis");
CHECK_GE(axis, 0);
const int64_t num_axes = out_tensor->shape_view().NumAxes();
CHECK_LT(axis, num_axes);
const int64_t out_cols = out_tensor->shape_view().Count(axis);
const int64_t rows = out_tensor->shape_view().elem_cnt() / out_cols;
CHECK_GT(rows, 0);
CHECK_EQ(ctx->input_size("in"), 2);
const user_op::Tensor* in0_tensor = ctx->Tensor4ArgNameAndIndex("in", 0);
const user_op::Tensor* in1_tensor = ctx->Tensor4ArgNameAndIndex("in", 1);
CHECK_EQ(in0_tensor->data_type(), data_type);
CHECK_EQ(in1_tensor->data_type(), data_type);
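// If either input is empty, the concatenation degenerates to a plain copy of the other input.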
if (in0_tensor->shape_view().elem_cnt() == 0) {
CHECK_EQ(in1_tensor->shape_view(), out_tensor->shape_view());
Memcpy<DeviceType::kCUDA>(ctx->stream(), out_tensor->mut_dptr(), in1_tensor->dptr(),
out_tensor->shape_view().elem_cnt() * GetSizeOfDataType(data_type));
return;
}
if (in1_tensor->shape_view().elem_cnt() == 0) {
CHECK_EQ(in0_tensor->shape_view(), out_tensor->shape_view());
Memcpy<DeviceType::kCUDA>(ctx->stream(), out_tensor->mut_dptr(), in0_tensor->dptr(),
out_tensor->shape_view().elem_cnt() * GetSizeOfDataType(data_type));
return;
}
CHECK_EQ(in0_tensor->shape_view().NumAxes(), num_axes);
CHECK_EQ(in1_tensor->shape_view().NumAxes(), num_axes);
for (int64_t i = 0; i < num_axes; ++i) {
if (i != axis) {
CHECK_EQ(in0_tensor->shape_view().At(i), out_tensor->shape_view().At(i));
CHECK_EQ(in1_tensor->shape_view().At(i), out_tensor->shape_view().At(i));
}
}
CHECK_EQ(in0_tensor->shape_view().At(axis) + in1_tensor->shape_view().At(axis),
out_tensor->shape_view().At(axis));
const int64_t in0_cols = in0_tensor->shape_view().Count(axis);
const int64_t in1_cols = in1_tensor->shape_view().Count(axis);
DispatchBinaryConcat(ctx->stream(), GetSizeOfDataType(data_type), rows, in0_cols, in1_cols,
in0_tensor->dptr(), in1_tensor->dptr(), out_tensor->mut_dptr());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
} // namespace
REGISTER_USER_KERNEL("cat")
.SetCreateFn<ConcatKernel>()
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA)
&& (user_op::HobInputSize("in") == 2))
.SetPriority(user_op::kKernelPriorityOptimized);
} // namespace oneflow
| d4ad3bf360321b0e0506f7f92b07d89b1cbf7cd0.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/kernel/cuda_graph_support.h"
namespace oneflow {
namespace {
template<typename T, typename IDX>
__global__ void BinaryConcatKernel(const IDX out_elems, const IDX out_cols, const IDX in0_cols,
const IDX in1_cols, const T* src0, const T* src1, T* dst) {
CUDA_1D_KERNEL_LOOP_T(IDX, i, out_elems) {
const IDX row = i / out_cols;
const IDX col = i - row * out_cols;
const T* src_ptr = nullptr;
if (col < in0_cols) {
src_ptr = src0 + row * in0_cols + col;
} else {
src_ptr = src1 + row * in1_cols + (col - in0_cols);
}
dst[i] = *src_ptr;
}
}
template<typename T, typename IDX>
void LaunchBinaryConcatKernel(ep::Stream* stream, const IDX rows, const IDX in0_cols,
const IDX in1_cols, const void* src0, const void* src1, void* dst) {
const IDX out_cols = in0_cols + in1_cols;
const IDX out_elems = rows * out_cols;
RUN_CUDA_KERNEL((BinaryConcatKernel<T, IDX>), stream, out_elems, out_elems, out_cols, in0_cols,
in1_cols, reinterpret_cast<const T*>(src0), reinterpret_cast<const T*>(src1),
reinterpret_cast<T*>(dst));
}
template<typename T>
void DispatchIndexType(ep::Stream* stream, const int64_t rows, const int64_t in0_cols,
const int64_t in1_cols, const void* src0, const void* src1, void* dst) {
if (rows * (in0_cols + in1_cols) >= (1 << 30)) {  // use 64-bit indexing once the element count reaches 2^30
LaunchBinaryConcatKernel<T, int64_t>(stream, rows, in0_cols, in1_cols, src0, src1, dst);
} else {
LaunchBinaryConcatKernel<T, int32_t>(stream, rows, in0_cols, in1_cols, src0, src1, dst);
}
}
void DispatchDataType(ep::Stream* stream, const int64_t rows, const int64_t in0_cols,
const int64_t in1_cols, const void* src0, const void* src1, void* dst) {
const uintptr_t src0_ptr = reinterpret_cast<uintptr_t>(src0);
const uintptr_t src1_ptr = reinterpret_cast<uintptr_t>(src1);
const uintptr_t dst_ptr = reinterpret_cast<uintptr_t>(dst);
const auto IsAligned = [&](const size_t alignment) {
return src0_ptr % alignment == 0 && src1_ptr % alignment == 0 && dst_ptr % alignment == 0
&& in0_cols % alignment == 0 && in1_cols % alignment == 0;
};
if (IsAligned(16)) {
DispatchIndexType<uint4>(stream, rows, in0_cols / 16, in1_cols / 16, src0, src1, dst);
} else if (IsAligned(8)) {
DispatchIndexType<uint2>(stream, rows, in0_cols / 8, in1_cols / 8, src0, src1, dst);
} else if (IsAligned(4)) {
DispatchIndexType<uint32_t>(stream, rows, in0_cols / 4, in1_cols / 4, src0, src1, dst);
} else if (IsAligned(2)) {
DispatchIndexType<uint16_t>(stream, rows, in0_cols / 2, in1_cols / 2, src0, src1, dst);
} else {
DispatchIndexType<uint8_t>(stream, rows, in0_cols, in1_cols, src0, src1, dst);
}
}
void DispatchBinaryConcat(ep::Stream* stream, const int64_t elem_size, const int64_t rows,
const int64_t in0_cols, const int64_t in1_cols, const void* src0,
const void* src1, void* dst) {
DispatchDataType(stream, rows, in0_cols * elem_size, in1_cols * elem_size, src0, src1, dst);
}
class ConcatKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport {
public:
ConcatKernel() = default;
~ConcatKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
user_op::Tensor* out_tensor = ctx->Tensor4ArgNameAndIndex("out", 0);
const DataType data_type = out_tensor->data_type();
if (out_tensor->shape_view().elem_cnt() == 0) { return; }
const int64_t axis = ctx->Attr<int64_t>("axis");
CHECK_GE(axis, 0);
const int64_t num_axes = out_tensor->shape_view().NumAxes();
CHECK_LT(axis, num_axes);
const int64_t out_cols = out_tensor->shape_view().Count(axis);
const int64_t rows = out_tensor->shape_view().elem_cnt() / out_cols;
CHECK_GT(rows, 0);
CHECK_EQ(ctx->input_size("in"), 2);
const user_op::Tensor* in0_tensor = ctx->Tensor4ArgNameAndIndex("in", 0);
const user_op::Tensor* in1_tensor = ctx->Tensor4ArgNameAndIndex("in", 1);
CHECK_EQ(in0_tensor->data_type(), data_type);
CHECK_EQ(in1_tensor->data_type(), data_type);
if (in0_tensor->shape_view().elem_cnt() == 0) {
CHECK_EQ(in1_tensor->shape_view(), out_tensor->shape_view());
Memcpy<DeviceType::kCUDA>(ctx->stream(), out_tensor->mut_dptr(), in1_tensor->dptr(),
out_tensor->shape_view().elem_cnt() * GetSizeOfDataType(data_type));
return;
}
if (in1_tensor->shape_view().elem_cnt() == 0) {
CHECK_EQ(in0_tensor->shape_view(), out_tensor->shape_view());
Memcpy<DeviceType::kCUDA>(ctx->stream(), out_tensor->mut_dptr(), in0_tensor->dptr(),
out_tensor->shape_view().elem_cnt() * GetSizeOfDataType(data_type));
return;
}
CHECK_EQ(in0_tensor->shape_view().NumAxes(), num_axes);
CHECK_EQ(in1_tensor->shape_view().NumAxes(), num_axes);
for (int64_t i = 0; i < num_axes; ++i) {
if (i != axis) {
CHECK_EQ(in0_tensor->shape_view().At(i), out_tensor->shape_view().At(i));
CHECK_EQ(in1_tensor->shape_view().At(i), out_tensor->shape_view().At(i));
}
}
CHECK_EQ(in0_tensor->shape_view().At(axis) + in1_tensor->shape_view().At(axis),
out_tensor->shape_view().At(axis));
const int64_t in0_cols = in0_tensor->shape_view().Count(axis);
const int64_t in1_cols = in1_tensor->shape_view().Count(axis);
DispatchBinaryConcat(ctx->stream(), GetSizeOfDataType(data_type), rows, in0_cols, in1_cols,
in0_tensor->dptr(), in1_tensor->dptr(), out_tensor->mut_dptr());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
} // namespace
REGISTER_USER_KERNEL("cat")
.SetCreateFn<ConcatKernel>()
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA)
&& (user_op::HobInputSize("in") == 2))
.SetPriority(user_op::kKernelPriorityOptimized);
} // namespace oneflow
|
d7dda01d89b38b5f4bde50f37e3572bc1d5d02bc.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @brief Breadth-first Search Top-Down test program
* @file
*/
#include "Static/BreadthFirstSearch/TopDown2.cuh"
#include <GraphIO/GraphStd.hpp>
#include <Util/CommandLineParam.hpp>
#include <hip/hip_runtime_api.h> //--profile-from-start off
int main(int argc, char* argv[]) {
using namespace timer;
using namespace hornets_nest;
graph::GraphStd<vid_t, eoff_t> graph;
CommandLineParam cmd(graph, argc, argv);
//graph.print();
HornetInit hornet_init(graph.nV(), graph.nE(), graph.out_offsets_ptr(),
graph.out_edges_ptr());
HornetGraph hornet_graph(hornet_init);
//hornet_graph.print();
BfsTopDown2 bfs_top_down(hornet_graph);
bfs_top_down.set_parameters(graph.max_out_degree_id());
Timer<DEVICE> TM;
hipProfilerStart();
TM.start();
bfs_top_down.run();
TM.stop();
hipProfilerStop();
TM.print("TopDown2");
auto is_correct = bfs_top_down.validate();
std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n");
return !is_correct;
}
| d7dda01d89b38b5f4bde50f37e3572bc1d5d02bc.cu | /**
* @brief Breadth-first Search Top-Down test program
* @file
*/
#include "Static/BreadthFirstSearch/TopDown2.cuh"
#include <GraphIO/GraphStd.hpp>
#include <Util/CommandLineParam.hpp>
#include <cuda_profiler_api.h> //--profile-from-start off
int main(int argc, char* argv[]) {
using namespace timer;
using namespace hornets_nest;
graph::GraphStd<vid_t, eoff_t> graph;
CommandLineParam cmd(graph, argc, argv);
//graph.print();
HornetInit hornet_init(graph.nV(), graph.nE(), graph.out_offsets_ptr(),
graph.out_edges_ptr());
HornetGraph hornet_graph(hornet_init);
//hornet_graph.print();
BfsTopDown2 bfs_top_down(hornet_graph);
bfs_top_down.set_parameters(graph.max_out_degree_id());
Timer<DEVICE> TM;
cudaProfilerStart();
TM.start();
bfs_top_down.run();
TM.stop();
cudaProfilerStop();
TM.print("TopDown2");
auto is_correct = bfs_top_down.validate();
std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n");
return !is_correct;
}
|
d1a457c696eeaa5099adf709f8dce24d1a387fe5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Quadro and Tesla GPUs with compute capability >= 2.0 can overlap two memcopies
* with kernel execution. This sample illustrates the usage of CUDA streams to
* achieve overlapping of kernel execution with copying data to and from the device.
*
* Additionally, this sample uses CUDA events to measure elapsed time for
* CUDA calls. Events are a part of CUDA API and provide a system independent
* way to measure execution times on CUDA devices with approximately 0.5
* microsecond precision.
*
* Elapsed times are averaged over nreps repetitions (10 by default).
*
*/
const char *sSDKname = "simpleMultiCopy";
// includes, system
#include <stdio.h>
// include CUDA
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper for shared that are common to CUDA Samples
// includes, kernels
// Declare the CUDA kernels here and main() code that is needed to launch
// Compute workload on the system
__global__ void incKernel(int *g_out, int *g_in, int N, int inner_reps)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
for (int i=0; i<inner_reps; ++i)
{
g_out[idx] = g_in[idx] + 1;
}
}
}
#define STREAM_COUNT 4
// Uncomment to simulate data source/sink IO times
//#define SIMULATE_IO
int *h_data_source;
int *h_data_sink;
int *h_data_in[STREAM_COUNT];
int *d_data_in[STREAM_COUNT];
int *h_data_out[STREAM_COUNT];
int *d_data_out[STREAM_COUNT];
hipEvent_t cycleDone[STREAM_COUNT];
hipStream_t stream[STREAM_COUNT];
hipEvent_t start, stop;
int N = 1 << 22;
int nreps = 10; // number of times each experiment is repeated
int inner_reps = 5;
int memsize;
dim3 block(512);
dim3 grid;
int thread_blocks;
float processWithStreams(int streams_used);
void init();
bool test();
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char *argv[])
{
int cuda_device = 0;
float scale_factor;
hipDeviceProp_t deviceProp;
printf("[%s] - Starting...\n", sSDKname);
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
cuda_device = getCmdLineArgumentInt(argc, (const char **)argv, "device=");
if (cuda_device < 0)
{
printf("Invalid command line parameters\n");
exit(EXIT_FAILURE);
}
else
{
printf("cuda_device = %d\n", cuda_device);
cuda_device = gpuDeviceInit(cuda_device);
if (cuda_device < 0)
{
printf("No CUDA Capable devices found, exiting...\n");
exit(EXIT_SUCCESS);
}
}
}
else
{
// Otherwise pick the device with the highest Gflops/s
cuda_device = gpuGetMaxGflopsDeviceId();
checkCudaErrors(hipSetDevice(cuda_device));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
printf("> Using CUDA device [%d]: %s\n", cuda_device, deviceProp.name);
}
checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
printf("[%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n",
deviceProp.name, deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
// Anything that is less than 32 Cores will have scaled down workload
scale_factor = max((32.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f);
N = (int)((float)N / scale_factor);
printf("> Device name: %s\n", deviceProp.name);
printf("> CUDA Capability %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor,
deviceProp.multiProcessorCount);
printf("> scale_factor = %.2f\n", 1.0f/scale_factor);
printf("> array_size = %d\n\n", N);
memsize = N * sizeof(int);
thread_blocks = N / block.x;
grid.x = thread_blocks % 65535;
grid.y = (thread_blocks / 65535 + 1);
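// Fold the 1-D block count into a 2-D grid so that grid.x stays below the 65535 limit of
// compute capability 2.x devices.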
// Allocate resources
h_data_source = (int *) malloc(memsize);
h_data_sink = (int *) malloc(memsize);
for (int i =0; i<STREAM_COUNT; ++i)
{
checkCudaErrors(hipHostMalloc(&h_data_in[i], memsize,
hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_data_in[i], memsize));
checkCudaErrors(hipHostMalloc(&h_data_out[i], memsize,
hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_data_out[i], memsize));
checkCudaErrors(hipStreamCreate(&stream[i]));
checkCudaErrors(hipEventCreate(&cycleDone[i]));
hipEventRecord(cycleDone[i], stream[i]);
}
hipEventCreate(&start);
hipEventCreate(&stop);
init();
// Kernel warmup
hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block), 0, 0, d_data_out[0], d_data_in[0], N, inner_reps);
// Time copies and kernel
hipEventRecord(start,0);
checkCudaErrors(hipMemcpyAsync(d_data_in[0], h_data_in[0], memsize,
hipMemcpyHostToDevice,0));
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float memcpy_h2d_time;
hipEventElapsedTime(&memcpy_h2d_time, start, stop);
hipEventRecord(start,0);
checkCudaErrors(hipMemcpyAsync(h_data_out[0], d_data_out[0], memsize,
hipMemcpyDeviceToHost, 0));
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float memcpy_d2h_time;
hipEventElapsedTime(&memcpy_d2h_time, start, stop);
hipEventRecord(start,0);
hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block),0,0, d_data_out[0], d_data_in[0], N, inner_reps);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float kernel_time;
hipEventElapsedTime(&kernel_time, start, stop);
printf("\n");
printf("Relevant properties of this CUDA device\n");
printf("(%s) Can overlap one CPU<>GPU data transfer with GPU kernel execution (device property \"deviceOverlap\")\n", deviceProp.deviceOverlap ? "X" : " ");
//printf("(%s) Can execute several GPU kernels simultaneously (compute capability >= 2.0)\n", deviceProp.major >= 2 ? "X": " ");
printf("(%s) Can overlap two CPU<>GPU data transfers with GPU kernel execution\n"
" (Compute Capability >= 2.0 AND (Tesla product OR Quadro 4000/5000/6000/K5000)\n",
(deviceProp.major >= 2 && deviceProp.asyncEngineCount > 1)
? "X" : " ");
printf("\n");
printf("Measured timings (throughput):\n");
printf(" Memcpy host to device\t: %f ms (%f GB/s)\n",
memcpy_h2d_time, (memsize * 1e-6)/ memcpy_h2d_time);
printf(" Memcpy device to host\t: %f ms (%f GB/s)\n",
memcpy_d2h_time, (memsize * 1e-6)/ memcpy_d2h_time);
printf(" Kernel\t\t\t: %f ms (%f GB/s)\n",
kernel_time, (inner_reps *memsize * 2e-6)/ kernel_time);
printf("\n");
printf("Theoretical limits for speedup gained from overlapped data transfers:\n");
printf("No overlap at all (transfer-kernel-transfer): %f ms \n",
memcpy_h2d_time + memcpy_d2h_time + kernel_time);
printf("Compute can overlap with one transfer: %f ms\n",
max((memcpy_h2d_time + memcpy_d2h_time), kernel_time));
printf("Compute can overlap with both data transfers: %f ms\n",
max(max(memcpy_h2d_time,memcpy_d2h_time), kernel_time));
// Process pipelined work
float serial_time = processWithStreams(1);
float overlap_time = processWithStreams(STREAM_COUNT);
printf("\nAverage measured timings over %d repetitions:\n", nreps);
printf(" Avg. time when execution fully serialized\t: %f ms\n",
serial_time / nreps);
printf(" Avg. time when overlapped using %d streams\t: %f ms\n",
STREAM_COUNT, overlap_time / nreps);
printf(" Avg. speedup gained (serialized - overlapped)\t: %f ms\n",
(serial_time - overlap_time) / nreps);
printf("\nMeasured throughput:\n");
printf(" Fully serialized execution\t\t: %f GB/s\n",
(nreps * (memsize * 2e-6))/ serial_time);
printf(" Overlapped using %d streams\t\t: %f GB/s\n",
STREAM_COUNT, (nreps * (memsize * 2e-6))/ overlap_time);
// Verify the results, we will use the results for final output
bool bResults = test();
// Free resources
free(h_data_source);
free(h_data_sink);
for (int i =0; i<STREAM_COUNT; ++i)
{
hipHostFree(h_data_in[i]);
hipFree(d_data_in[i]);
hipHostFree(h_data_out[i]);
hipFree(d_data_out[i]);
hipStreamDestroy(stream[i]);
hipEventDestroy(cycleDone[i]);
}
hipEventDestroy(start);
hipEventDestroy(stop);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
// Test result
exit(bResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
float processWithStreams(int streams_used)
{
int current_stream = 0;
float time;
// Do processing in a loop
//
// Note: All memory commands are processed in the order they are issued,
// independent of the stream they are enqueued in. Hence the pattern by
// which the copy and kernel commands are enqueued in the stream
// has an influence on the achieved overlap.
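// Each iteration launches the kernel on the current stream's buffers, uploads the next
// frame in the next stream and downloads the current result in the current stream, so
// the copy engines and the SMs can be busy at the same time.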
hipEventRecord(start, 0);
for (int i=0; i<nreps; ++i)
{
int next_stream = (current_stream + 1) % streams_used;
#ifdef SIMULATE_IO
// Store the result
memcpy(h_data_sink, h_data_out[current_stream],memsize);
// Read new input
memcpy(h_data_in[next_stream], h_data_source, memsize);
#endif
// Ensure that processing and copying of the last cycle has finished
hipEventSynchronize(cycleDone[next_stream]);
// Process current frame
hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block), 0, stream[current_stream],
d_data_out[current_stream],
d_data_in[current_stream],
N,
inner_reps);
// Upload next frame
checkCudaErrors(hipMemcpyAsync(
d_data_in[next_stream],
h_data_in[next_stream],
memsize,
hipMemcpyHostToDevice,
stream[next_stream]));
// Download current frame
checkCudaErrors(hipMemcpyAsync(
h_data_out[current_stream],
d_data_out[current_stream],
memsize,
hipMemcpyDeviceToHost,
stream[current_stream]));
checkCudaErrors(hipEventRecord(
cycleDone[current_stream],
stream[current_stream]));
current_stream = next_stream;
}
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipEventElapsedTime(&time, start, stop);
return time;
}
void init()
{
for (int i=0; i<N; ++i)
{
h_data_source[i] = 0;
}
for (int i =0; i<STREAM_COUNT; ++i)
{
memcpy(h_data_in[i], h_data_source, memsize);
}
}
bool test()
{
bool passed = true;
for (int j =0; j<STREAM_COUNT; ++j)
{
for (int i =0; i<N; ++i)
{
passed &= (h_data_out[j][i] == 1);
}
}
return passed;
}
| d1a457c696eeaa5099adf709f8dce24d1a387fe5.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Quadro and Tesla GPUs with compute capability >= 2.0 can overlap two memcopies
* with kernel execution. This sample illustrates the usage of CUDA streams to
* achieve overlapping of kernel execution with copying data to and from the device.
*
* Additionally, this sample uses CUDA events to measure elapsed time for
* CUDA calls. Events are a part of CUDA API and provide a system independent
* way to measure execution times on CUDA devices with approximately 0.5
* microsecond precision.
*
* Elapsed times are averaged over nreps repetitions (10 by default).
*
*/
const char *sSDKname = "simpleMultiCopy";
// includes, system
#include <stdio.h>
// include CUDA
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper for shared that are common to CUDA Samples
// includes, kernels
// Declare the CUDA kernels here and main() code that is needed to launch
// Compute workload on the system
__global__ void incKernel(int *g_out, int *g_in, int N, int inner_reps)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
for (int i=0; i<inner_reps; ++i)
{
g_out[idx] = g_in[idx] + 1;
}
}
}
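// Note: the inner loop only adds arithmetic work; the stored value is always
// g_in[idx] + 1, which is what test() later checks. idx ignores blockIdx.y,
// so any extra blocks along y simply repeat the same writes.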
#define STREAM_COUNT 4
// Uncomment to simulate data source/sink IO times
//#define SIMULATE_IO
int *h_data_source;
int *h_data_sink;
int *h_data_in[STREAM_COUNT];
int *d_data_in[STREAM_COUNT];
int *h_data_out[STREAM_COUNT];
int *d_data_out[STREAM_COUNT];
cudaEvent_t cycleDone[STREAM_COUNT];
cudaStream_t stream[STREAM_COUNT];
cudaEvent_t start, stop;
int N = 1 << 22;
int nreps = 10; // number of times each experiment is repeated
int inner_reps = 5;
int memsize;
dim3 block(512);
dim3 grid;
int thread_blocks;
float processWithStreams(int streams_used);
void init();
bool test();
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char *argv[])
{
int cuda_device = 0;
float scale_factor;
cudaDeviceProp deviceProp;
printf("[%s] - Starting...\n", sSDKname);
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
cuda_device = getCmdLineArgumentInt(argc, (const char **)argv, "device=");
if (cuda_device < 0)
{
printf("Invalid command line parameters\n");
exit(EXIT_FAILURE);
}
else
{
printf("cuda_device = %d\n", cuda_device);
cuda_device = gpuDeviceInit(cuda_device);
if (cuda_device < 0)
{
printf("No CUDA Capable devices found, exiting...\n");
exit(EXIT_SUCCESS);
}
}
}
else
{
// Otherwise pick the device with the highest Gflops/s
cuda_device = gpuGetMaxGflopsDeviceId();
checkCudaErrors(cudaSetDevice(cuda_device));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device));
printf("> Using CUDA device [%d]: %s\n", cuda_device, deviceProp.name);
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device));
printf("[%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n",
deviceProp.name, deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
// Anything that is less than 32 Cores will have scaled down workload
scale_factor = max((32.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f);
N = (int)((float)N / scale_factor);
printf("> Device name: %s\n", deviceProp.name);
printf("> CUDA Capability %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor,
deviceProp.multiProcessorCount);
printf("> scale_factor = %.2f\n", 1.0f/scale_factor);
printf("> array_size = %d\n\n", N);
memsize = N * sizeof(int);
thread_blocks = N / block.x;
grid.x = thread_blocks % 65535;
grid.y = (thread_blocks / 65535 + 1);
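// gridDim.x was capped at 65535 on older GPUs, so the block count is split across
// x and y. With the default N and block.x = 512, thread_blocks is at most 8192, so grid.y stays 1.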
// Allocate resources
h_data_source = (int *) malloc(memsize);
h_data_sink = (int *) malloc(memsize);
for (int i =0; i<STREAM_COUNT; ++i)
{
checkCudaErrors(cudaHostAlloc(&h_data_in[i], memsize,
cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_data_in[i], memsize));
checkCudaErrors(cudaHostAlloc(&h_data_out[i], memsize,
cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_data_out[i], memsize));
checkCudaErrors(cudaStreamCreate(&stream[i]));
checkCudaErrors(cudaEventCreate(&cycleDone[i]));
cudaEventRecord(cycleDone[i], stream[i]);
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
init();
// Kernel warmup
incKernel<<<grid, block>>>(d_data_out[0], d_data_in[0], N, inner_reps);
// Time copies and kernel
cudaEventRecord(start,0);
checkCudaErrors(cudaMemcpyAsync(d_data_in[0], h_data_in[0], memsize,
cudaMemcpyHostToDevice,0));
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float memcpy_h2d_time;
cudaEventElapsedTime(&memcpy_h2d_time, start, stop);
cudaEventRecord(start,0);
checkCudaErrors(cudaMemcpyAsync(h_data_out[0], d_data_out[0], memsize,
cudaMemcpyDeviceToHost, 0));
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float memcpy_d2h_time;
cudaEventElapsedTime(&memcpy_d2h_time, start, stop);
cudaEventRecord(start,0);
incKernel<<<grid, block,0,0>>>(d_data_out[0], d_data_in[0], N, inner_reps);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float kernel_time;
cudaEventElapsedTime(&kernel_time, start, stop);
printf("\n");
printf("Relevant properties of this CUDA device\n");
printf("(%s) Can overlap one CPU<>GPU data transfer with GPU kernel execution (device property \"deviceOverlap\")\n", deviceProp.deviceOverlap ? "X" : " ");
//printf("(%s) Can execute several GPU kernels simultaneously (compute capability >= 2.0)\n", deviceProp.major >= 2 ? "X": " ");
printf("(%s) Can overlap two CPU<>GPU data transfers with GPU kernel execution\n"
" (Compute Capability >= 2.0 AND (Tesla product OR Quadro 4000/5000/6000/K5000)\n",
(deviceProp.major >= 2 && deviceProp.asyncEngineCount > 1)
? "X" : " ");
printf("\n");
printf("Measured timings (throughput):\n");
printf(" Memcpy host to device\t: %f ms (%f GB/s)\n",
memcpy_h2d_time, (memsize * 1e-6)/ memcpy_h2d_time);
printf(" Memcpy device to host\t: %f ms (%f GB/s)\n",
memcpy_d2h_time, (memsize * 1e-6)/ memcpy_d2h_time);
printf(" Kernel\t\t\t: %f ms (%f GB/s)\n",
kernel_time, (inner_reps *memsize * 2e-6)/ kernel_time);
printf("\n");
printf("Theoretical limits for speedup gained from overlapped data transfers:\n");
printf("No overlap at all (transfer-kernel-transfer): %f ms \n",
memcpy_h2d_time + memcpy_d2h_time + kernel_time);
printf("Compute can overlap with one transfer: %f ms\n",
max((memcpy_h2d_time + memcpy_d2h_time), kernel_time));
printf("Compute can overlap with both data transfers: %f ms\n",
max(max(memcpy_h2d_time,memcpy_d2h_time), kernel_time));
// Process pipelined work
float serial_time = processWithStreams(1);
float overlap_time = processWithStreams(STREAM_COUNT);
printf("\nAverage measured timings over %d repetitions:\n", nreps);
printf(" Avg. time when execution fully serialized\t: %f ms\n",
serial_time / nreps);
printf(" Avg. time when overlapped using %d streams\t: %f ms\n",
STREAM_COUNT, overlap_time / nreps);
printf(" Avg. speedup gained (serialized - overlapped)\t: %f ms\n",
(serial_time - overlap_time) / nreps);
printf("\nMeasured throughput:\n");
printf(" Fully serialized execution\t\t: %f GB/s\n",
(nreps * (memsize * 2e-6))/ serial_time);
printf(" Overlapped using %d streams\t\t: %f GB/s\n",
STREAM_COUNT, (nreps * (memsize * 2e-6))/ overlap_time);
// Verify the results, we will use the results for final output
bool bResults = test();
// Free resources
free(h_data_source);
free(h_data_sink);
for (int i =0; i<STREAM_COUNT; ++i)
{
cudaFreeHost(h_data_in[i]);
cudaFree(d_data_in[i]);
cudaFreeHost(h_data_out[i]);
cudaFree(d_data_out[i]);
cudaStreamDestroy(stream[i]);
cudaEventDestroy(cycleDone[i]);
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
// Test result
exit(bResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
float processWithStreams(int streams_used)
{
int current_stream = 0;
float time;
// Do processing in a loop
//
// Note: All memory commands are processed in the order they are issued,
// independent of the stream they are enqueued in. Hence the pattern by
// which the copy and kernel commands are enqueued in the stream
// has an influence on the achieved overlap.
cudaEventRecord(start, 0);
for (int i=0; i<nreps; ++i)
{
int next_stream = (current_stream + 1) % streams_used;
#ifdef SIMULATE_IO
// Store the result
memcpy(h_data_sink, h_data_out[current_stream],memsize);
// Read new input
memcpy(h_data_in[next_stream], h_data_source, memsize);
#endif
// Ensure that processing and copying of the last cycle has finished
cudaEventSynchronize(cycleDone[next_stream]);
// Process current frame
incKernel<<<grid, block, 0, stream[current_stream]>>>(
d_data_out[current_stream],
d_data_in[current_stream],
N,
inner_reps);
// Upload next frame
checkCudaErrors(cudaMemcpyAsync(
d_data_in[next_stream],
h_data_in[next_stream],
memsize,
cudaMemcpyHostToDevice,
stream[next_stream]));
// Download current frame
checkCudaErrors(cudaMemcpyAsync(
h_data_out[current_stream],
d_data_out[current_stream],
memsize,
cudaMemcpyDeviceToHost,
stream[current_stream]));
checkCudaErrors(cudaEventRecord(
cycleDone[current_stream],
stream[current_stream]));
current_stream = next_stream;
}
cudaEventRecord(stop, 0);
cudaDeviceSynchronize();
cudaEventElapsedTime(&time, start, stop);
return time;
}
void init()
{
for (int i=0; i<N; ++i)
{
h_data_source[i] = 0;
}
for (int i =0; i<STREAM_COUNT; ++i)
{
memcpy(h_data_in[i], h_data_source, memsize);
}
}
bool test()
{
bool passed = true;
for (int j =0; j<STREAM_COUNT; ++j)
{
for (int i =0; i<N; ++i)
{
passed &= (h_data_out[j][i] == 1);
}
}
return passed;
}
|
a251f781c49df4920e1ef86739c8e97a10d83c41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "call_kernels.cuh"
// File used to call CUDA kernels from C++ code
void initializeParticlesUni(double *x_pos, double *y_pos, double *x_vel, double *y_vel, double *x_acc, double *y_acc, double *mass, dim3 gridSize, dim3 blockSize)
{
hipLaunchKernelGGL(( initialize_particles_uni), dim3(gridSize), dim3(blockSize), 0, 0, x_pos, y_pos, x_vel, y_vel, x_acc, y_acc, mass);
}
void initializeParticlesCircle(double *x_pos, double *y_pos, double *x_vel, double *y_vel, double *x_acc, double *y_acc, double *mass, dim3 gridSize, dim3 blockSize)
{
hipLaunchKernelGGL(( initialize_particles_circle), dim3(gridSize), dim3(blockSize), 0, 0, x_pos, y_pos, x_vel, y_vel, x_acc, y_acc, mass);
}
void initializeParticles2Circles(double *x_pos, double *y_pos, double *x_vel, double *y_vel, double *x_acc, double *y_acc, double *mass, dim3 gridSize, dim3 blockSize)
{
hipLaunchKernelGGL(( initialize_particles_2_circles), dim3(gridSize), dim3(blockSize), 0, 0, x_pos, y_pos, x_vel, y_vel, x_acc, y_acc, mass);
}
void computeDisplacements(Node *d_tree, double* d_x, double* d_y, double* d_vx, double* d_vy, double* d_ax, double* d_ay, double* d_mass, dim3 gridSize, dim3 blockSize)
{
hipLaunchKernelGGL(( compute_displacements), dim3(gridSize),dim3(blockSize), 0, 0, d_tree, d_x, d_y, d_vx, d_vy, d_ax, d_ay, d_mass);
}
| a251f781c49df4920e1ef86739c8e97a10d83c41.cu | #include "call_kernels.cuh"
// File used to call CUDA kernels from C++ code
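// These thin host-side wrappers let plain C++ translation units trigger the kernel
// launches without compiling any device code themselves; the grid and block
// dimensions are chosen by the caller.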
void initializeParticlesUni(double *x_pos, double *y_pos, double *x_vel, double *y_vel, double *x_acc, double *y_acc, double *mass, dim3 gridSize, dim3 blockSize)
{
initialize_particles_uni<<< gridSize, blockSize>>>(x_pos, y_pos, x_vel, y_vel, x_acc, y_acc, mass);
}
void initializeParticlesCircle(double *x_pos, double *y_pos, double *x_vel, double *y_vel, double *x_acc, double *y_acc, double *mass, dim3 gridSize, dim3 blockSize)
{
initialize_particles_circle<<< gridSize, blockSize>>>(x_pos, y_pos, x_vel, y_vel, x_acc, y_acc, mass);
}
void initializeParticles2Circles(double *x_pos, double *y_pos, double *x_vel, double *y_vel, double *x_acc, double *y_acc, double *mass, dim3 gridSize, dim3 blockSize)
{
initialize_particles_2_circles<<< gridSize, blockSize>>>(x_pos, y_pos, x_vel, y_vel, x_acc, y_acc, mass);
}
void computeDisplacements(Node *d_tree, double* d_x, double* d_y, double* d_vx, double* d_vy, double* d_ax, double* d_ay, double* d_mass, dim3 gridSize, dim3 blockSize)
{
compute_displacements<<<gridSize,blockSize>>>(d_tree, d_x, d_y, d_vx, d_vy, d_ax, d_ay, d_mass);
}
|
b615ac586d009ed4a58d75f8b3f9c53d10bca6bd.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sigmoid_layer_hessian_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "../nn_types.h"
static __forceinline__ __device__ float sigmoid(float x)
{
return __fdividef(1.0F, 1.0F + __expf(-x));
}
__global__ void sigmoid_hess_kernel(
const float4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input[elem_id];
val.x = sigmoid(val.x);
val.y = sigmoid(val.y);
val.z = sigmoid(val.z);
val.w = sigmoid(val.w);
output[elem_id] = val;
}
}
static __forceinline__ __device__ float sigmoid_derivative(float x)
{
return x * (1.0F - x);
}
__global__ void sigmoid_bbprop_hess_kernel(
float4 * __restrict errors,
const float4 * __restrict output_neurons,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = output_neurons[elem_id];
float4 current_error = errors[elem_id];
val.x = sigmoid_derivative(val.x);
val.y = sigmoid_derivative(val.y);
val.z = sigmoid_derivative(val.z);
val.w = sigmoid_derivative(val.w);
current_error.x *= val.x * val.x;
current_error.y *= val.y * val.y;
current_error.z *= val.z * val.z;
current_error.w *= val.w * val.w;
errors[elem_id] = current_error;
}
}
namespace nnforge
{
namespace cuda
{
sigmoid_layer_hessian_cuda::sigmoid_layer_hessian_cuda()
{
}
sigmoid_layer_hessian_cuda::~sigmoid_layer_hessian_cuda()
{
}
void sigmoid_layer_hessian_cuda::enqueue_test(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( sigmoid_hess_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_neurons_buffer,
*output_neurons_buffer,
elem_count);
}
void sigmoid_layer_hessian_cuda::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( sigmoid_bbprop_hess_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*output_errors_buffer,
*output_neurons_buffer,
elem_count);
}
bool sigmoid_layer_hessian_cuda::is_in_place_backprop() const
{
return true;
}
}
}
| b615ac586d009ed4a58d75f8b3f9c53d10bca6bd.cu | /*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sigmoid_layer_hessian_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "../nn_types.h"
static __forceinline__ __device__ float sigmoid(float x)
{
return __fdividef(1.0F, 1.0F + __expf(-x));
}
__global__ void sigmoid_hess_kernel(
const float4 * __restrict input,
float4 * __restrict output,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input[elem_id];
val.x = sigmoid(val.x);
val.y = sigmoid(val.y);
val.z = sigmoid(val.z);
val.w = sigmoid(val.w);
output[elem_id] = val;
}
}
static __forceinline__ __device__ float sigmoid_derivative(float x)
{
return x * (1.0F - x);
}
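// Note: sigmoid_derivative() is applied to the stored output y = sigmoid(x),
// using sigma'(x) = y * (1 - y). The Hessian back-propagation below then scales
// each error by the squared derivative.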
__global__ void sigmoid_bbprop_hess_kernel(
float4 * __restrict errors,
const float4 * __restrict output_neurons,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = output_neurons[elem_id];
float4 current_error = errors[elem_id];
val.x = sigmoid_derivative(val.x);
val.y = sigmoid_derivative(val.y);
val.z = sigmoid_derivative(val.z);
val.w = sigmoid_derivative(val.w);
current_error.x *= val.x * val.x;
current_error.y *= val.y * val.y;
current_error.z *= val.z * val.z;
current_error.w *= val.w * val.w;
errors[elem_id] = current_error;
}
}
namespace nnforge
{
namespace cuda
{
sigmoid_layer_hessian_cuda::sigmoid_layer_hessian_cuda()
{
}
sigmoid_layer_hessian_cuda::~sigmoid_layer_hessian_cuda()
{
}
void sigmoid_layer_hessian_cuda::enqueue_test(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
sigmoid_hess_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_neurons_buffer,
*output_neurons_buffer,
elem_count);
}
void sigmoid_layer_hessian_cuda::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
sigmoid_bbprop_hess_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*output_errors_buffer,
*output_neurons_buffer,
elem_count);
}
bool sigmoid_layer_hessian_cuda::is_in_place_backprop() const
{
return true;
}
}
}
|
43d04ecf339a90ae7bc93a6c2e42f4a18b62d732.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <utilities/reduce_helper.h>
#include <device/cuda_utils.h>
#include <device/device_defines.h>
#include <device/handles.h>
#include <device/reduce.h>
#include <functions/dev_initializations.h>
void reduce_cublas( real *input, real *output, real *temp, int rows, int cols )
{
/*
int blocks = (rows * cols) / (BLOCK_SIZE * 2 * 8 ) +
(((rows * cols) % (BLOCK_SIZE * 2 * 8) == 0) ? 0 : 1);
reduce6<<< blocks, BLOCK_SIZE, BLOCK_SIZE* sizeof(real) >>>
(input, temp, rows * cols);
hipDeviceSynchronize ();
cudaCheckError ();
reduce6<<< 1, BLOCK_SIZE, BLOCK_SIZE * sizeof(real)>>>
( temp, output, blocks);
hipDeviceSynchronize ();
cudaCheckError ();
*/
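// Live path: computes the sum of all entries of the rows x cols matrix. temp[0..cols)
// is (presumably) filled with ones by kerInitOneVector, the GEMV writes the per-row
// sums to temp + cols, and the final dot adds them up. Note the dot reuses
// temp[0..rows) as a ones vector, so this appears to assume rows <= cols and
// that temp holds at least cols + rows reals.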
// ones vector
int blocks = cols / BLOCK_SIZE +
(( cols % BLOCK_SIZE ) == 0 ? 0 : 1);
hipLaunchKernelGGL(( kerInitOneVector) , dim3(blocks), dim3(BLOCK_SIZE) , 0, 0,
temp, cols);
hipDeviceSynchronize ();
cudaCheckError ();
real alpha = 1, beta = 0;
cublasCheckError( hipblasDgemv( cublasHandle, HIPBLAS_OP_N,
rows, cols, &alpha,
input, rows, temp, 1, &beta,
temp + cols, 1 ) );
cublasCheckError( hipblasDdot( cublasHandle, rows, temp + cols, 1, temp, 1, output ) );
}
| 43d04ecf339a90ae7bc93a6c2e42f4a18b62d732.cu |
#include <utilities/reduce_helper.h>
#include <device/cuda_utils.h>
#include <device/device_defines.h>
#include <device/handles.h>
#include <device/reduce.h>
#include <functions/dev_initializations.h>
void reduce_cublas( real *input, real *output, real *temp, int rows, int cols )
{
/*
int blocks = (rows * cols) / (BLOCK_SIZE * 2 * 8 ) +
(((rows * cols) % (BLOCK_SIZE * 2 * 8) == 0) ? 0 : 1);
reduce6<<< blocks, BLOCK_SIZE, BLOCK_SIZE* sizeof(real) >>>
(input, temp, rows * cols);
cudaThreadSynchronize ();
cudaCheckError ();
reduce6<<< 1, BLOCK_SIZE, BLOCK_SIZE * sizeof(real)>>>
( temp, output, blocks);
cudaThreadSynchronize ();
cudaCheckError ();
*/
// ones vector (length cols) built in temp
int blocks = cols / BLOCK_SIZE +
(( cols % BLOCK_SIZE ) == 0 ? 0 : 1);
kerInitOneVector <<< blocks, BLOCK_SIZE >>>
(temp, cols);
cudaThreadSynchronize ();
cudaCheckError ();
real alpha = 1, beta = 0;
cublasCheckError( cublasDgemv( cublasHandle, CUBLAS_OP_N,
rows, cols, &alpha,
input, rows, temp, 1, &beta,
temp + cols, 1 ) );
cublasCheckError( cublasDdot( cublasHandle, rows, temp + cols, 1, temp, 1, output ) );
}
|
262727087bbf22ac34fedbd57740a42434d27123.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cmath>
#include <algorithm>
#include <string>
#include <ctime>
#include <chrono>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <TObject.h>
#include <TROOT.h>
#include <TFile.h>
#include <TTree.h>
#include <TRandom.h>
#include <TMatrixD.h>
#include <TLorentzVector.h>
#include <TClonesArray.h>
#include <TStopwatch.h>
#include <TTimeStamp.h>
#include <TString.h>
#include "LoadInput.h"
#define nChamberPlanes 30
#define nHodoPlanes 16
#define nPropPlanes 8
#define triggerBit(n) (1 << (n))
#define hitFlagBit(n) (1 << (n))
using namespace std;
const int EstnEvtMax = 10240;
const int THREADS_PER_BLOCK = 512;
int BLOCKS_NUM = EstnEvtMax/THREADS_PER_BLOCK;
const int EstnAHMax = 5000;
const int EstnTHMax = 200;
const int ClusterSizeMax = 100;
class gHit {
public:
int index;
short detectorID;
short elementID;
float tdcTime;
float driftDistance;
float pos;
short flag;
};
class gEvent {
public:
int RunID;
int EventID;
int SpillID;
int TriggerBits;
short TargetPos;
int TurnID;
int RFID;
int Intensity[33];
short TriggerEmu;
short NRoads[4];
int NHits[nChamberPlanes+nHodoPlanes+nPropPlanes+1];
int nAH;
int nTH;
gHit AllHits[EstnAHMax];
gHit TriggerHits[EstnTHMax];
};
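// Ordering used to sort hits: ascending detectorID, then ascending elementID,
// then descending tdcTime for hits on the same wire.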
struct lessthan {
__host__ __device__ bool operator()(const gHit& lhs, const gHit& rhs)
{
if(lhs.detectorID < rhs.detectorID)
{
return true;
}
else if(lhs.detectorID > rhs.detectorID)
{
return false;
}
if(lhs.elementID < rhs.elementID)
{
return true;
}
else if(lhs.elementID > rhs.elementID)
{
return false;
}
if(lhs.tdcTime > rhs.tdcTime)
{
return true;
}
else
{
return false;
}
}
};
__global__ void gkernel(gEvent* ic, int* og) {
// printf("Running the kernel function...\n");
int index = threadIdx.x + blockIdx.x * blockDim.x;
double w_max[EstnEvtMax];
double w_min[EstnEvtMax];
double dt_mean[EstnEvtMax];
int cluster_iAH_arr_cur[EstnEvtMax];
int cluster_iAH_arr_size[EstnEvtMax];
static int cluster_iAH_arr[EstnEvtMax][ClusterSizeMax];
int uniqueID[EstnEvtMax];
int uniqueID_curr[EstnEvtMax];
double tdcTime_curr[EstnEvtMax];
int iAH[EstnEvtMax];
int nAH_reduced[EstnEvtMax];
cluster_iAH_arr_size[index] = 0;
nAH_reduced[index] = 0;
// make sure the first hit of each event starts a fresh uniqueID/tdcTime comparison
uniqueID_curr[index] = -1;
tdcTime_curr[index] = 0.0;
for(iAH[index] = 0; iAH[index]<ic[index].nAH; ++iAH[index]) {
if((ic[index].AllHits[iAH[index]].flag & hitFlagBit(1)) == 0) {
// printf("Skip out-of-time...\n");
ic[index].AllHits[iAH[index]].detectorID = 0;
continue;
}
if(ic[index].AllHits[iAH[index]].detectorID < 31 || ic[index].AllHits[iAH[index]].detectorID > 46) {
uniqueID[index] = ic[index].AllHits[iAH[index]].detectorID*1000 + ic[index].AllHits[iAH[index]].elementID;
if(uniqueID[index] != uniqueID_curr[index]) {
uniqueID_curr[index] = uniqueID[index];
tdcTime_curr[index] = ic[index].AllHits[iAH[index]].tdcTime;
}
else {
if(ic[index].AllHits[iAH[index]].detectorID > 36 || ((ic[index].AllHits[iAH[index]].tdcTime - tdcTime_curr[index] >= 0.0) && (ic[index].AllHits[iAH[index]].tdcTime - tdcTime_curr[index] < 80.0)) || ((ic[index].AllHits[iAH[index]].tdcTime - tdcTime_curr[index] <= 0.0) && (ic[index].AllHits[iAH[index]].tdcTime - tdcTime_curr[index] > -80.0))) {
// printf("Skip after-pulse...\n");
ic[index].AllHits[iAH[index]].detectorID = 0;
continue;
}
else {
tdcTime_curr[index] = ic[index].AllHits[iAH[index]].tdcTime;
}
}
}
if(ic[index].AllHits[iAH[index]].detectorID <= nChamberPlanes) {
// printf("%d\n", cluster_iAH_arr_size[index]);
// printf("Decluster...\n");
if(cluster_iAH_arr_size[index] == ClusterSizeMax) {
// printf("Oversized cluster...\n");
}
if(cluster_iAH_arr_size[index] == 0) {
cluster_iAH_arr[index][0] = iAH[index];
++cluster_iAH_arr_size[index];
}
else {
if((ic[index].AllHits[iAH[index]].detectorID != ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].detectorID) || (ic[index].AllHits[iAH[index]].elementID - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].elementID > 1)) {
if(cluster_iAH_arr_size[index] == 2) {
w_max[index] = 0.9*0.5*(ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].pos - ic[index].AllHits[cluster_iAH_arr[index][0]].pos);
w_min[index] = 4.0/9.0*w_max[index];
if((ic[index].AllHits[cluster_iAH_arr[index][0]].driftDistance > w_max[index] && ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].driftDistance > w_min[index]) || (ic[index].AllHits[cluster_iAH_arr[index][0]].driftDistance > w_min[index] && ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].driftDistance > w_max[index])) {
if(ic[index].AllHits[cluster_iAH_arr[index][0]].driftDistance > ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].driftDistance) {
// printf("Skip cluster...\n");
ic[index].AllHits[cluster_iAH_arr[index][0]].detectorID = 0;
}
else {
// printf("Skip cluster...\n");
ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].detectorID = 0;
}
}
else if((((ic[index].AllHits[cluster_iAH_arr[index][0]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].tdcTime) >= 0.0 && (ic[index].AllHits[cluster_iAH_arr[index][0]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].tdcTime) < 8.0) || ((ic[index].AllHits[cluster_iAH_arr[index][0]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].tdcTime) <= 0.0 && (ic[index].AllHits[cluster_iAH_arr[index][0]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].tdcTime) > -8.0)) && (ic[index].AllHits[cluster_iAH_arr[index][0]].detectorID >= 19 && ic[index].AllHits[cluster_iAH_arr[index][0]].detectorID <= 24)) {
// printf("Skip cluster...\n");
ic[index].AllHits[cluster_iAH_arr[index][0]].detectorID = 0;
ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].detectorID = 0;
}
}
if(cluster_iAH_arr_size[index] >= 3) {
dt_mean[index] = 0.0;
for(cluster_iAH_arr_cur[index] = 1; cluster_iAH_arr_cur[index] < cluster_iAH_arr_size[index]; ++cluster_iAH_arr_cur[index]) {
dt_mean[index] += ((ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]-1]].tdcTime) > 0.0 ? (ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]-1]].tdcTime) : (ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]-1]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]]].tdcTime));
}
dt_mean[index] = dt_mean[index]/(cluster_iAH_arr_size[index] - 1);
if(dt_mean[index] < 10.0) {
// printf("Skip cluster...\n");
for(cluster_iAH_arr_cur[index] = 0; cluster_iAH_arr_cur[index] < cluster_iAH_arr_size[index]; ++cluster_iAH_arr_cur[index]) {
ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]]].detectorID = 0;
}
}
else {
// printf("Skip cluster...\n");
for(cluster_iAH_arr_cur[index] = 1; cluster_iAH_arr_cur[index] < cluster_iAH_arr_size[index]; ++cluster_iAH_arr_cur[index]) {
ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]]].detectorID = 0;
}
}
}
cluster_iAH_arr_size[index] = 0;
}
cluster_iAH_arr[index][cluster_iAH_arr_size[index]] = iAH[index];
++cluster_iAH_arr_size[index];
}
}
}
for(iAH[index] = 0; iAH[index]<ic[index].nAH; ++iAH[index]) {
if(ic[index].AllHits[iAH[index]].detectorID != 0) {
ic[index].AllHits[nAH_reduced[index]] = ic[index].AllHits[iAH[index]];
++nAH_reduced[index];
}
}
ic[index].nAH = nAH_reduced[index];
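// Occupancy cut: reject events in which any group of six chamber planes exceeds
// its hit limit (350/350/170/140/140).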
if(((ic[index].NHits[1]+ic[index].NHits[2]+ic[index].NHits[3]+ic[index].NHits[4]+ic[index].NHits[5]+ic[index].NHits[6])>350) || ((ic[index].NHits[7]+ic[index].NHits[8]+ic[index].NHits[9]+ic[index].NHits[10]+ic[index].NHits[11]+ic[index].NHits[12])>350) || ((ic[index].NHits[13]+ic[index].NHits[14]+ic[index].NHits[15]+ic[index].NHits[16]+ic[index].NHits[17]+ic[index].NHits[18])>170) || ((ic[index].NHits[19]+ic[index].NHits[20]+ic[index].NHits[21]+ic[index].NHits[22]+ic[index].NHits[23]+ic[index].NHits[24])>140) || ((ic[index].NHits[25]+ic[index].NHits[26]+ic[index].NHits[27]+ic[index].NHits[28]+ic[index].NHits[29]+ic[index].NHits[30])>140)) {
// printf("Event rejected...\n");
}
else {
// Process the accepted events (tracking) here.
}
}
int main(int argc, char* argv[]) {
auto start = std::chrono::system_clock::now();
clock_t cp1 = clock();
TString inputFile;
TString outputFile;
inputFile = argv[1];
outputFile = argv[2];
cout<<"Running "<<argv[0]<<endl;
cout<<"Loading "<<argv[1]<<endl;
cout<<"Writing "<<argv[2]<<endl;
SRawEvent* rawEvent = new SRawEvent();
TFile* dataFile = new TFile(inputFile.Data(), "READ");
TTree* dataTree = (TTree *)dataFile->Get("save");
dataTree->SetBranchAddress("rawEvent", &rawEvent);
int nEvtMax = dataTree->GetEntries();
static gEvent host_gEvent[EstnEvtMax];
for(int i = 0; i < nEvtMax; ++i) {
dataTree->GetEntry(i);
// cout<<"Converting "<<i<<"/"<<nEvtMax<<endl;
host_gEvent[i].RunID = rawEvent->fRunID;
host_gEvent[i].EventID = rawEvent->fEventID;
host_gEvent[i].SpillID = rawEvent->fSpillID;
host_gEvent[i].TriggerBits = rawEvent->fTriggerBits;
host_gEvent[i].TargetPos = rawEvent->fTargetPos;
host_gEvent[i].TurnID = rawEvent->fTurnID;
host_gEvent[i].RFID = rawEvent->fRFID;
for(int j=0; j<33; j++) {
host_gEvent[i].Intensity[j] = rawEvent->fIntensity[j];
}
host_gEvent[i].TriggerEmu = rawEvent->fTriggerEmu;
for(int k=0; k<4; k++) {
host_gEvent[i].NRoads[k] = rawEvent->fNRoads[k];
}
for(int l=0; l<(nChamberPlanes+nHodoPlanes+nPropPlanes+1); l++) {
host_gEvent[i].NHits[l] = rawEvent->fNHits[l];
}
host_gEvent[i].nAH = rawEvent->fAllHits.size();
host_gEvent[i].nTH = rawEvent->fTriggerHits.size();
for(int m=0; m<rawEvent->fAllHits.size(); m++) {
host_gEvent[i].AllHits[m].index=(rawEvent->fAllHits[m]).index;
host_gEvent[i].AllHits[m].detectorID=(rawEvent->fAllHits[m]).detectorID;
host_gEvent[i].AllHits[m].elementID=(rawEvent->fAllHits[m]).elementID;
host_gEvent[i].AllHits[m].tdcTime=(rawEvent->fAllHits[m]).tdcTime;
host_gEvent[i].AllHits[m].driftDistance=(rawEvent->fAllHits[m]).driftDistance;
host_gEvent[i].AllHits[m].pos=(rawEvent->fAllHits[m]).pos;
host_gEvent[i].AllHits[m].flag=(rawEvent->fAllHits[m]).flag;
}
for(int n=0; n<rawEvent->fTriggerHits.size(); n++) {
host_gEvent[i].TriggerHits[n].index=(rawEvent->fTriggerHits[n]).index;
host_gEvent[i].TriggerHits[n].detectorID=(rawEvent->fTriggerHits[n]).detectorID;
host_gEvent[i].TriggerHits[n].elementID=(rawEvent->fTriggerHits[n]).elementID;
host_gEvent[i].TriggerHits[n].tdcTime=(rawEvent->fTriggerHits[n]).tdcTime;
host_gEvent[i].TriggerHits[n].driftDistance=(rawEvent->fTriggerHits[n]).driftDistance;
host_gEvent[i].TriggerHits[n].pos=(rawEvent->fTriggerHits[n]).pos;
host_gEvent[i].TriggerHits[n].flag=(rawEvent->fTriggerHits[n]).flag;
}
}
//If the decoded data has NOT been sorted...
// for(int i = 0; i < nEvtMax; ++i) {
// thrust::stable_sort(host_gEvent[i].AllHits, host_gEvent[i].AllHits+host_gEvent[i].nAH, lessthan());
// }
gEvent *device_gEvent;
int sizeofRaw = EstnEvtMax*sizeof(gEvent);
int host_output[EstnEvtMax];
int *device_output;
int sizeofoutput = EstnEvtMax*sizeof(int);
clock_t cp2 = clock();
auto start_kernel = std::chrono::system_clock::now();
hipMalloc((void**)&device_gEvent, sizeofRaw);
hipMalloc((void**)&device_output, sizeofoutput);
hipMemcpy(device_gEvent, host_gEvent, sizeofRaw, hipMemcpyHostToDevice);
hipMemcpy(device_output, host_output, sizeofoutput, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( gkernel), dim3(BLOCKS_NUM),dim3(THREADS_PER_BLOCK), 0, 0, device_gEvent, device_output);
hipMemcpy(host_gEvent, device_gEvent, sizeofRaw, hipMemcpyDeviceToHost);
hipMemcpy(host_output, device_output, sizeofoutput, hipMemcpyDeviceToHost);
hipFree(device_gEvent);
hipFree(device_output);
auto end_kernel = std::chrono::system_clock::now();
clock_t cp3 = clock();
delete rawEvent;
for(int i = 0; i < host_gEvent[0].nAH; ++i) {
cout<<"output: "<<(host_gEvent[0].AllHits[i].detectorID)<<endl;
}
clock_t cp4 = clock();
auto end = std::chrono::system_clock::now();
double cpu_secs = double(cp4-cp3+cp2-cp1) / CLOCKS_PER_SEC;
auto gpu_ns = end_kernel - start_kernel;
auto overall = end - start;
cout<<"CPU time: "<<cpu_secs<<endl;
cout<<"GPU time: "<<(gpu_ns.count()/1000000000.0)<<endl;
cout<<"Total time: "<<(overall.count()/1000000000.0)<<endl;
return 0;
}
//e906-gat2:/seaquest/users/hjiang/online_reconstruction
| 262727087bbf22ac34fedbd57740a42434d27123.cu | #include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cmath>
#include <algorithm>
#include <string>
#include <ctime>
#include <chrono>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <TObject.h>
#include <TROOT.h>
#include <TFile.h>
#include <TTree.h>
#include <TRandom.h>
#include <TMatrixD.h>
#include <TLorentzVector.h>
#include <TClonesArray.h>
#include <TStopwatch.h>
#include <TTimeStamp.h>
#include <TString.h>
#include "LoadInput.h"
#define nChamberPlanes 30
#define nHodoPlanes 16
#define nPropPlanes 8
#define triggerBit(n) (1 << (n))
#define hitFlagBit(n) (1 << (n))
using namespace std;
const int EstnEvtMax = 10240;
const int THREADS_PER_BLOCK = 512;
int BLOCKS_NUM = EstnEvtMax/THREADS_PER_BLOCK;
const int EstnAHMax = 5000;
const int EstnTHMax = 200;
const int ClusterSizeMax = 100;
class gHit {
public:
int index;
short detectorID;
short elementID;
float tdcTime;
float driftDistance;
float pos;
short flag;
};
class gEvent {
public:
int RunID;
int EventID;
int SpillID;
int TriggerBits;
short TargetPos;
int TurnID;
int RFID;
int Intensity[33];
short TriggerEmu;
short NRoads[4];
int NHits[nChamberPlanes+nHodoPlanes+nPropPlanes+1];
int nAH;
int nTH;
gHit AllHits[EstnAHMax];
gHit TriggerHits[EstnTHMax];
};
struct lessthan {
__host__ __device__ bool operator()(const gHit& lhs, const gHit& rhs)
{
if(lhs.detectorID < rhs.detectorID)
{
return true;
}
else if(lhs.detectorID > rhs.detectorID)
{
return false;
}
if(lhs.elementID < rhs.elementID)
{
return true;
}
else if(lhs.elementID > rhs.elementID)
{
return false;
}
if(lhs.tdcTime > rhs.tdcTime)
{
return true;
}
else
{
return false;
}
}
};
__global__ void gkernel(gEvent* ic, int* og) {
// printf("Running the kernel function...\n");
int index = threadIdx.x + blockIdx.x * blockDim.x;
double w_max[EstnEvtMax];
double w_min[EstnEvtMax];
double dt_mean[EstnEvtMax];
int cluster_iAH_arr_cur[EstnEvtMax];
int cluster_iAH_arr_size[EstnEvtMax];
static int cluster_iAH_arr[EstnEvtMax][ClusterSizeMax];
int uniqueID[EstnEvtMax];
int uniqueID_curr[EstnEvtMax];
double tdcTime_curr[EstnEvtMax];
int iAH[EstnEvtMax];
int nAH_reduced[EstnEvtMax];
cluster_iAH_arr_size[index] = 0;
nAH_reduced[index] = 0;
// make sure the first hit of each event starts a fresh uniqueID/tdcTime comparison
uniqueID_curr[index] = -1;
tdcTime_curr[index] = 0.0;
for(iAH[index] = 0; iAH[index]<ic[index].nAH; ++iAH[index]) {
if((ic[index].AllHits[iAH[index]].flag & hitFlagBit(1)) == 0) {
// printf("Skip out-of-time...\n");
ic[index].AllHits[iAH[index]].detectorID = 0;
continue;
}
if(ic[index].AllHits[iAH[index]].detectorID < 31 || ic[index].AllHits[iAH[index]].detectorID > 46) {
uniqueID[index] = ic[index].AllHits[iAH[index]].detectorID*1000 + ic[index].AllHits[iAH[index]].elementID;
if(uniqueID[index] != uniqueID_curr[index]) {
uniqueID_curr[index] = uniqueID[index];
tdcTime_curr[index] = ic[index].AllHits[iAH[index]].tdcTime;
}
else {
if(ic[index].AllHits[iAH[index]].detectorID > 36 || ((ic[index].AllHits[iAH[index]].tdcTime - tdcTime_curr[index] >= 0.0) && (ic[index].AllHits[iAH[index]].tdcTime - tdcTime_curr[index] < 80.0)) || ((ic[index].AllHits[iAH[index]].tdcTime - tdcTime_curr[index] <= 0.0) && (ic[index].AllHits[iAH[index]].tdcTime - tdcTime_curr[index] > -80.0))) {
// printf("Skip after-pulse...\n");
ic[index].AllHits[iAH[index]].detectorID = 0;
continue;
}
else {
tdcTime_curr[index] = ic[index].AllHits[iAH[index]].tdcTime;
}
}
}
if(ic[index].AllHits[iAH[index]].detectorID <= nChamberPlanes) {
// printf("%d\n", cluster_iAH_arr_size[index]);
// printf("Decluster...\n");
if(cluster_iAH_arr_size[index] == ClusterSizeMax) {
// printf("Oversized cluster...\n");
}
if(cluster_iAH_arr_size[index] == 0) {
cluster_iAH_arr[index][0] = iAH[index];
++cluster_iAH_arr_size[index];
}
else {
if((ic[index].AllHits[iAH[index]].detectorID != ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].detectorID) || (ic[index].AllHits[iAH[index]].elementID - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].elementID > 1)) {
if(cluster_iAH_arr_size[index] == 2) {
w_max[index] = 0.9*0.5*(ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].pos - ic[index].AllHits[cluster_iAH_arr[index][0]].pos);
w_min[index] = 4.0/9.0*w_max[index];
if((ic[index].AllHits[cluster_iAH_arr[index][0]].driftDistance > w_max[index] && ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].driftDistance > w_min[index]) || (ic[index].AllHits[cluster_iAH_arr[index][0]].driftDistance > w_min[index] && ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].driftDistance > w_max[index])) {
if(ic[index].AllHits[cluster_iAH_arr[index][0]].driftDistance > ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].driftDistance) {
// printf("Skip cluster...\n");
ic[index].AllHits[cluster_iAH_arr[index][0]].detectorID = 0;
}
else {
// printf("Skip cluster...\n");
ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].detectorID = 0;
}
}
else if((((ic[index].AllHits[cluster_iAH_arr[index][0]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].tdcTime) >= 0.0 && (ic[index].AllHits[cluster_iAH_arr[index][0]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].tdcTime) < 8.0) || ((ic[index].AllHits[cluster_iAH_arr[index][0]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].tdcTime) <= 0.0 && (ic[index].AllHits[cluster_iAH_arr[index][0]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].tdcTime) > -8.0)) && (ic[index].AllHits[cluster_iAH_arr[index][0]].detectorID >= 19 && ic[index].AllHits[cluster_iAH_arr[index][0]].detectorID <= 24)) {
// printf("Skip cluster...\n");
ic[index].AllHits[cluster_iAH_arr[index][0]].detectorID = 0;
ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_size[index]-1]].detectorID = 0;
}
}
if(cluster_iAH_arr_size[index] >= 3) {
dt_mean[index] = 0.0;
for(cluster_iAH_arr_cur[index] = 1; cluster_iAH_arr_cur[index] < cluster_iAH_arr_size[index]; ++cluster_iAH_arr_cur[index]) {
dt_mean[index] += ((ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]-1]].tdcTime) > 0.0 ? (ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]-1]].tdcTime) : (ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]-1]].tdcTime - ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]]].tdcTime));
}
dt_mean[index] = dt_mean[index]/(cluster_iAH_arr_size[index] - 1);
if(dt_mean[index] < 10.0) {
// printf("Skip cluster...\n");
for(cluster_iAH_arr_cur[index] = 0; cluster_iAH_arr_cur[index] < cluster_iAH_arr_size[index]; ++cluster_iAH_arr_cur[index]) {
ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]]].detectorID = 0;
}
}
else {
// printf("Skip cluster...\n");
for(cluster_iAH_arr_cur[index] = 1; cluster_iAH_arr_cur[index] < cluster_iAH_arr_size[index]; ++cluster_iAH_arr_cur[index]) {
ic[index].AllHits[cluster_iAH_arr[index][cluster_iAH_arr_cur[index]]].detectorID = 0;
}
}
}
cluster_iAH_arr_size[index] = 0;
}
cluster_iAH_arr[index][cluster_iAH_arr_size[index]] = iAH[index];
++cluster_iAH_arr_size[index];
}
}
}
for(iAH[index] = 0; iAH[index]<ic[index].nAH; ++iAH[index]) {
if(ic[index].AllHits[iAH[index]].detectorID != 0) {
ic[index].AllHits[nAH_reduced[index]] = ic[index].AllHits[iAH[index]];
++nAH_reduced[index];
}
}
ic[index].nAH = nAH_reduced[index];
if(((ic[index].NHits[1]+ic[index].NHits[2]+ic[index].NHits[3]+ic[index].NHits[4]+ic[index].NHits[5]+ic[index].NHits[6])>350) || ((ic[index].NHits[7]+ic[index].NHits[8]+ic[index].NHits[9]+ic[index].NHits[10]+ic[index].NHits[11]+ic[index].NHits[12])>350) || ((ic[index].NHits[13]+ic[index].NHits[14]+ic[index].NHits[15]+ic[index].NHits[16]+ic[index].NHits[17]+ic[index].NHits[18])>170) || ((ic[index].NHits[19]+ic[index].NHits[20]+ic[index].NHits[21]+ic[index].NHits[22]+ic[index].NHits[23]+ic[index].NHits[24])>140) || ((ic[index].NHits[25]+ic[index].NHits[26]+ic[index].NHits[27]+ic[index].NHits[28]+ic[index].NHits[29]+ic[index].NHits[30])>140)) {
// printf("Event rejected...\n");
}
else {
// Process the accepted events (tracking) here.
}
}
int main(int argc, char* argv[]) {
auto start = std::chrono::system_clock::now();
clock_t cp1 = clock();
TString inputFile;
TString outputFile;
inputFile = argv[1];
outputFile = argv[2];
cout<<"Running "<<argv[0]<<endl;
cout<<"Loading "<<argv[1]<<endl;
cout<<"Writing "<<argv[2]<<endl;
SRawEvent* rawEvent = new SRawEvent();
TFile* dataFile = new TFile(inputFile.Data(), "READ");
TTree* dataTree = (TTree *)dataFile->Get("save");
dataTree->SetBranchAddress("rawEvent", &rawEvent);
int nEvtMax = dataTree->GetEntries();
static gEvent host_gEvent[EstnEvtMax];
for(int i = 0; i < nEvtMax; ++i) {
dataTree->GetEntry(i);
// cout<<"Converting "<<i<<"/"<<nEvtMax<<endl;
host_gEvent[i].RunID = rawEvent->fRunID;
host_gEvent[i].EventID = rawEvent->fEventID;
host_gEvent[i].SpillID = rawEvent->fSpillID;
host_gEvent[i].TriggerBits = rawEvent->fTriggerBits;
host_gEvent[i].TargetPos = rawEvent->fTargetPos;
host_gEvent[i].TurnID = rawEvent->fTurnID;
host_gEvent[i].RFID = rawEvent->fRFID;
for(int j=0; j<33; j++) {
host_gEvent[i].Intensity[j] = rawEvent->fIntensity[j];
}
host_gEvent[i].TriggerEmu = rawEvent->fTriggerEmu;
for(int k=0; k<4; k++) {
host_gEvent[i].NRoads[k] = rawEvent->fNRoads[k];
}
for(int l=0; l<(nChamberPlanes+nHodoPlanes+nPropPlanes+1); l++) {
host_gEvent[i].NHits[l] = rawEvent->fNHits[l];
}
host_gEvent[i].nAH = rawEvent->fAllHits.size();
host_gEvent[i].nTH = rawEvent->fTriggerHits.size();
for(int m=0; m<rawEvent->fAllHits.size(); m++) {
host_gEvent[i].AllHits[m].index=(rawEvent->fAllHits[m]).index;
host_gEvent[i].AllHits[m].detectorID=(rawEvent->fAllHits[m]).detectorID;
host_gEvent[i].AllHits[m].elementID=(rawEvent->fAllHits[m]).elementID;
host_gEvent[i].AllHits[m].tdcTime=(rawEvent->fAllHits[m]).tdcTime;
host_gEvent[i].AllHits[m].driftDistance=(rawEvent->fAllHits[m]).driftDistance;
host_gEvent[i].AllHits[m].pos=(rawEvent->fAllHits[m]).pos;
host_gEvent[i].AllHits[m].flag=(rawEvent->fAllHits[m]).flag;
}
for(int n=0; n<rawEvent->fTriggerHits.size(); n++) {
host_gEvent[i].TriggerHits[n].index=(rawEvent->fTriggerHits[n]).index;
host_gEvent[i].TriggerHits[n].detectorID=(rawEvent->fTriggerHits[n]).detectorID;
host_gEvent[i].TriggerHits[n].elementID=(rawEvent->fTriggerHits[n]).elementID;
host_gEvent[i].TriggerHits[n].tdcTime=(rawEvent->fTriggerHits[n]).tdcTime;
host_gEvent[i].TriggerHits[n].driftDistance=(rawEvent->fTriggerHits[n]).driftDistance;
host_gEvent[i].TriggerHits[n].pos=(rawEvent->fTriggerHits[n]).pos;
host_gEvent[i].TriggerHits[n].flag=(rawEvent->fTriggerHits[n]).flag;
}
}
//If the decoded data has NOT been sorted...
// for(int i = 0; i < nEvtMax; ++i) {
// thrust::stable_sort(host_gEvent[i].AllHits, host_gEvent[i].AllHits+host_gEvent[i].nAH, lessthan());
// }
gEvent *device_gEvent;
int sizeofRaw = EstnEvtMax*sizeof(gEvent);
int host_output[EstnEvtMax];
int *device_output;
int sizeofoutput = EstnEvtMax*sizeof(int);
clock_t cp2 = clock();
auto start_kernel = std::chrono::system_clock::now();
cudaMalloc((void**)&device_gEvent, sizeofRaw);
cudaMalloc((void**)&device_output, sizeofoutput);
cudaMemcpy(device_gEvent, host_gEvent, sizeofRaw, cudaMemcpyHostToDevice);
cudaMemcpy(device_output, host_output, sizeofoutput, cudaMemcpyHostToDevice);
gkernel<<<BLOCKS_NUM,THREADS_PER_BLOCK>>>(device_gEvent, device_output);
cudaMemcpy(host_gEvent, device_gEvent, sizeofRaw, cudaMemcpyDeviceToHost);
cudaMemcpy(host_output, device_output, sizeofoutput, cudaMemcpyDeviceToHost);
cudaFree(device_gEvent);
cudaFree(device_output);
auto end_kernel = std::chrono::system_clock::now();
clock_t cp3 = clock();
delete rawEvent;
for(int i = 0; i < host_gEvent[0].nAH; ++i) {
cout<<"output: "<<(host_gEvent[0].AllHits[i].detectorID)<<endl;
}
clock_t cp4 = clock();
auto end = std::chrono::system_clock::now();
double cpu_secs = double(cp4-cp3+cp2-cp1) / CLOCKS_PER_SEC;
auto gpu_ns = end_kernel - start_kernel;
auto overall = end - start;
cout<<"CPU time: "<<cpu_secs<<endl;
cout<<"GPU time: "<<(gpu_ns.count()/1000000000.0)<<endl;
cout<<"Total time: "<<(overall.count()/1000000000.0)<<endl;
return 0;
}
//e906-gat2:/seaquest/users/hjiang/online_reconstruction
|
1ef68b65b7d08ac1907ae0d7f342ef6ffd33d635.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "gpuerrchk.cuh"
#include "real.h"
#include "next_pow_2.h"
#include <assert.h>
#include <iostream>
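// Work-efficient (Brent-Kung style) inclusive scan: each block scans `treesize`
// elements in shared memory (up-sweep then down-sweep) and writes its block total
// to S[blockIdx.x] for the second-level scan.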
__global__ void ch8_longer_scan_kernel1(real* X, real* Y,real* S,unsigned int S_length, unsigned int treesize){
extern __shared__ real XY[];
int i=2*blockIdx.x*blockDim.x+threadIdx.x;
XY[threadIdx.x]=X[i];
XY[threadIdx.x+blockDim.x]=X[i+blockDim.x];
//up-sweep
for (int stride=1; stride <= blockDim.x; stride*=2){
__syncthreads();
int index= 2*(threadIdx.x+1)*stride -1;
if (index< treesize) XY[index]+=XY[index-stride];
}
//down-sweep
for (int stride=treesize/2; stride>=1; stride/=2){
__syncthreads();
int index = 2*(threadIdx.x+1)*stride-1;
if (index+stride < treesize){
XY[index+stride]+=XY[index];
}
}
__syncthreads();
Y[i]= XY[threadIdx.x];
Y[i+blockDim.x]= XY[threadIdx.x+blockDim.x];
if(threadIdx.x == blockDim.x-1)
S[blockIdx.x]= XY[treesize-1];
}
//performs an in-place scan on S
//full s length must be a power of 2
__global__ void ch8_longer_scan_kernel2(real* S, unsigned int full_S_length){
extern __shared__ real XY[];
int i=2*blockIdx.x*blockDim.x+threadIdx.x;
XY[threadIdx.x]=S[i];
XY[threadIdx.x+blockDim.x]=S[i+blockDim.x];
//up-sweep
for (int stride=1; stride <= blockDim.x; stride*=2){
__syncthreads();
int index= 2*(threadIdx.x+1)*stride -1;
if (index< full_S_length) XY[index]+=XY[index-stride];
}
//down-sweep
for (int stride=full_S_length/2; stride>=1; stride/=2){
__syncthreads();
int index = 2*(threadIdx.x+1)*stride-1;
if (index+stride < full_S_length){
XY[index+stride]+=XY[index];
}
}
__syncthreads();
S[i]= XY[threadIdx.x];
S[i+blockDim.x]=XY[threadIdx.x+blockDim.x];
}
__global__ void ch8_longer_scan_kernel3(real* Y, real* S){
int i=2*blockIdx.x*blockDim.x+threadIdx.x;
if (blockIdx.x>0) {
Y[i]+=S[blockIdx.x-1];
Y[i+blockDim.x]+=S[blockIdx.x-1];
}
}
//NEED TO DO MULTISTAGE SCAN ENTIRELY ON DEVICE, taking device input and writing device output
//treesize is assumed to be a power of 2. d_X and d_Y are assumed to be of length length.
//also assume treesize*(S_length)=length
//also assume d_S is small enough to be scanned by one thread block.
//also assume d_S points to an array with length equal to S_length rounded to the next power of 2
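// Example (hypothetical sizes): length = 4096, treesize = 2048 -> S_length = 2, so
// kernel1 runs 2 blocks of 1024 threads, kernel2 scans S with one block of
// full_S_length/2 = 1 thread, and kernel3 adds S[blockIdx.x - 1] to every block after the first.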
void ch8_longer_scan(real* d_X, real* d_Y,real* d_S, size_t length, unsigned int S_length, unsigned int treesize){
hipDeviceProp_t dev_prop;
hipGetDeviceProperties(&dev_prop,0); //assume we are using device 0
size_t share_mem=dev_prop.sharedMemPerBlock;
int thread_limit= dev_prop.maxThreadsPerBlock;
size_t max_per_block=share_mem/sizeof(real);
assert(treesize<=max_per_block && treesize<=2*thread_limit);
assert(treesize*(S_length)==length);
hipLaunchKernelGGL(( ch8_longer_scan_kernel1), dim3(S_length), dim3(treesize/2), treesize*sizeof(real), 0, d_X, d_Y, d_S, S_length, treesize);
gpuErrchk(hipPeekAtLastError());
//debugging
// real Y[2048];
// gpuErrchk(hipMemcpy(Y,d_Y,sizeof(real)*2048,hipMemcpyDeviceToHost));
// for(int i=0; i<2048; i++)
// std::cout << "i=" << i << " Y[i]=" << Y[i] <<std::endl;
unsigned int full_S_length=next_pow_2(S_length);
assert(full_S_length <= max_per_block && full_S_length<=2*thread_limit);
hipLaunchKernelGGL(( ch8_longer_scan_kernel2), dim3(1),dim3(full_S_length/2),sizeof(real)*full_S_length, 0, d_S,full_S_length);
gpuErrchk(hipPeekAtLastError());
hipLaunchKernelGGL(( ch8_longer_scan_kernel3), dim3(S_length),dim3(treesize/2), treesize*sizeof(real), 0, d_Y,d_S);
gpuErrchk(hipPeekAtLastError());
}
| 1ef68b65b7d08ac1907ae0d7f342ef6ffd33d635.cu | #include <math.h>
#include "gpuerrchk.cuh"
#include "real.h"
#include "next_pow_2.h"
#include <assert.h>
#include <iostream>
__global__ void ch8_longer_scan_kernel1(real* X, real* Y,real* S,unsigned int S_length, unsigned int treesize){
extern __shared__ real XY[];
int i=2*blockIdx.x*blockDim.x+threadIdx.x;
XY[threadIdx.x]=X[i];
XY[threadIdx.x+blockDim.x]=X[i+blockDim.x];
//up-sweep
for (int stride=1; stride <= blockDim.x; stride*=2){
__syncthreads();
int index= 2*(threadIdx.x+1)*stride -1;
if (index< treesize) XY[index]+=XY[index-stride];
}
//down-sweep
for (int stride=treesize/2; stride>=1; stride/=2){
__syncthreads();
int index = 2*(threadIdx.x+1)*stride-1;
if (index+stride < treesize){
XY[index+stride]+=XY[index];
}
}
__syncthreads();
Y[i]= XY[threadIdx.x];
Y[i+blockDim.x]= XY[threadIdx.x+blockDim.x];
if(threadIdx.x == blockDim.x-1)
S[blockIdx.x]= XY[treesize-1];
}
//performs an in-place scan on S
//full s length must be a power of 2
__global__ void ch8_longer_scan_kernel2(real* S, unsigned int full_S_length){
extern __shared__ real XY[];
int i=2*blockIdx.x*blockDim.x+threadIdx.x;
XY[threadIdx.x]=S[i];
XY[threadIdx.x+blockDim.x]=S[i+blockDim.x];
//up-sweep
for (int stride=1; stride <= blockDim.x; stride*=2){
__syncthreads();
int index= 2*(threadIdx.x+1)*stride -1;
if (index< full_S_length) XY[index]+=XY[index-stride];
}
//down-sweep
for (int stride=full_S_length/2; stride>=1; stride/=2){
__syncthreads();
int index = 2*(threadIdx.x+1)*stride-1;
if (index+stride < full_S_length){
XY[index+stride]+=XY[index];
}
}
__syncthreads();
S[i]= XY[threadIdx.x];
S[i+blockDim.x]=XY[threadIdx.x+blockDim.x];
}
__global__ void ch8_longer_scan_kernel3(real* Y, real* S){
int i=2*blockIdx.x*blockDim.x+threadIdx.x;
if (blockIdx.x>0) {
Y[i]+=S[blockIdx.x-1];
Y[i+blockDim.x]+=S[blockIdx.x-1];
}
}
//NEED TO DO MULTISTAGE SCAN ENTIRELY ON DEVICE, taking device input and writing device output
//treesize is assumed to be a power of 2. d_X and d_Y are assumed to be of length length.
//also assume treesize*(S_length)=length
//also assume d_S is small enough to be scanned by one thread block.
//also assume d_S points to an array with length equal to S_length rounded to the next power of 2
void ch8_longer_scan(real* d_X, real* d_Y,real* d_S, size_t length, unsigned int S_length, unsigned int treesize){
cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop,0); //assume we are using device 0
size_t share_mem=dev_prop.sharedMemPerBlock;
int thread_limit= dev_prop.maxThreadsPerBlock;
size_t max_per_block=share_mem/sizeof(real);
assert(treesize<=max_per_block && treesize<=2*thread_limit);
assert(treesize*(S_length)==length);
ch8_longer_scan_kernel1<<<S_length, treesize/2, treesize*sizeof(real)>>>(d_X, d_Y, d_S, S_length, treesize);
gpuErrchk(cudaPeekAtLastError());
//debugging
// real Y[2048];
// gpuErrchk(cudaMemcpy(Y,d_Y,sizeof(real)*2048,cudaMemcpyDeviceToHost));
// for(int i=0; i<2048; i++)
// std::cout << "i=" << i << " Y[i]=" << Y[i] <<std::endl;
unsigned int full_S_length=next_pow_2(S_length);
assert(full_S_length <= max_per_block && full_S_length<=2*thread_limit);
ch8_longer_scan_kernel2<<<1,full_S_length/2,sizeof(real)*full_S_length>>>(d_S,full_S_length);
gpuErrchk(cudaPeekAtLastError());
ch8_longer_scan_kernel3<<<S_length,treesize/2, treesize*sizeof(real)>>>(d_Y,d_S);
gpuErrchk(cudaPeekAtLastError());
}
|
5def301feb075b5e9a56da0a862e4192325d303c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void multiply(double *a, double *b, double *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0;
if( col < k && row < m)
{
int rowIndex = row * n;
for(int i = 0; i < n; i++)
{
sum += a[rowIndex + i] * b[i * k + col];
}
c[row * k + col] = sum;
}
} | 5def301feb075b5e9a56da0a862e4192325d303c.cu | extern "C"
__global__ void multiply(double *a, double *b, double *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0;
if( col < k && row < m)
{
int rowIndex = row * n;
for(int i = 0; i < n; i++)
{
sum += a[rowIndex + i] * b[i * k + col];
}
c[row * k + col] = sum;
}
} |
be79a07ee4d000630687713ca520fe3b0222bb17.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<string.h>
#include<rocblas.h>
#include<R.h>
#include<Rinternals.h>
#include"cuseful.h"
#define HALF RAND_MAX/2
int hasCudaError(const char * msg) {
hipError_t err = hipGetLastError();
if(hipSuccess != err)
error("cuda error : %s : %s\n", msg, hipGetErrorString(err));
return 0;
}
void checkCudaError(const char * msg) {
hipError_t err = hipGetLastError();
if(hipSuccess != err) {
if(msg != NULL)
warning(msg);
error(hipGetErrorString(err));
}
}
char * cublasGetErrorString(cublasStatus err)
{
switch(err) {
case HIPBLAS_STATUS_SUCCESS :
return "operation completed successfully";
case HIPBLAS_STATUS_NOT_INITIALIZED :
return "CUBLAS library not initialized";
case HIPBLAS_STATUS_ALLOC_FAILED :
return "resource allocation failed";
case HIPBLAS_STATUS_INVALID_VALUE :
return "unsupported numerical value was passed to function";
case HIPBLAS_STATUS_ARCH_MISMATCH :
return "function requires an architectural feature absent from \
the architecture of the device";
case HIPBLAS_STATUS_MAPPING_ERROR :
return "access to GPU memory space failed";
case HIPBLAS_STATUS_EXECUTION_FAILED :
return "GPU program failed to execute";
case HIPBLAS_STATUS_INTERNAL_ERROR :
return "an internal CUBLAS operation failed";
default :
return "unknown error type";
}
}
void checkCublasError(const char * msg)
{
cublasStatus err = hipblasGetError();
if(err != HIPBLAS_STATUS_SUCCESS)
error("cublas error : %s : %s\n", msg, cublasGetErrorString(err));
}
int hasCublasError(const char * msg)
{
cublasStatus err = hipblasGetError();
if(err != HIPBLAS_STATUS_SUCCESS)
error("cublas error : %s : %s\n", msg, cublasGetErrorString(err));
return 0;
}
| be79a07ee4d000630687713ca520fe3b0222bb17.cu | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<string.h>
#include<cublas.h>
#include<R.h>
#include<Rinternals.h>
#include"cuseful.h"
#define HALF RAND_MAX/2
int hasCudaError(const char * msg) {
cudaError_t err = cudaGetLastError();
if(cudaSuccess != err)
error("cuda error : %s : %s\n", msg, cudaGetErrorString(err));
return 0;
}
void checkCudaError(const char * msg) {
cudaError_t err = cudaGetLastError();
if(cudaSuccess != err) {
if(msg != NULL)
warning(msg);
error(cudaGetErrorString(err));
}
}
char * cublasGetErrorString(cublasStatus err)
{
switch(err) {
case CUBLAS_STATUS_SUCCESS :
return "operation completed successfully";
case CUBLAS_STATUS_NOT_INITIALIZED :
return "CUBLAS library not initialized";
case CUBLAS_STATUS_ALLOC_FAILED :
return "resource allocation failed";
case CUBLAS_STATUS_INVALID_VALUE :
return "unsupported numerical value was passed to function";
case CUBLAS_STATUS_ARCH_MISMATCH :
return "function requires an architectural feature absent from \
the architecture of the device";
case CUBLAS_STATUS_MAPPING_ERROR :
return "access to GPU memory space failed";
case CUBLAS_STATUS_EXECUTION_FAILED :
return "GPU program failed to execute";
case CUBLAS_STATUS_INTERNAL_ERROR :
return "an internal CUBLAS operation failed";
default :
return "unknown error type";
}
}
void checkCublasError(const char * msg)
{
cublasStatus err = cublasGetError();
if(err != CUBLAS_STATUS_SUCCESS)
error("cublas error : %s : %s\n", msg, cublasGetErrorString(err));
}
int hasCublasError(const char * msg)
{
cublasStatus err = cublasGetError();
if(err != CUBLAS_STATUS_SUCCESS)
error("cublas error : %s : %s\n", msg, cublasGetErrorString(err));
return 0;
}
|
93cd1280c654b323038371ba827cd78d6512dd97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** x = 3
 **/
#include "vecmultKernel.h"
__global__ void MultiplyVectors(const float* A, const float* B, float* C)
{
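// each thread accumulates a 3x3 tile of C as outer products of a and b over k iterations,
// updating a and b with an affine recurrence each step (consistent with the "x = 3" note above)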
int B_start_index = blockIdx.x*ValuesPerThread;
int A_start_index = threadIdx.x*ValuesPerThread;
int C_width = gridDim.x*ValuesPerThread;
int t;
float c_0_0, c_0_1, c_0_2, c_1_0, c_1_1, c_1_2, c_2_0, c_2_1, c_2_2;
float a_0, a_1, a_2;
float b_0, b_1, b_2;
a_0 = A[A_start_index+0];
a_1 = A[A_start_index+1];
a_2 = A[A_start_index+2];
b_0 = B[B_start_index+0];
b_1 = B[B_start_index+1];
b_2 = B[B_start_index+2];
c_0_0 = 0;
c_0_1 = 0;
c_0_2 = 0;
c_1_0 = 0;
c_1_1 = 0;
c_1_2 = 0;
c_2_0 = 0;
c_2_1 = 0;
c_2_2 = 0;
for (t = 0; t < k; t++) {
c_0_0 += a_0*b_0;
c_0_1 += a_0*b_1;
c_0_2 += a_0*b_2;
c_1_0 += a_1*b_0;
c_1_1 += a_1*b_1;
c_1_2 += a_1*b_2;
c_2_0 += a_2*b_0;
c_2_1 += a_2*b_1;
c_2_2 += a_2*b_2;
a_0 = a_0*1.1f+1.7f;
a_1 = a_1*1.1f+1.7f;
a_2 = a_2*1.1f+1.7f;
b_0 = b_0*1.1f+1.7f;
b_1 = b_1*1.1f+1.7f;
b_2 = b_2*1.1f+1.7f;
}
C[(A_start_index+0)*C_width + B_start_index+0] = c_0_0;
C[(A_start_index+0)*C_width + B_start_index+1] = c_0_1;
C[(A_start_index+0)*C_width + B_start_index+2] = c_0_2;
C[(A_start_index+1)*C_width + B_start_index+0] = c_1_0;
C[(A_start_index+1)*C_width + B_start_index+1] = c_1_1;
C[(A_start_index+1)*C_width + B_start_index+2] = c_1_2;
C[(A_start_index+2)*C_width + B_start_index+0] = c_2_0;
C[(A_start_index+2)*C_width + B_start_index+1] = c_2_1;
C[(A_start_index+2)*C_width + B_start_index+2] = c_2_2;
}
| 93cd1280c654b323038371ba827cd78d6512dd97.cu | /** x = 3
 **/
#include "vecmultKernel.h"
__global__ void MultiplyVectors(const float* A, const float* B, float* C)
{
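// each thread accumulates a 3x3 tile of C as outer products of a and b over k iterations,
// updating a and b with an affine recurrence each step (consistent with the "x = 3" note above)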
int B_start_index = blockIdx.x*ValuesPerThread;
int A_start_index = threadIdx.x*ValuesPerThread;
int C_width = gridDim.x*ValuesPerThread;
int t;
float c_0_0, c_0_1, c_0_2, c_1_0, c_1_1, c_1_2, c_2_0, c_2_1, c_2_2;
float a_0, a_1, a_2;
float b_0, b_1, b_2;
a_0 = A[A_start_index+0];
a_1 = A[A_start_index+1];
a_2 = A[A_start_index+2];
b_0 = B[B_start_index+0];
b_1 = B[B_start_index+1];
b_2 = B[B_start_index+2];
c_0_0 = 0;
c_0_1 = 0;
c_0_2 = 0;
c_1_0 = 0;
c_1_1 = 0;
c_1_2 = 0;
c_2_0 = 0;
c_2_1 = 0;
c_2_2 = 0;
for (t = 0; t < k; t++) {
c_0_0 += a_0*b_0;
c_0_1 += a_0*b_1;
c_0_2 += a_0*b_2;
c_1_0 += a_1*b_0;
c_1_1 += a_1*b_1;
c_1_2 += a_1*b_2;
c_2_0 += a_2*b_0;
c_2_1 += a_2*b_1;
c_2_2 += a_2*b_2;
a_0 = a_0*1.1f+1.7f;
a_1 = a_1*1.1f+1.7f;
a_2 = a_2*1.1f+1.7f;
b_0 = b_0*1.1f+1.7f;
b_1 = b_1*1.1f+1.7f;
b_2 = b_2*1.1f+1.7f;
}
C[(A_start_index+0)*C_width + B_start_index+0] = c_0_0;
C[(A_start_index+0)*C_width + B_start_index+1] = c_0_1;
C[(A_start_index+0)*C_width + B_start_index+2] = c_0_2;
C[(A_start_index+1)*C_width + B_start_index+0] = c_1_0;
C[(A_start_index+1)*C_width + B_start_index+1] = c_1_1;
C[(A_start_index+1)*C_width + B_start_index+2] = c_1_2;
C[(A_start_index+2)*C_width + B_start_index+0] = c_2_0;
C[(A_start_index+2)*C_width + B_start_index+1] = c_2_1;
C[(A_start_index+2)*C_width + B_start_index+2] = c_2_2;
}
|
17425f748638237e8ec902fefa52834f87decc35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <mpi.h>
#include <fmt/format.h>
#include <iostream>
#include "omp.h"
#include<map>
#include <roctracer/roctx.h>
#include "clara/clara.hpp"
#include "pangolin/pangolin.cuh"
#include "pangolin/pangolin.hpp"
#include "pangolin/algorithm/zero.cuh"
#define UT uint32_t
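// estimate the maximum possible k from the degree histogram: the largest d+1 with at least d+1 vertices of degree >= d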
int getMaxK(std::map<UT, int> degree)
{
typedef std::map<UT, int>::reverse_iterator it_type;
int maxK = 0;
int reverseCount = 0;
bool getNext = false;
for (it_type m = degree.rbegin(); m != degree.rend(); m++)
{
int degree = m->first;
int proposedKmax = degree + 1;
reverseCount += m->second;
if (reverseCount >= proposedKmax)
{
maxK = proposedKmax;
break;
}
}
return maxK;
}
int main(int argc, char **argv) {
pangolin::init();
pangolin::Config config;
std::vector<int> gpus;
std::string path;
int iters = 1;
bool help = false;
bool debug = false;
bool verbose = false;
bool readMostly = false;
bool accessedBy = false;
bool prefetchAsync = false;
clara::Parser cli;
cli = cli | clara::Help(help);
cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr");
cli = cli |
clara::Opt(verbose)["--verbose"]("print verbose messages to stderr");
cli = cli | clara::Opt(gpus, "ids")["-g"]("gpus to use");
cli = cli | clara::Opt(readMostly)["--read-mostly"](
"mark data as read-mostly by all gpus before kernel");
cli = cli | clara::Opt(accessedBy)["--accessed-by"](
"mark data as accessed-by all GPUs before kernel");
cli = cli | clara::Opt(prefetchAsync)["--prefetch-async"](
"prefetch data to all GPUs before kernel");
cli = cli | clara::Opt(iters, "N")["-n"]("number of counts");
cli =
cli | clara::Arg(path, "graph file")("Path to adjacency list").required();
auto result = cli.parse(clara::Args(argc, argv));
if (!result) {
LOG(error, "Error in command line: {}", result.errorMessage());
exit(1);
}
if (help) {
std::cout << cli;
return 0;
}
// set logging level
if (verbose) {
pangolin::logger::set_level(pangolin::logger::Level::TRACE);
} else if (debug) {
pangolin::logger::set_level(pangolin::logger::Level::DEBUG);
}
// log command line before much else happens
{
std::string cmd;
for (int i = 0; i < argc; ++i) {
if (i != 0) {
cmd += " ";
}
cmd += argv[i];
}
LOG(debug, cmd);
}
LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR,
PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH);
LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC);
LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH);
LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES);
#ifndef NDEBUG
LOG(warn, "Not a release build");
#endif
if (gpus.empty()) {
LOG(warn, "no GPUs provided on command line, using GPU 0");
gpus.push_back(0);
}
// read data
auto start = std::chrono::system_clock::now();
pangolin::EdgeListFile file(path);
std::vector<pangolin::EdgeTy<UT>> edges;
std::vector<pangolin::EdgeTy<UT>> fileEdges;
while (file.get_edges(fileEdges, 10)) {
edges.insert(edges.end(), fileEdges.begin(), fileEdges.end());
}
double elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "read_data time {}s", elapsed);
LOG(debug, "read {} edges", edges.size());
// create csr and count `iters` times
std::vector<double> times;
uint64_t nnz;
// create csr
start = std::chrono::system_clock::now();
auto upperTriangular = [](pangolin::EdgeTy<UT> e) {
return true; //e.first < e.second;
};
auto csr = pangolin::COO<UT>::from_edges(edges.begin(), edges.end(),
upperTriangular);
LOG(debug, "nnz = {}", csr.nnz());
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "create CSR time {}s", elapsed);
// accessed-by
roctxRangePush("accessed-by");
start = std::chrono::system_clock::now();
if (accessedBy) {
for (const auto &gpu : gpus) {
csr.accessed_by(gpu);
CUDA_RUNTIME(hipSetDevice(gpu));
CUDA_RUNTIME(hipDeviceSynchronize());
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
roctxRangePop();
LOG(info, "accessed-by CSR time {}s", elapsed);
// prefetch
roctxRangePush("prefetch");
start = std::chrono::system_clock::now();
if (prefetchAsync) {
for (const auto &gpu : gpus) {
csr.prefetch_async(gpu);
CUDA_RUNTIME(hipSetDevice(gpu));
CUDA_RUNTIME(hipDeviceSynchronize());
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
roctxRangePop();
LOG(info, "prefetch CSR time {}s", elapsed);
// count triangles
roctxRangePush("count");
//start = std::chrono::system_clock::now();
{
start = std::chrono::system_clock::now();
//csr.read_mostly();
UT *rowPtr = csr.rowPtr_.data();
UT *rowInd = csr.rowInd_.data();
UT *colInd = csr.colInd_.data();
int numEdges = csr.nnz();
int numGpus = gpus.size();
int numNodes = csr.num_nodes();
int edgesPerGPU = (numEdges + numGpus - 1) / numGpus;
pangolin::Vector<UT> uSrcKp(numEdges);
pangolin::Vector<UT> uDstKp(numEdges);
pangolin::Vector<UT> uReversed(numEdges);
printf("NNZ=%d\n", numEdges);
// create async counters
std::vector<pangolin::MultiGPU_Ktruss_Incremental> trussCounters;
for (int dev : gpus) {
LOG(info, "create device {} counter", dev);
auto counter = pangolin::MultiGPU_Ktruss_Incremental(numEdges, dev);
counter.CreateWorkspace(numEdges);
counter.selectedOut[0] = numEdges;
trussCounters.push_back(counter);
counter.InitializeWorkSpace_async(numEdges);
}
int edgeStart = 0;
for (auto &counter : trussCounters)
{
const size_t edgeStop = ::min(edgeStart + edgesPerGPU, numEdges);
const size_t edgesToProcess = edgeStop - edgeStart;
counter.Inialize_Unified_async(edgeStart, edgesToProcess, rowPtr, rowInd, colInd, uSrcKp.data(), uReversed.data());
edgeStart += edgesPerGPU;
}
//uReversed.read_mostly();
UT *ptrSrc, *ptrDst;
UT *s1, *d1, *s2, *d2;
s1 = rowInd;
d1 = colInd;
s2 = uSrcKp.data();
d2 = uDstKp.data();
ptrSrc = s1;
ptrDst = d1;
int kmin = 3;
int kmax=-1;
constexpr int dimBlock = 32; //For edges and nodes
int dimGridEdges = (numEdges + dimBlock - 1) / dimBlock;
CUDA_RUNTIME(hipMemAdvise(ptrSrc, numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(hipMemAdvise(ptrDst, numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(hipMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(hipMemAdvise(uReversed.data(), numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */));
while(true)
{
//CUDA_RUNTIME(hipMemAdvise(ptrSrc, numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */));
//CUDA_RUNTIME(hipMemAdvise(ptrDst, numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */));
//CUDA_RUNTIME(hipMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */));
//uReversed.read_mostly();
bool firstTry = true;
printf("Kmin = %d\n", kmin);
for (auto &counter : trussCounters)
{
counter.setDevice();
counter.hnumaffected[0] = 1;
CUDA_RUNTIME(hipMemsetAsync(counter.gnumaffected,0,sizeof(UT),counter.stream()));
}
bool assumpAffected = true;
dimGridEdges = (numEdges + dimBlock - 1) / dimBlock;
/*roctxRangePush("kernel per k");
start = std::chrono::system_clock::now();*/
while(assumpAffected)
{
assumpAffected = false;
for (int i=0; i<numGpus;i++)
{
auto& counter = trussCounters[i];
counter.setDevice();
if(counter.hnumaffected[0]>0)
{
hipLaunchKernelGGL(( core_direct<dimBlock>), dim3(dimGridEdges),dim3(dimBlock),0,counter.stream(), counter.gnumdeleted,
counter.gnumaffected, kmin+i+1, 0, numEdges,
rowPtr, ptrSrc, ptrDst, counter.gKeep, counter.gAffected, uReversed.data(), firstTry, 1);
//Copy to host
CUDA_RUNTIME(hipMemcpyAsync(counter.hnumaffected, counter.gnumaffected, sizeof(UT), hipMemcpyDeviceToHost, counter.stream()));
CUDA_RUNTIME(hipMemcpyAsync(counter.hnumdeleted, counter.gnumdeleted, sizeof(UT), hipMemcpyDeviceToHost, counter.stream()));
//Set gpu data to zeros
CUDA_RUNTIME(hipMemsetAsync(counter.gnumdeleted,0,sizeof(UT),counter.stream()));
CUDA_RUNTIME(hipMemsetAsync(counter.gnumaffected,0,sizeof(UT),counter.stream()));
}
}
for (int i=0; i<numGpus;i++)
{
auto& counter = trussCounters[i];
counter.setDevice();
counter.sync();
assumpAffected = assumpAffected || (counter.hnumaffected[0]>0);
counter.percentage_deleted_k = (counter.hnumdeleted[0])*1.0/numEdges;
}
firstTry = false;
}
/*elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
roctxRangePop();
LOG(info, "------------------------------kernel time per k {}s", elapsed);
roctxRangePush("Rest of computaion: transfer best k + soft compact per k");
start = std::chrono::system_clock::now();*/
bool foundKmax = false;
int fallBackGPU = -1;
for (int i=0; i<numGpus;i++)
{
auto& counter = trussCounters[i];
counter.setDevice();
printf("deleted=%d of %d\n", counter.hnumdeleted[0], counter.selectedOut[0]);
if(numEdges - counter.hnumdeleted[0] > 2)
{
kmax = kmin + i + 1;
fallBackGPU = i;
}
else
{
foundKmax = true;
break;
}
}
kmin += numGpus;
int counter = 0;
if(!foundKmax)
{
printf("kmin = %d ---------------------------------------------------------------------------\n", kmin);
printf("uset for rowPtr and Reversed\n");
CUDA_RUNTIME(hipMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), hipMemAdviseUnsetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(hipMemAdvise(uReversed.data(), numEdges * sizeof(UT), hipMemAdviseUnsetReadMostly, 0 /* ignored */));
//each gpu stores latest keep
auto& c = trussCounters[fallBackGPU];
c.setDevice();
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, s1, c.gKeep, s2, c.selectedOut, numEdges, c.stream());
CUDA_RUNTIME(hipMalloc(&d_temp_storage, temp_storage_bytes));
cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, s1, c.gKeep, s2, c.selectedOut, numEdges, c.stream());
cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d1, c.gKeep, d2, c.selectedOut, numEdges, c.stream());
CUDA_RUNTIME(hipFree(d_temp_storage));
hipDeviceSynchronize();
CUDA_RUNTIME(hipGetLastError());
printf("uset for Source row and column\n");
CUDA_RUNTIME(hipMemAdvise(s1, numEdges * sizeof(UT), hipMemAdviseUnsetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(hipMemAdvise(d1, numEdges * sizeof(UT), hipMemAdviseUnsetReadMostly, 0 /* ignored */));
printf("set for destination row and column\n");
CUDA_RUNTIME(hipMemAdvise(s2, numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(hipMemAdvise(d2, numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */));
numEdges = c.selectedOut[0];
edgesPerGPU = (numEdges + numGpus - 1) / numGpus;
printf("Progress: Kmax=%d, fallback=%d, Remaining=%d\n", kmax, fallBackGPU, numEdges);
ptrSrc = s2;
s2 = s1;
s1 = ptrSrc;
ptrDst = d2;
d2 = d1;
d1 = ptrDst;
int edgeStart = 0;
for (auto &counter : trussCounters)
{
counter.setDevice();
counter.InitializeWorkSpace_async(numEdges);
const size_t edgeStop = ::min(edgeStart + edgesPerGPU, numEdges);
const size_t edgesToProcess = edgeStop - edgeStart;
int dimGridLef = (edgesToProcess + dimBlock - 1) / dimBlock;
hipLaunchKernelGGL(( RebuildArrays<dimBlock>), dim3(dimGridLef),dim3(dimBlock),0,counter.stream(), edgeStart, edgesToProcess, numEdges, rowPtr, ptrSrc);
edgeStart += edgesPerGPU;
}
edgeStart = 0;
for (auto &counter : trussCounters)
{
counter.setDevice();
const size_t edgeStop = ::min(edgeStart + edgesPerGPU, numEdges);
const size_t edgesToProcess = edgeStop - edgeStart;
int dimGridLef = (edgesToProcess + dimBlock - 1) / dimBlock;
hipLaunchKernelGGL(( RebuildReverse<dimBlock>), dim3(dimGridLef),dim3(dimBlock),0,counter.stream(), edgeStart, edgesToProcess, rowPtr, ptrSrc, ptrDst, uReversed.data());
edgeStart += edgesPerGPU;
}
printf("set for rowPtr and reversed\n");
CUDA_RUNTIME(hipMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(hipMemAdvise(uReversed.data(), numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(hipGetLastError());
}
else{
break;
}
}
//printf("New Kmin = %d, New Kmax=%d\n", newKmin, newKmax);
for (auto &counter : trussCounters)
counter.free();
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
roctxRangePop();
LOG(info, "count time {}s", elapsed);
LOG(info, "MOHA {} ktruss ({} teps)", kmax, csr.nnz() / elapsed);
times.push_back(elapsed);
//tris = total;
nnz = csr.nnz();
//std::cout << path << ",\t" << nnz << ",\t" << tris;
for (const auto &t : times) {
std::cout << ",\t" << t;
}
std::cout << std::endl;
}
return 0;
}
| 17425f748638237e8ec902fefa52834f87decc35.cu | #include <mpi.h>
#include <fmt/format.h>
#include <iostream>
#include "omp.h"
#include<map>
#include <nvToolsExt.h>
#include "clara/clara.hpp"
#include "pangolin/pangolin.cuh"
#include "pangolin/pangolin.hpp"
#include "pangolin/algorithm/zero.cuh"
#define UT uint32_t
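// estimate the maximum possible k from the degree histogram: the largest d+1 with at least d+1 vertices of degree >= d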
int getMaxK(std::map<UT, int> degree)
{
typedef std::map<UT, int>::reverse_iterator it_type;
int maxK = 0;
int reverseCount = 0;
bool getNext = false;
for (it_type m = degree.rbegin(); m != degree.rend(); m++)
{
int degree = m->first;
int proposedKmax = degree + 1;
reverseCount += m->second;
if (reverseCount >= proposedKmax)
{
maxK = proposedKmax;
break;
}
}
return maxK;
}
int main(int argc, char **argv) {
pangolin::init();
pangolin::Config config;
std::vector<int> gpus;
std::string path;
int iters = 1;
bool help = false;
bool debug = false;
bool verbose = false;
bool readMostly = false;
bool accessedBy = false;
bool prefetchAsync = false;
clara::Parser cli;
cli = cli | clara::Help(help);
cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr");
cli = cli |
clara::Opt(verbose)["--verbose"]("print verbose messages to stderr");
cli = cli | clara::Opt(gpus, "ids")["-g"]("gpus to use");
cli = cli | clara::Opt(readMostly)["--read-mostly"](
"mark data as read-mostly by all gpus before kernel");
cli = cli | clara::Opt(accessedBy)["--accessed-by"](
"mark data as accessed-by all GPUs before kernel");
cli = cli | clara::Opt(prefetchAsync)["--prefetch-async"](
"prefetch data to all GPUs before kernel");
cli = cli | clara::Opt(iters, "N")["-n"]("number of counts");
cli =
cli | clara::Arg(path, "graph file")("Path to adjacency list").required();
auto result = cli.parse(clara::Args(argc, argv));
if (!result) {
LOG(error, "Error in command line: {}", result.errorMessage());
exit(1);
}
if (help) {
std::cout << cli;
return 0;
}
// set logging level
if (verbose) {
pangolin::logger::set_level(pangolin::logger::Level::TRACE);
} else if (debug) {
pangolin::logger::set_level(pangolin::logger::Level::DEBUG);
}
// log command line before much else happens
{
std::string cmd;
for (int i = 0; i < argc; ++i) {
if (i != 0) {
cmd += " ";
}
cmd += argv[i];
}
LOG(debug, cmd);
}
LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR,
PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH);
LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC);
LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH);
LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES);
#ifndef NDEBUG
LOG(warn, "Not a release build");
#endif
if (gpus.empty()) {
LOG(warn, "no GPUs provided on command line, using GPU 0");
gpus.push_back(0);
}
// read data
auto start = std::chrono::system_clock::now();
pangolin::EdgeListFile file(path);
std::vector<pangolin::EdgeTy<UT>> edges;
std::vector<pangolin::EdgeTy<UT>> fileEdges;
while (file.get_edges(fileEdges, 10)) {
edges.insert(edges.end(), fileEdges.begin(), fileEdges.end());
}
double elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "read_data time {}s", elapsed);
LOG(debug, "read {} edges", edges.size());
// create csr and count `iters` times
std::vector<double> times;
uint64_t nnz;
// create csr
start = std::chrono::system_clock::now();
auto upperTriangular = [](pangolin::EdgeTy<UT> e) {
return true; //e.first < e.second;
};
auto csr = pangolin::COO<UT>::from_edges(edges.begin(), edges.end(),
upperTriangular);
LOG(debug, "nnz = {}", csr.nnz());
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "create CSR time {}s", elapsed);
// accessed-by
nvtxRangePush("accessed-by");
start = std::chrono::system_clock::now();
if (accessedBy) {
for (const auto &gpu : gpus) {
csr.accessed_by(gpu);
CUDA_RUNTIME(cudaSetDevice(gpu));
CUDA_RUNTIME(cudaDeviceSynchronize());
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
nvtxRangePop();
LOG(info, "accessed-by CSR time {}s", elapsed);
// prefetch
nvtxRangePush("prefetch");
start = std::chrono::system_clock::now();
if (prefetchAsync) {
for (const auto &gpu : gpus) {
csr.prefetch_async(gpu);
CUDA_RUNTIME(cudaSetDevice(gpu));
CUDA_RUNTIME(cudaDeviceSynchronize());
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
nvtxRangePop();
LOG(info, "prefetch CSR time {}s", elapsed);
// count triangles
nvtxRangePush("count");
//start = std::chrono::system_clock::now();
{
start = std::chrono::system_clock::now();
//csr.read_mostly();
UT *rowPtr = csr.rowPtr_.data();
UT *rowInd = csr.rowInd_.data();
UT *colInd = csr.colInd_.data();
int numEdges = csr.nnz();
int numGpus = gpus.size();
int numNodes = csr.num_nodes();
int edgesPerGPU = (numEdges + numGpus - 1) / numGpus;
pangolin::Vector<UT> uSrcKp(numEdges);
pangolin::Vector<UT> uDstKp(numEdges);
pangolin::Vector<UT> uReversed(numEdges);
printf("NNZ=%d\n", numEdges);
// create async counters
std::vector<pangolin::MultiGPU_Ktruss_Incremental> trussCounters;
for (int dev : gpus) {
LOG(info, "create device {} counter", dev);
auto counter = pangolin::MultiGPU_Ktruss_Incremental(numEdges, dev);
counter.CreateWorkspace(numEdges);
counter.selectedOut[0] = numEdges;
trussCounters.push_back(counter);
counter.InitializeWorkSpace_async(numEdges);
}
int edgeStart = 0;
for (auto &counter : trussCounters)
{
const size_t edgeStop = std::min(edgeStart + edgesPerGPU, numEdges);
const size_t edgesToProcess = edgeStop - edgeStart;
counter.Inialize_Unified_async(edgeStart, edgesToProcess, rowPtr, rowInd, colInd, uSrcKp.data(), uReversed.data());
edgeStart += edgesPerGPU;
}
//uReversed.read_mostly();
UT *ptrSrc, *ptrDst;
UT *s1, *d1, *s2, *d2;
s1 = rowInd;
d1 = colInd;
s2 = uSrcKp.data();
d2 = uDstKp.data();
ptrSrc = s1;
ptrDst = d1;
int kmin = 3;
int kmax=-1;
constexpr int dimBlock = 32; //For edges and nodes
int dimGridEdges = (numEdges + dimBlock - 1) / dimBlock;
CUDA_RUNTIME(cudaMemAdvise(ptrSrc, numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(cudaMemAdvise(ptrDst, numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(cudaMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(cudaMemAdvise(uReversed.data(), numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */));
while(true)
{
//CUDA_RUNTIME(cudaMemAdvise(ptrSrc, numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */));
//CUDA_RUNTIME(cudaMemAdvise(ptrDst, numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */));
//CUDA_RUNTIME(cudaMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */));
//uReversed.read_mostly();
bool firstTry = true;
printf("Kmin = %d\n", kmin);
for (auto &counter : trussCounters)
{
counter.setDevice();
counter.hnumaffected[0] = 1;
CUDA_RUNTIME(cudaMemsetAsync(counter.gnumaffected,0,sizeof(UT),counter.stream()));
}
bool assumpAffected = true;
dimGridEdges = (numEdges + dimBlock - 1) / dimBlock;
/*nvtxRangePush("kernel per k");
start = std::chrono::system_clock::now();*/
while(assumpAffected)
{
assumpAffected = false;
for (int i=0; i<numGpus;i++)
{
auto& counter = trussCounters[i];
counter.setDevice();
if(counter.hnumaffected[0]>0)
{
core_direct<dimBlock><<<dimGridEdges,dimBlock,0,counter.stream()>>>(counter.gnumdeleted,
counter.gnumaffected, kmin+i+1, 0, numEdges,
rowPtr, ptrSrc, ptrDst, counter.gKeep, counter.gAffected, uReversed.data(), firstTry, 1);
//Copy to host
CUDA_RUNTIME(cudaMemcpyAsync(counter.hnumaffected, counter.gnumaffected, sizeof(UT), cudaMemcpyDeviceToHost, counter.stream()));
CUDA_RUNTIME(cudaMemcpyAsync(counter.hnumdeleted, counter.gnumdeleted, sizeof(UT), cudaMemcpyDeviceToHost, counter.stream()));
//Set gpu data to zeros
CUDA_RUNTIME(cudaMemsetAsync(counter.gnumdeleted,0,sizeof(UT),counter.stream()));
CUDA_RUNTIME(cudaMemsetAsync(counter.gnumaffected,0,sizeof(UT),counter.stream()));
}
}
for (int i=0; i<numGpus;i++)
{
auto& counter = trussCounters[i];
counter.setDevice();
counter.sync();
assumpAffected = assumpAffected || (counter.hnumaffected[0]>0);
counter.percentage_deleted_k = (counter.hnumdeleted[0])*1.0/numEdges;
}
firstTry = false;
}
/*elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
nvtxRangePop();
LOG(info, "------------------------------kernel time per k {}s", elapsed);
nvtxRangePush("Rest of computaion: transfer best k + soft compact per k");
start = std::chrono::system_clock::now();*/
bool foundKmax = false;
int fallBackGPU = -1;
for (int i=0; i<numGpus;i++)
{
auto& counter = trussCounters[i];
counter.setDevice();
printf("deleted=%d of %d\n", counter.hnumdeleted[0], counter.selectedOut[0]);
if(numEdges - counter.hnumdeleted[0] > 2)
{
kmax = kmin + i + 1;
fallBackGPU = i;
}
else
{
foundKmax = true;
break;
}
}
kmin += numGpus;
int counter = 0;
if(!foundKmax)
{
printf("kmin = %d ---------------------------------------------------------------------------\n", kmin);
printf("uset for rowPtr and Reversed\n");
CUDA_RUNTIME(cudaMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), cudaMemAdviseUnsetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(cudaMemAdvise(uReversed.data(), numEdges * sizeof(UT), cudaMemAdviseUnsetReadMostly, 0 /* ignored */));
//each gpu stores latest keep
auto& c = trussCounters[fallBackGPU];
c.setDevice();
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, s1, c.gKeep, s2, c.selectedOut, numEdges, c.stream());
CUDA_RUNTIME(cudaMalloc(&d_temp_storage, temp_storage_bytes));
cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, s1, c.gKeep, s2, c.selectedOut, numEdges, c.stream());
cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d1, c.gKeep, d2, c.selectedOut, numEdges, c.stream());
CUDA_RUNTIME(cudaFree(d_temp_storage));
cudaDeviceSynchronize();
CUDA_RUNTIME(cudaGetLastError());
printf("uset for Source row and column\n");
CUDA_RUNTIME(cudaMemAdvise(s1, numEdges * sizeof(UT), cudaMemAdviseUnsetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(cudaMemAdvise(d1, numEdges * sizeof(UT), cudaMemAdviseUnsetReadMostly, 0 /* ignored */));
printf("set for destination row and column\n");
CUDA_RUNTIME(cudaMemAdvise(s2, numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(cudaMemAdvise(d2, numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */));
numEdges = c.selectedOut[0];
edgesPerGPU = (numEdges + numGpus - 1) / numGpus;
printf("Progress: Kmax=%d, fallback=%d, Remaining=%d\n", kmax, fallBackGPU, numEdges);
ptrSrc = s2;
s2 = s1;
s1 = ptrSrc;
ptrDst = d2;
d2 = d1;
d1 = ptrDst;
int edgeStart = 0;
for (auto &counter : trussCounters)
{
counter.setDevice();
counter.InitializeWorkSpace_async(numEdges);
const size_t edgeStop = std::min(edgeStart + edgesPerGPU, numEdges);
const size_t edgesToProcess = edgeStop - edgeStart;
int dimGridLef = (edgesToProcess + dimBlock - 1) / dimBlock;
RebuildArrays<dimBlock><<<dimGridLef,dimBlock,0,counter.stream()>>>(edgeStart, edgesToProcess, numEdges, rowPtr, ptrSrc);
edgeStart += edgesPerGPU;
}
edgeStart = 0;
for (auto &counter : trussCounters)
{
counter.setDevice();
const size_t edgeStop = std::min(edgeStart + edgesPerGPU, numEdges);
const size_t edgesToProcess = edgeStop - edgeStart;
int dimGridLef = (edgesToProcess + dimBlock - 1) / dimBlock;
RebuildReverse<dimBlock><<<dimGridLef,dimBlock,0,counter.stream()>>>(edgeStart, edgesToProcess, rowPtr, ptrSrc, ptrDst, uReversed.data());
edgeStart += edgesPerGPU;
}
printf("set for rowPtr and reversed\n");
CUDA_RUNTIME(cudaMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(cudaMemAdvise(uReversed.data(), numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */));
CUDA_RUNTIME(cudaGetLastError());
}
else{
break;
}
}
//printf("New Kmin = %d, New Kmax=%d\n", newKmin, newKmax);
for (auto &counter : trussCounters)
counter.free();
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
nvtxRangePop();
LOG(info, "count time {}s", elapsed);
LOG(info, "MOHA {} ktruss ({} teps)", kmax, csr.nnz() / elapsed);
times.push_back(elapsed);
//tris = total;
nnz = csr.nnz();
//std::cout << path << ",\t" << nnz << ",\t" << tris;
for (const auto &t : times) {
std::cout << ",\t" << t;
}
std::cout << std::endl;
}
return 0;
}
|
1a9ac3dba458a2ccc670dd0926ba41af0f26c6a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdlib.h>
#include<stdio.h>
#include<time.h>
//global memory reduction
__global__ void global_reduce(float *d_in,float *d_out){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int idxn = threadIdx.x;
for(int s = blockDim.x/2;s>0;s>>=1){
if(idxn<s){
d_in[idx] += d_in[idx+s];
}
__syncthreads();//synchronize
}
if(idxn == 0){
d_out[blockIdx.x] = d_in[idx];
}
}
//shared memory reduction
__global__ void shared_reduce(float *d_in,float *d_out){
extern __shared__ float s_in[];
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int idxn = threadIdx.x;
s_in[idxn] = d_in[idx];
__syncthreads();
for(int s = blockDim.x/2;s>0;s>>=1){
if(idxn<s){
s_in[idxn] += s_in[idxn+s];
}
__syncthreads();//synchronize
}
if(idxn == 0){
d_out[blockIdx.x] = s_in[0];
}
}
void init(float *h_in,const int size){
srand((unsigned int)time(NULL));
for(int i=0;i<size;i++)
h_in[i] =(float)(rand()%101)/100.0f;
}
int main(){
int size = 1024;
float *h_in;
float h_out = 0;
h_in = (float *)malloc(size*size*sizeof(float));
init(h_in,size*size);//initialize input data
time_t t_start = clock();
for(int i=0;i<size*size;i++){
h_out += h_in[i];
}
time_t t_end = clock();
printf("CPU sum:%f\n",h_out);
printf("CPU time:%fms\n",difftime(t_end,t_start));
float *d_in;
float *d_out;
float *d_out_mid;
dim3 block(size);
dim3 thread(size);
hipMalloc((float **)&d_in,size*size*sizeof(float));
hipMalloc((float **)&d_out_mid,size*sizeof(float));
hipMalloc((float **)&d_out,sizeof(float));
hipMemcpy(d_in,h_in,size*size*sizeof(float),hipMemcpyHostToDevice);
t_start = clock();
hipLaunchKernelGGL(( global_reduce), dim3(block),dim3(thread), 0, 0, d_in,d_out_mid);
hipLaunchKernelGGL(( global_reduce), dim3(1),dim3(thread), 0, 0, d_out_mid,d_out);
t_end = clock();
hipMemcpy(&h_out,d_out,sizeof(float),hipMemcpyDeviceToHost);
printf("GPU(global) sum:%f\n",h_out);
printf("GPU(global) time:%fms\n",difftime(t_end,t_start));
hipMemcpy(d_in,h_in,size*size*sizeof(float),hipMemcpyHostToDevice);
t_start = clock();
hipLaunchKernelGGL(( shared_reduce), dim3(block),dim3(thread),size*sizeof(float), 0, d_in,d_out_mid);
hipLaunchKernelGGL(( shared_reduce), dim3(1),dim3(thread),size*sizeof(float), 0, d_out_mid,d_out);
t_end = clock();
hipMemcpy(&h_out,d_out,sizeof(float),hipMemcpyDeviceToHost);
printf("GPU(shared) sum:%f\n",h_out);
printf("GPU(shared) time:%fms\n",difftime(t_end,t_start));
free(h_in);
hipFree(d_in);
hipFree(d_out_mid);
hipFree(d_out);
hipDeviceReset();//reset current device resources
return 0;
}
| 1a9ac3dba458a2ccc670dd0926ba41af0f26c6a8.cu | #include<stdlib.h>
#include<stdio.h>
#include<time.h>
//global memory reduction
__global__ void global_reduce(float *d_in,float *d_out){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int idxn = threadIdx.x;
for(int s = blockDim.x/2;s>0;s>>=1){
if(idxn<s){
d_in[idx] += d_in[idx+s];
}
__syncthreads();//synchronize
}
if(idxn == 0){
d_out[blockIdx.x] = d_in[idx];
}
}
//共享内存
__global__ void shared_reduce(float *d_in,float *d_out){
extern __shared__ float s_in[];
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int idxn = threadIdx.x;
s_in[idxn] = d_in[idx];
__syncthreads();
for(int s = blockDim.x/2;s>0;s>>=1){
if(idxn<s){
s_in[idxn] += s_in[idxn+s];
}
__syncthreads();//synchronize
}
if(idxn == 0){
d_out[blockIdx.x] = s_in[0];
}
}
void init(float *h_in,const int size){
srand((unsigned int)time(NULL));
for(int i=0;i<size;i++)
h_in[i] =(float)(rand()%101)/100.0f;
}
int main(){
int size = 1024;
float *h_in;
float h_out = 0;
h_in = (float *)malloc(size*size*sizeof(float));
init(h_in,size*size);//initialize input data
time_t t_start = clock();
for(int i=0;i<size*size;i++){
h_out += h_in[i];
}
time_t t_end = clock();
printf("CPU sum:%f\n",h_out);
printf("CPU time:%fms\n",difftime(t_end,t_start));
float *d_in;
float *d_out;
float *d_out_mid;
dim3 block(size);
dim3 thread(size);
cudaMalloc((float **)&d_in,size*size*sizeof(float));
cudaMalloc((float **)&d_out_mid,size*sizeof(float));
cudaMalloc((float **)&d_out,sizeof(float));
cudaMemcpy(d_in,h_in,size*size*sizeof(float),cudaMemcpyHostToDevice);
t_start = clock();
global_reduce<<<block,thread>>>(d_in,d_out_mid);
global_reduce<<<1,thread>>>(d_out_mid,d_out);
t_end = clock();
cudaMemcpy(&h_out,d_out,sizeof(float),cudaMemcpyDeviceToHost);
printf("GPU(global) sum:%f\n",h_out);
printf("GPU(global) time:%fms\n",difftime(t_end,t_start));
cudaMemcpy(d_in,h_in,size*size*sizeof(float),cudaMemcpyHostToDevice);
t_start = clock();
shared_reduce<<<block,thread,size*sizeof(float)>>>(d_in,d_out_mid);
shared_reduce<<<1,thread,size*sizeof(float)>>>(d_out_mid,d_out);
t_end = clock();
cudaMemcpy(&h_out,d_out,sizeof(float),cudaMemcpyDeviceToHost);
printf("GPU(shared) sum:%f\n",h_out);
printf("GPU(shared) time:%fms\n",difftime(t_end,t_start));
free(h_in);
cudaFree(d_in);
cudaFree(d_out_mid);
cudaFree(d_out);
cudaDeviceReset();//重置当前资源
return 0;
}
|
c6041c93be35eaba525791994d70ab1f8d503b8f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iterator/iterator.cuh>
#include <table/legacy/device_table.cuh>
#include <table/legacy/device_table_row_operators.cuh>
#include <cudf/utilities/legacy/wrapper_types.hpp>
#include <utilities/column_utils.hpp>
#include <io/utilities/wrapper_utils.hpp>
#include <cudf/search.hpp>
#include <cudf/copying.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
namespace cudf {
namespace {
template <typename DataIterator, typename ValuesIterator, typename Comparator>
void launch_search(DataIterator it_data,
ValuesIterator it_vals,
gdf_size_type data_size,
gdf_size_type values_size,
void* output,
Comparator comp,
bool find_first,
hipStream_t stream)
{
if (find_first) {
thrust::lower_bound(rmm::exec_policy(stream)->on(stream),
it_data, it_data + data_size,
it_vals, it_vals + values_size,
static_cast<gdf_index_type*>(output),
comp);
}
else {
thrust::upper_bound(rmm::exec_policy(stream)->on(stream),
it_data, it_data + data_size,
it_vals, it_vals + values_size,
static_cast<gdf_index_type*>(output),
comp);
}
}
} // namespace
namespace detail {
gdf_column search_ordered(table const& t,
table const& values,
bool find_first,
std::vector<bool> const& desc_flags,
bool nulls_as_largest,
hipStream_t stream = 0)
{
// Allocate result column
gdf_column result_like{};
result_like.dtype = GDF_INT32;
result_like.size = values.num_rows();
result_like.data = values.get_column(0)->data;
auto result = allocate_like(result_like);
// Handle empty inputs
if (t.num_rows() == 0) {
CUDA_TRY(hipMemset(result.data, 0, values.num_rows()));
if (is_nullable(result)) {
CUDA_TRY(hipMemset(result.valid, 0, values.num_rows()));
}
}
auto d_t = device_table::create(t, stream);
auto d_values = device_table::create(values, stream);
auto count_it = thrust::make_counting_iterator(0);
rmm::device_vector<int8_t> dv_desc_flags(desc_flags);
auto d_desc_flags = dv_desc_flags.data().get();
if ( has_nulls(t) ) {
auto ineq_op = (find_first)
? row_inequality_comparator<true>(*d_t, *d_values, !nulls_as_largest, d_desc_flags)
: row_inequality_comparator<true>(*d_values, *d_t, !nulls_as_largest, d_desc_flags);
launch_search(count_it, count_it, t.num_rows(), values.num_rows(), result.data,
ineq_op, find_first, stream);
}
else {
auto ineq_op = (find_first)
? row_inequality_comparator<false>(*d_t, *d_values, !nulls_as_largest, d_desc_flags)
: row_inequality_comparator<false>(*d_values, *d_t, !nulls_as_largest, d_desc_flags);
launch_search(count_it, count_it, t.num_rows(), values.num_rows(), result.data,
ineq_op, find_first, stream);
}
return result;
}
template <bool nullable = true>
struct compare_with_value{
compare_with_value(device_table t, device_table val, bool nulls_are_equal = true)
: compare(t, val, nulls_are_equal) {}
__device__ bool operator()(gdf_index_type i){
return compare(i, 0);
}
row_equality_comparator<nullable> compare;
};
bool contains(gdf_column const& column,
gdf_scalar const& value,
hipStream_t stream = 0)
{
CUDF_EXPECTS(column.dtype == value.dtype, "DTYPE mismatch");
// No element to compare against
if (column.size == 0) {
return false;
}
if (value.is_valid == false){
return cudf::has_nulls(column);
}
// Create column with scalar's data
gdf_column_wrapper val (1, value.dtype, gdf_dtype_extra_info{}, "");
RMM_TRY(RMM_ALLOC(&val.get()->data, cudf::size_of(value.dtype), stream));
CUDA_TRY(hipMemcpyAsync(val.get()->data, (void*) &value.data,
cudf::size_of(value.dtype), hipMemcpyHostToDevice, stream));
gdf_column* tmp_column = const_cast<gdf_column *> (&column);
gdf_column* tmp_value = val.get();
// Creating a single column device table
auto d_t = device_table::create(1, &tmp_column, stream);
auto d_value = device_table::create(1, &tmp_value, stream);
auto data_it = thrust::make_counting_iterator(0);
if (cudf::has_nulls(column)) {
auto eq_op = compare_with_value<true>(*d_t, *d_value, true);
return thrust::any_of(rmm::exec_policy(stream)->on(stream),
data_it, data_it + column.size,
eq_op);
}
else {
auto eq_op = compare_with_value<false>(*d_t, *d_value, true);
return thrust::any_of(rmm::exec_policy(stream)->on(stream),
data_it, data_it + column.size,
eq_op);
}
}
} // namespace detail
gdf_column lower_bound(table const& t,
table const& values,
std::vector<bool> const& desc_flags,
bool nulls_as_largest)
{
return detail::search_ordered(t, values, true, desc_flags, nulls_as_largest);
}
gdf_column upper_bound(table const& t,
table const& values,
std::vector<bool> const& desc_flags,
bool nulls_as_largest)
{
return detail::search_ordered(t, values, false, desc_flags, nulls_as_largest);
}
bool contains(gdf_column const& column, gdf_scalar const& value)
{
return detail::contains(column, value);
}
} // namespace cudf
| c6041c93be35eaba525791994d70ab1f8d503b8f.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iterator/iterator.cuh>
#include <table/legacy/device_table.cuh>
#include <table/legacy/device_table_row_operators.cuh>
#include <cudf/utilities/legacy/wrapper_types.hpp>
#include <utilities/column_utils.hpp>
#include <io/utilities/wrapper_utils.hpp>
#include <cudf/search.hpp>
#include <cudf/copying.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
namespace cudf {
namespace {
template <typename DataIterator, typename ValuesIterator, typename Comparator>
void launch_search(DataIterator it_data,
ValuesIterator it_vals,
gdf_size_type data_size,
gdf_size_type values_size,
void* output,
Comparator comp,
bool find_first,
cudaStream_t stream)
{
if (find_first) {
thrust::lower_bound(rmm::exec_policy(stream)->on(stream),
it_data, it_data + data_size,
it_vals, it_vals + values_size,
static_cast<gdf_index_type*>(output),
comp);
}
else {
thrust::upper_bound(rmm::exec_policy(stream)->on(stream),
it_data, it_data + data_size,
it_vals, it_vals + values_size,
static_cast<gdf_index_type*>(output),
comp);
}
}
} // namespace
namespace detail {
gdf_column search_ordered(table const& t,
table const& values,
bool find_first,
std::vector<bool> const& desc_flags,
bool nulls_as_largest,
cudaStream_t stream = 0)
{
// Allocate result column
gdf_column result_like{};
result_like.dtype = GDF_INT32;
result_like.size = values.num_rows();
result_like.data = values.get_column(0)->data;
auto result = allocate_like(result_like);
// Handle empty inputs
if (t.num_rows() == 0) {
CUDA_TRY(cudaMemset(result.data, 0, values.num_rows()));
if (is_nullable(result)) {
CUDA_TRY(cudaMemset(result.valid, 0, values.num_rows()));
}
}
auto d_t = device_table::create(t, stream);
auto d_values = device_table::create(values, stream);
auto count_it = thrust::make_counting_iterator(0);
rmm::device_vector<int8_t> dv_desc_flags(desc_flags);
auto d_desc_flags = dv_desc_flags.data().get();
if ( has_nulls(t) ) {
auto ineq_op = (find_first)
? row_inequality_comparator<true>(*d_t, *d_values, !nulls_as_largest, d_desc_flags)
: row_inequality_comparator<true>(*d_values, *d_t, !nulls_as_largest, d_desc_flags);
launch_search(count_it, count_it, t.num_rows(), values.num_rows(), result.data,
ineq_op, find_first, stream);
}
else {
auto ineq_op = (find_first)
? row_inequality_comparator<false>(*d_t, *d_values, !nulls_as_largest, d_desc_flags)
: row_inequality_comparator<false>(*d_values, *d_t, !nulls_as_largest, d_desc_flags);
launch_search(count_it, count_it, t.num_rows(), values.num_rows(), result.data,
ineq_op, find_first, stream);
}
return result;
}
template <bool nullable = true>
struct compare_with_value{
compare_with_value(device_table t, device_table val, bool nulls_are_equal = true)
: compare(t, val, nulls_are_equal) {}
__device__ bool operator()(gdf_index_type i){
return compare(i, 0);
}
row_equality_comparator<nullable> compare;
};
bool contains(gdf_column const& column,
gdf_scalar const& value,
cudaStream_t stream = 0)
{
CUDF_EXPECTS(column.dtype == value.dtype, "DTYPE mismatch");
// No element to compare against
if (column.size == 0) {
return false;
}
if (value.is_valid == false){
return cudf::has_nulls(column);
}
// Create column with scalar's data
gdf_column_wrapper val (1, value.dtype, gdf_dtype_extra_info{}, "");
RMM_TRY(RMM_ALLOC(&val.get()->data, cudf::size_of(value.dtype), stream));
CUDA_TRY(cudaMemcpyAsync(val.get()->data, (void*) &value.data,
cudf::size_of(value.dtype), cudaMemcpyHostToDevice, stream));
gdf_column* tmp_column = const_cast<gdf_column *> (&column);
gdf_column* tmp_value = val.get();
// Creating a single column device table
auto d_t = device_table::create(1, &tmp_column, stream);
auto d_value = device_table::create(1, &tmp_value, stream);
auto data_it = thrust::make_counting_iterator(0);
if (cudf::has_nulls(column)) {
auto eq_op = compare_with_value<true>(*d_t, *d_value, true);
return thrust::any_of(rmm::exec_policy(stream)->on(stream),
data_it, data_it + column.size,
eq_op);
}
else {
auto eq_op = compare_with_value<false>(*d_t, *d_value, true);
return thrust::any_of(rmm::exec_policy(stream)->on(stream),
data_it, data_it + column.size,
eq_op);
}
}
} // namespace detail
gdf_column lower_bound(table const& t,
table const& values,
std::vector<bool> const& desc_flags,
bool nulls_as_largest)
{
return detail::search_ordered(t, values, true, desc_flags, nulls_as_largest);
}
gdf_column upper_bound(table const& t,
table const& values,
std::vector<bool> const& desc_flags,
bool nulls_as_largest)
{
return detail::search_ordered(t, values, false, desc_flags, nulls_as_largest);
}
bool contains(gdf_column const& column, gdf_scalar const& value)
{
return detail::contains(column, value);
}
} // namespace cudf
|
3f6a0345f7ade297be71fd398b26f20f29d6923e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "flowprojection_cuda_kernel.cuh"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
#define DEBUG (0)
#ifndef BLOCKDIMX
#define BLOCKDIMX (32)
#endif
#ifndef BLOCKDIMY
#define BLOCKDIMY (16)
#endif
using at::Half;
//forward path of our layer
template <typename scalar_t>
__global__ void FlowProjection_gpu_forward_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float fx = input1[ off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ];
float fy = input1[ off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ];
float x2 = (float) (w_i) + fx;
float y2 = (float) (h_i) + fy;
if(x2>=0.0f && y2 >= 0.0f &&x2 <= (float) ( w-1) && y2 <= (float) (h -1 ) ){
int ix2_L = (int) (x2);
int iy2_T = (int) (y2);
int ix2_R = min(ix2_L + 1, w - 1);
int iy2_B = min(iy2_T + 1, h - 1);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] ,-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ],-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] ,-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ],-fx);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] , -fy);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L], 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] , 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] , 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] , 1);
}
}
return ;
}
template <typename scalar_t>
__global__ void FlowProjectionAveraging_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp =count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp > 0.0f){
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
}
}
return ;
}
template <typename scalar_t>
__global__ void FlowFillhole_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp <= 0.0f){
//search along the four directions,0/90/180/270, until finding at least one
int left_offset = w_i; float left_temp = 0.0f;
while(left_temp == 0.0f && left_offset - 1 >= 0){
left_offset = left_offset - 1;
left_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + left_offset] ;
}
int right_offset = w_i ; float right_temp = 0.0f;
while(right_temp ==0.0f && right_offset + 1 <= w - 1 ){
right_offset = right_offset + 1 ;
right_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + right_offset] ;
}
int up_offset = h_i ; float up_temp = 0.0f;
while(up_temp == 0.0f && up_offset - 1 >=0){
up_offset = up_offset - 1;
up_temp = count[batch_i * count_b_stride + 0 + up_offset * count_h_stride + w_i ] ;
}
int down_offset = h_i; float down_temp = 0.0f;
while(down_temp == 0.0f && down_offset + 1 <= h - 1 ){
down_offset = down_offset + 1;
down_temp = count[batch_i * count_b_stride + 0 + down_offset * count_h_stride + w_i] ;
}
if(left_temp + right_temp + up_temp + down_temp <=0.0f){
//printf("Can't fill hole, find no neighbor vectors availabel\n");
return;
}
left_temp = (left_temp > 0.0f)?1:0;
right_temp = (right_temp > 0.0f)?1:0;
up_temp = (up_temp > 0.0f)?1:0;
down_temp = (down_temp > 0.0f)?1:0;
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] = (
left_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 0 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 0 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] =(
left_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 1 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 1 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
}
}
return ;
}
template <typename scalar_t>
__global__ void FlowProjection_gpu_backward_kernelfunc(
const int nElement, const int w, const int h, const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
const scalar_t* __restrict__ count,
const scalar_t* __restrict__ gradoutput,
scalar_t* gradinput1
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
if(withinXbounds && withinYbounds){
float fx = input1[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float fy = input1[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float x2 = (float) ( w_i ) + fx;
float y2 = (float) ( h_i ) + fy;
if( x2 >=0.0f && y2 >= 0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){
int ix2_L = (int)(x2);
int iy2_T = (int)(y2);
int ix2_R = min(ix2_L + 1, w-1);
int iy2_B = min(iy2_T + 1, h-1);
int iu_offset = off + 0 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0+ iy2_T * count_h_stride + ix2_L] ;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ]/
count[batch_i * count_b_stride +0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0+ iy2_B * count_h_stride + ix2_R] ;
int iv_offset = off + 1 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] ;
}
}
return ;
}
int FlowProjection_gpu_forward_kernel(
hipStream_t stream, const int nElement,
const int w, const int h, const int channel, const int batch, const int fillhole,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1,
at::Tensor& count,
at::Tensor& output
)
{
int error = 1 ;
dim3 grid;
dim3 block;
// blockthread = 128;
//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
// printf("I am here\n");
//extract the data of CudaTensor and use kernel to calculate.
AT_DISPATCH_FLOATING_TYPES(input1.type(), "FlowProjection_gpu_forward_kernelfunc", ([&] {
hipLaunchKernelGGL(( FlowProjection_gpu_forward_kernelfunc), dim3(grid),dim3(block),0, stream ,
nElement, // number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am there\n");
AT_DISPATCH_FLOATING_TYPES(input1.type(), "FlowProjectionAveraging_kernelfunc", ([&] {
hipLaunchKernelGGL(( FlowProjectionAveraging_kernelfunc), dim3(grid),dim3(block),0,stream,
nElement, // number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
// printf("I am kao\n");
// THCudaCheck(hipGetLastError());
err = hipGetLastError();
if (err != hipSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am dd\n");
if(fillhole){
// printf("use flow fill hole\n");
AT_DISPATCH_FLOATING_TYPES(input1.type(), "FlowFillhole_kernelfunc", ([&] {
hipLaunchKernelGGL(( FlowFillhole_kernelfunc), dim3(grid),dim3(block),0,stream,
nElement, // number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
err = hipGetLastError();
if (err != hipSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
return error;
}
}
error = 0;
return error;
}
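// Host-side launcher for the backward pass: routes the output gradient back through the
// four bilinear neighbours of each flow vector, normalized by the same per-pixel count.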
int FlowProjection_gpu_backward_kernel(
hipStream_t stream,
const int nElement,
const int w,
const int h,
const int channel,
const int batch,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1,
at::Tensor& count,
at::Tensor& gradoutput,
at::Tensor& gradinput1
)
{
int error = 1 ;
dim3 grid;
dim3 block;
//blockthread = 128;
//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
AT_DISPATCH_FLOATING_TYPES(input1.type(), "FlowProjection_gpu_backward_kernelfunc", ([&] {
hipLaunchKernelGGL(( FlowProjection_gpu_backward_kernelfunc) , dim3(grid),dim3(block),0, stream,
nElement, // number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),
count.data<scalar_t>(),
gradoutput.data<scalar_t>(),
gradinput1.data<scalar_t>()
);
}));
// printf("gpu I am there\n");
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("gpu error in BilinearSampler.updateGradInput %s\n", hipGetErrorString(err));
//THError("aborting");
return error;
}
// printf("gpu I am here\n");
error = 0;
return error;
}
| 3f6a0345f7ade297be71fd398b26f20f29d6923e.cu | #include <stdio.h>
#include "flowprojection_cuda_kernel.cuh"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
#define DEBUG (0)
#ifndef BLOCKDIMX
#define BLOCKDIMX (32)
#endif
#ifndef BLOCKDIMY
#define BLOCKDIMY (16)
#endif
using at::Half;
//forward path of our layer
template <typename scalar_t>
__global__ void FlowProjection_gpu_forward_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float fx = input1[ off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ];
float fy = input1[ off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ];
float x2 = (float) (w_i) + fx;
float y2 = (float) (h_i) + fy;
if(x2>=0.0f && y2 >= 0.0f &&x2 <= (float) ( w-1) && y2 <= (float) (h -1 ) ){
int ix2_L = (int) (x2);
int iy2_T = (int) (y2);
int ix2_R = min(ix2_L + 1, w - 1);
int iy2_B = min(iy2_T + 1, h - 1);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] ,-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ],-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] ,-fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ],-fx);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] , -fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] , -fy);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L], 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] , 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] , 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] , 1);
}
}
return ;
}
template <typename scalar_t>
__global__ void FlowProjectionAveraging_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp =count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp > 0.0f){
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
}
}
return ;
}
template <typename scalar_t>
__global__ void FlowFillhole_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp <= 0.0f){
//search along the four directions,0/90/180/270, until finding at least one
int left_offset = w_i; float left_temp = 0.0f;
while(left_temp == 0.0f && left_offset - 1 >= 0){
left_offset = left_offset - 1;
left_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + left_offset] ;
}
int right_offset = w_i ; float right_temp = 0.0f;
while(right_temp ==0.0f && right_offset + 1 <= w - 1 ){
right_offset = right_offset + 1 ;
right_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + right_offset] ;
}
int up_offset = h_i ; float up_temp = 0.0f;
while(up_temp == 0.0f && up_offset - 1 >=0){
up_offset = up_offset - 1;
up_temp = count[batch_i * count_b_stride + 0 + up_offset * count_h_stride + w_i ] ;
}
int down_offset = h_i; float down_temp = 0.0f;
while(down_temp == 0.0f && down_offset + 1 <= h - 1 ){
down_offset = down_offset + 1;
down_temp = count[batch_i * count_b_stride + 0 + down_offset * count_h_stride + w_i] ;
}
if(left_temp + right_temp + up_temp + down_temp <=0.0f){
//printf("Can't fill hole, find no neighbor vectors availabel\n");
return;
}
left_temp = (left_temp > 0.0f)?1:0;
right_temp = (right_temp > 0.0f)?1:0;
up_temp = (up_temp > 0.0f)?1:0;
down_temp = (down_temp > 0.0f)?1:0;
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] = (
left_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 0 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 0 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] =(
left_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 1 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 1 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
}
}
return ;
}
template <typename scalar_t>
__global__ void FlowProjection_gpu_backward_kernelfunc(
const int nElement, const int w, const int h, const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1,
const scalar_t* __restrict__ count,
const scalar_t* __restrict__ gradoutput,
scalar_t* gradinput1
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
if(withinXbounds && withinYbounds){
float fx = input1[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float fy = input1[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float x2 = (float) ( w_i ) + fx;
float y2 = (float) ( h_i ) + fy;
if( x2 >=0.0f && y2 >= 0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){
int ix2_L = (int)(x2);
int iy2_T = (int)(y2);
int ix2_R = min(ix2_L + 1, w-1);
int iy2_B = min(iy2_T + 1, h-1);
int iu_offset = off + 0 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0+ iy2_T * count_h_stride + ix2_L] ;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ]/
count[batch_i * count_b_stride +0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0+ iy2_B * count_h_stride + ix2_R] ;
int iv_offset = off + 1 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] ;
}
}
return ;
}
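// Host-side launcher for the forward pass: scatters each (negated) flow vector to its
// four bilinear neighbours while accumulating a per-pixel hit count, normalizes by that
// count, and optionally fills pixels that received no projection.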
int FlowProjection_gpu_forward_kernel(
cudaStream_t stream, const int nElement,
const int w, const int h, const int channel, const int batch, const int fillhole,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1,
at::Tensor& count,
at::Tensor& output
)
{
int error = 1 ;
dim3 grid;
dim3 block;
// blockthread = 128;
//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
// printf("I am here\n");
//extract the data of CudaTensor and use kernel to calculate.
AT_DISPATCH_FLOATING_TYPES(input1.type(), "FlowProjection_gpu_forward_kernelfunc", ([&] {
FlowProjection_gpu_forward_kernelfunc<<<grid,block,0, stream >>>(
nElement, // number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am there\n");
AT_DISPATCH_FLOATING_TYPES(input1.type(), "FlowProjectionAveraging_kernelfunc", ([&] {
FlowProjectionAveraging_kernelfunc<<<grid,block,0,stream>>>(
nElement, // number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
// printf("I am kao\n");
// THCudaCheck(cudaGetLastError());
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am dd\n");
if(fillhole){
// printf("use flow fill hole\n");
AT_DISPATCH_FLOATING_TYPES(input1.type(), "FlowFillhole_kernelfunc", ([&] {
FlowFillhole_kernelfunc<<<grid,block,0,stream>>>(
nElement, // number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
return error;
}
}
error = 0;
return error;
}
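// Host-side launcher for the backward pass: routes the output gradient back through the
// four bilinear neighbours of each flow vector, normalized by the same per-pixel count.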
int FlowProjection_gpu_backward_kernel(
cudaStream_t stream,
const int nElement,
const int w,
const int h,
const int channel,
const int batch,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1,
at::Tensor& count,
at::Tensor& gradoutput,
at::Tensor& gradinput1
)
{
int error = 1 ;
dim3 grid;
dim3 block;
//blockthread = 128;
//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
AT_DISPATCH_FLOATING_TYPES(input1.type(), "FlowProjection_gpu_backward_kernelfunc", ([&] {
FlowProjection_gpu_backward_kernelfunc <<<grid,block,0, stream>>>(
nElement, // number of elements
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),
count.data<scalar_t>(),
gradoutput.data<scalar_t>(),
gradinput1.data<scalar_t>()
);
}));
// printf("gpu I am there\n");
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpu error in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
// printf("gpu I am here\n");
error = 0;
return error;
}
|
d1e06613c7e75af367d7827b40d0ac6ce6cae0b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hetu_gpu_table.h"
#include <cmath>
#include <hip/hip_runtime.h>
#include <hipcub/hipcub.hpp>
#include "common/helper_cuda.h"
namespace hetuCTR {
// This computes keys as <root_id, embedding_id>
__global__ void generate_sort_kv_kernel(HetuTable *tbl) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < tbl->cur_batch_.batch_size) {
index_t embedding_idx = tbl->cur_batch_.d_idx[id];
assert(embedding_idx < tbl->kEmbeddingIDMax);
worker_t r = tbl->d_root_[embedding_idx];
tbl->cur_batch_.d_idx_map[id] = embedding_idx + tbl->kEmbeddingIDMax * r;
tbl->cur_batch_.d_offset[id] = id;
}
}
__global__ void block_cvt_offset_to_shape_kernel(size_t *dst) {
size_t id = threadIdx.x;
size_t n = blockDim.x;
extern __shared__ size_t shm[];
size_t val = dst[id];
shm[id] = val;
__syncthreads();
size_t val_nxt = id == n - 1 ? val : shm[id + 1];
assert(val_nxt >= val);
dst[id] = val_nxt - val;
}
__global__ void write_sort_result_kernel(HetuTable *tbl) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < tbl->cur_batch_.batch_size) {
index_t arg = tbl->cur_batch_.d_sorted_arg[id];
index_t embedding_idx = tbl->cur_batch_.d_idx[arg];
tbl->cur_batch_.d_offset[id] = embedding_idx;
}
}
// This will compute cur_batch_.d_idx_map
// cur_batch_.d_root cur_batch_.u_shape
__global__ void preprocess_batch_data_kernel(HetuTable *tbl) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t n = tbl->cur_batch_.unique_size;
if (id < n) {
index_t uid = tbl->cur_batch_.d_unique_idx[id];
int r = tbl->d_root_[uid], r_prev;
tbl->cur_batch_.d_root[id] = r;
auto iter = tbl->table_->find(uid);
if (iter == tbl->table_->end()) {
tbl->cur_batch_.d_offset[id] = kInvalidIndex;
} else {
tbl->cur_batch_.d_offset[id] = iter->second;
}
if (id == 0) r_prev = -1;
else r_prev = tbl->d_root_[tbl->cur_batch_.d_unique_idx[id - 1]];
for (int i = r_prev + 1; i <= r; i++) {
tbl->cur_batch_.u_shape[i] = id;
}
if (id == n - 1) {
for (int i = r + 1; i <= tbl->nrank_; i++) {
tbl->cur_batch_.u_shape[i] = n;
}
}
}
}
__device__ index_t lowerBound(const index_t *data, size_t start, size_t last, index_t target) {
while (start < last) {
index_t mid = (start + last) / 2;
if (data[mid] >= target) last = mid;
else start = mid + 1;
}
return start;
}
// This computes where we can find the unique index from the original index
__global__ void compute_idx_map_kernel(HetuTable *tbl) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t n = tbl->cur_batch_.unique_size;
if (id < tbl->cur_batch_.batch_size) {
index_t embedding_idx = tbl->cur_batch_.d_idx[id];
worker_t root = tbl->d_root_[embedding_idx];
tbl->cur_batch_.d_idx_map[id] = lowerBound(tbl->cur_batch_.d_unique_idx,
tbl->cur_batch_.u_shape[root], tbl->cur_batch_.u_shape[root + 1], embedding_idx);
}
}
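// Preprocess a batch of embedding indices: sort by <root worker, embedding id>, deduplicate,
// look up local storage offsets, and exchange the per-root shapes with the other workers.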
void HetuTable::preprocessIndex(index_t *data, size_t batch_size) {
if (batch_size == 0)
checkCudaErrors(hipMemsetAsync(
cur_batch_.u_shape, 0, sizeof(size_t) * (nrank_ + 1), stream_main_));
// Copy batch embedding index data into Device
checkCudaErrors(hipMemcpyAsync(
cur_batch_.d_idx, data, sizeof(index_t) * batch_size, hipMemcpyDefault, stream_main_));
// use unused memory here to store temp sort keys
hipLaunchKernelGGL(( generate_sort_kv_kernel), dim3(DIM_GRID(batch_size)), dim3(DIM_BLOCK), 0, stream_main_, d_this);
// we don't need to sort all the bits when using radix sort.
// using end_bit smaller than 64 can yield corresponding performance improvement
int end_bit = ::ceil(std::log2(kEmbeddingIDMax * nrank_));
// store a temporary result in the (currently unused) d_offset buffer
checkCudaErrors(hipcub::DeviceRadixSort::SortPairs(
d_temp_, temp_bytes_, cur_batch_.d_idx_map, cur_batch_.d_unique_idx, cur_batch_.d_offset, cur_batch_.d_sorted_arg,
batch_size, 0, end_bit, stream_main_));
// After argsort write value to d_offset (temp, modify in next step)
hipLaunchKernelGGL(( write_sort_result_kernel), dim3(DIM_GRID(batch_size)), dim3(DIM_BLOCK), 0, stream_main_, d_this);
// perform unique operation, store total number of unique embedding items;
checkCudaErrors(hipcub::DeviceRunLengthEncode::Encode(
d_temp_, temp_bytes_, cur_batch_.d_offset, cur_batch_.d_unique_idx, cur_batch_.d_run_length,
&(d_this->cur_batch_.unique_size), batch_size, stream_main_));
checkCudaErrors(hipMemcpyAsync(&cur_batch_.unique_size, &(d_this->cur_batch_.unique_size),
sizeof(size_t), hipMemcpyDeviceToHost, stream_main_));
// Store the prefix sum of the run lengths; this will be used in gradient reduction
// ideally we would scan only [0, unique_size), but we don't want to sync here
checkCudaErrors(hipcub::DeviceScan::ExclusiveSum(d_temp_, temp_bytes_,
cur_batch_.d_run_length, cur_batch_.d_run_length, cur_batch_.batch_size + 1, stream_main_));
// Computes other preprocess data
hipLaunchKernelGGL(( preprocess_batch_data_kernel), dim3(DIM_GRID(cur_batch_.batch_size)), dim3(DIM_BLOCK), 0, stream_main_, d_this);
hipLaunchKernelGGL(( compute_idx_map_kernel), dim3(DIM_GRID(cur_batch_.batch_size)), dim3(DIM_BLOCK), 0, stream_main_, d_this);
// convert offset to shape
hipLaunchKernelGGL(( block_cvt_offset_to_shape_kernel), dim3(1), dim3(nrank_ + 1),
sizeof(size_t) * (nrank_ + 1), stream_main_, cur_batch_.u_shape);
// exchange shape with other workers
all2allExchangeShape(cur_batch_.u_shape, cur_batch_.u_shape_exchanged);
checkCudaErrors(hipMemcpyAsync(cur_batch_.h_shape, cur_batch_.u_shape,
sizeof(size_t) * (nrank_ + 1), hipMemcpyDeviceToHost, stream_main_));
checkCudaErrors(hipMemcpyAsync(cur_batch_.h_shape_exchanged, cur_batch_.u_shape_exchanged,
sizeof(size_t) * (nrank_ + 1), hipMemcpyDeviceToHost, stream_main_));
}
// figure out all gradients to push
// 1. compute d_need_update_ as 0 or 1
// 2. update d_version_ (stored and root=self)
// 3. update d_updates_ (stored and root!=self)
//
__global__ void decide_update_kernel(HetuTable *tbl) {
const size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < tbl->prev_batch_.unique_size) {
version_t update_new = tbl->prev_batch_.d_run_length[id + 1] - tbl->prev_batch_.d_run_length[id];
index_t offset = tbl->prev_batch_.d_offset[id];
if (tbl->prev_batch_.d_root[id] == tbl->rank_) {
tbl->d_need_update_[id] = 0;
} else if (offset == kInvalidIndex) {
tbl->d_need_update_[id] = 1;
} else {
// assert(offset < tbl->kNonLocalStorageMax);
version_t update_local = tbl->d_updates_[offset];
tbl->d_need_update_[id] = update_local + update_new <= tbl->push_bound_ ? 0 : 1;
}
if (tbl->d_need_update_[id])
atomicAdd(&tbl->prev_batch_.u_shape[tbl->prev_batch_.d_root[id]], 1);
}
}
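// Preprocess the previous batch's gradients: mark which unique embeddings must be pushed,
// build their prefix-sum slots in the communication buffer, and exchange per-root shapes.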
void HetuTable::preprocessGradient() {
checkCudaErrors(hipMemsetAsync(prev_batch_.u_shape, 0, nrank_ * sizeof(size_t), stream_main_));
size_t num_unique = prev_batch_.unique_size;
hipLaunchKernelGGL(( decide_update_kernel), dim3(DIM_GRID(num_unique)), dim3(DIM_BLOCK), 0, stream_main_, d_this);
// d_update_prefix_[i] stores which index maps to the gradient communication slot i
checkCudaErrors(hipcub::DeviceScan::ExclusiveSum(d_temp_, temp_bytes_,
d_need_update_, d_update_prefix_, num_unique, stream_main_));
all2allExchangeShape(prev_batch_.u_shape, prev_batch_.u_shape_exchanged);
checkCudaErrors(hipMemcpyAsync(prev_batch_.h_shape, prev_batch_.u_shape,
sizeof(size_t) * (nrank_ + 1), hipMemcpyDeviceToHost, stream_main_));
checkCudaErrors(hipMemcpyAsync(prev_batch_.h_shape_exchanged, prev_batch_.u_shape_exchanged,
sizeof(size_t) * (nrank_ + 1), hipMemcpyDeviceToHost, stream_main_));
}
} // namespace hetuCTR
| d1e06613c7e75af367d7827b40d0ac6ce6cae0b9.cu | #include "hetu_gpu_table.h"
#include <cmath>
#include <cuda_runtime.h>
#include <cub/cub.cuh>
#include "common/helper_cuda.h"
namespace hetuCTR {
// This computes keys as <root_id, embedding_id>
__global__ void generate_sort_kv_kernel(HetuTable *tbl) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < tbl->cur_batch_.batch_size) {
index_t embedding_idx = tbl->cur_batch_.d_idx[id];
assert(embedding_idx < tbl->kEmbeddingIDMax);
worker_t r = tbl->d_root_[embedding_idx];
tbl->cur_batch_.d_idx_map[id] = embedding_idx + tbl->kEmbeddingIDMax * r;
tbl->cur_batch_.d_offset[id] = id;
}
}
__global__ void block_cvt_offset_to_shape_kernel(size_t *dst) {
size_t id = threadIdx.x;
size_t n = blockDim.x;
extern __shared__ size_t shm[];
size_t val = dst[id];
shm[id] = val;
__syncthreads();
size_t val_nxt = id == n - 1 ? val : shm[id + 1];
assert(val_nxt >= val);
dst[id] = val_nxt - val;
}
__global__ void write_sort_result_kernel(HetuTable *tbl) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < tbl->cur_batch_.batch_size) {
index_t arg = tbl->cur_batch_.d_sorted_arg[id];
index_t embedding_idx = tbl->cur_batch_.d_idx[arg];
tbl->cur_batch_.d_offset[id] = embedding_idx;
}
}
// This will compute cur_batch_.d_idx_map
// cur_batch_.d_root cur_batch_.u_shape
__global__ void preprocess_batch_data_kernel(HetuTable *tbl) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t n = tbl->cur_batch_.unique_size;
if (id < n) {
index_t uid = tbl->cur_batch_.d_unique_idx[id];
int r = tbl->d_root_[uid], r_prev;
tbl->cur_batch_.d_root[id] = r;
auto iter = tbl->table_->find(uid);
if (iter == tbl->table_->end()) {
tbl->cur_batch_.d_offset[id] = kInvalidIndex;
} else {
tbl->cur_batch_.d_offset[id] = iter->second;
}
if (id == 0) r_prev = -1;
else r_prev = tbl->d_root_[tbl->cur_batch_.d_unique_idx[id - 1]];
for (int i = r_prev + 1; i <= r; i++) {
tbl->cur_batch_.u_shape[i] = id;
}
if (id == n - 1) {
for (int i = r + 1; i <= tbl->nrank_; i++) {
tbl->cur_batch_.u_shape[i] = n;
}
}
}
}
__device__ index_t lowerBound(const index_t *data, size_t start, size_t last, index_t target) {
while (start < last) {
index_t mid = (start + last) / 2;
if (data[mid] >= target) last = mid;
else start = mid + 1;
}
return start;
}
// This computes where we can find the unique index from the original index
__global__ void compute_idx_map_kernel(HetuTable *tbl) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t n = tbl->cur_batch_.unique_size;
if (id < tbl->cur_batch_.batch_size) {
index_t embedding_idx = tbl->cur_batch_.d_idx[id];
worker_t root = tbl->d_root_[embedding_idx];
tbl->cur_batch_.d_idx_map[id] = lowerBound(tbl->cur_batch_.d_unique_idx,
tbl->cur_batch_.u_shape[root], tbl->cur_batch_.u_shape[root + 1], embedding_idx);
}
}
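// Preprocess a batch of embedding indices: sort by <root worker, embedding id>, deduplicate,
// look up local storage offsets, and exchange the per-root shapes with the other workers.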
void HetuTable::preprocessIndex(index_t *data, size_t batch_size) {
if (batch_size == 0)
checkCudaErrors(cudaMemsetAsync(
cur_batch_.u_shape, 0, sizeof(size_t) * (nrank_ + 1), stream_main_));
// Copy batch embedding index data into Device
checkCudaErrors(cudaMemcpyAsync(
cur_batch_.d_idx, data, sizeof(index_t) * batch_size, cudaMemcpyDefault, stream_main_));
// use unused memory here to store temp sort keys
generate_sort_kv_kernel<<<DIM_GRID(batch_size), DIM_BLOCK, 0, stream_main_>>>(d_this);
// we don't need to sort all the bits when using radix sort.
// using end_bit smaller than 64 can yield corresponding performance improvement
int end_bit = std::ceil(std::log2(kEmbeddingIDMax * nrank_));
// store a temporary result in the (currently unused) d_offset buffer
checkCudaErrors(cub::DeviceRadixSort::SortPairs(
d_temp_, temp_bytes_, cur_batch_.d_idx_map, cur_batch_.d_unique_idx, cur_batch_.d_offset, cur_batch_.d_sorted_arg,
batch_size, 0, end_bit, stream_main_));
// After argsort write value to d_offset (temp, modify in next step)
write_sort_result_kernel<<<DIM_GRID(batch_size), DIM_BLOCK, 0, stream_main_>>>(d_this);
// perform unique operation, store total number of unique embedding items;
checkCudaErrors(cub::DeviceRunLengthEncode::Encode(
d_temp_, temp_bytes_, cur_batch_.d_offset, cur_batch_.d_unique_idx, cur_batch_.d_run_length,
&(d_this->cur_batch_.unique_size), batch_size, stream_main_));
checkCudaErrors(cudaMemcpyAsync(&cur_batch_.unique_size, &(d_this->cur_batch_.unique_size),
sizeof(size_t), cudaMemcpyDeviceToHost, stream_main_));
// Store the prefix sum of the run lengths; this will be used in gradient reduction
// ideally we would scan only [0, unique_size), but we don't want to sync here
checkCudaErrors(cub::DeviceScan::ExclusiveSum(d_temp_, temp_bytes_,
cur_batch_.d_run_length, cur_batch_.d_run_length, cur_batch_.batch_size + 1, stream_main_));
// Computes other preprocess data
preprocess_batch_data_kernel<<<DIM_GRID(cur_batch_.batch_size), DIM_BLOCK, 0, stream_main_>>>(d_this);
compute_idx_map_kernel<<<DIM_GRID(cur_batch_.batch_size), DIM_BLOCK, 0, stream_main_>>>(d_this);
// convert offset to shape
block_cvt_offset_to_shape_kernel<<<1, nrank_ + 1,
sizeof(size_t) * (nrank_ + 1), stream_main_>>>(cur_batch_.u_shape);
// exchange shape with other workers
all2allExchangeShape(cur_batch_.u_shape, cur_batch_.u_shape_exchanged);
checkCudaErrors(cudaMemcpyAsync(cur_batch_.h_shape, cur_batch_.u_shape,
sizeof(size_t) * (nrank_ + 1), cudaMemcpyDeviceToHost, stream_main_));
checkCudaErrors(cudaMemcpyAsync(cur_batch_.h_shape_exchanged, cur_batch_.u_shape_exchanged,
sizeof(size_t) * (nrank_ + 1), cudaMemcpyDeviceToHost, stream_main_));
}
// figure out all gradients to push
// 1. compute d_need_update_ as 0 or 1
// 2. update d_version_ (stored and root=self)
// 3. update d_updates_ (stored and root!=self)
//
__global__ void decide_update_kernel(HetuTable *tbl) {
const size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < tbl->prev_batch_.unique_size) {
version_t update_new = tbl->prev_batch_.d_run_length[id + 1] - tbl->prev_batch_.d_run_length[id];
index_t offset = tbl->prev_batch_.d_offset[id];
if (tbl->prev_batch_.d_root[id] == tbl->rank_) {
tbl->d_need_update_[id] = 0;
} else if (offset == kInvalidIndex) {
tbl->d_need_update_[id] = 1;
} else {
// assert(offset < tbl->kNonLocalStorageMax);
version_t update_local = tbl->d_updates_[offset];
tbl->d_need_update_[id] = update_local + update_new <= tbl->push_bound_ ? 0 : 1;
}
if (tbl->d_need_update_[id])
atomicAdd(&tbl->prev_batch_.u_shape[tbl->prev_batch_.d_root[id]], 1);
}
}
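// Preprocess the previous batch's gradients: mark which unique embeddings must be pushed,
// build their prefix-sum slots in the communication buffer, and exchange per-root shapes.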
void HetuTable::preprocessGradient() {
checkCudaErrors(cudaMemsetAsync(prev_batch_.u_shape, 0, nrank_ * sizeof(size_t), stream_main_));
size_t num_unique = prev_batch_.unique_size;
decide_update_kernel<<<DIM_GRID(num_unique), DIM_BLOCK, 0, stream_main_>>>(d_this);
// d_update_prefix_[i] stores which index maps to the gradient communication slot i
checkCudaErrors(cub::DeviceScan::ExclusiveSum(d_temp_, temp_bytes_,
d_need_update_, d_update_prefix_, num_unique, stream_main_));
all2allExchangeShape(prev_batch_.u_shape, prev_batch_.u_shape_exchanged);
checkCudaErrors(cudaMemcpyAsync(prev_batch_.h_shape, prev_batch_.u_shape,
sizeof(size_t) * (nrank_ + 1), cudaMemcpyDeviceToHost, stream_main_));
checkCudaErrors(cudaMemcpyAsync(prev_batch_.h_shape_exchanged, prev_batch_.u_shape_exchanged,
sizeof(size_t) * (nrank_ + 1), cudaMemcpyDeviceToHost, stream_main_));
}
} // namespace hetuCTR
|
0394f5e3b8bdab8563f6a2a7aa4533e8e61ffbc0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorTransformations.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <c10/macros/Macros.h>
#include <cstddef>
#include <vector>
namespace at {
namespace native {
constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
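// Generic flip kernel: decompose the linear index into per-dimension coordinates via the
// contiguous strides, mirror the coordinates of every dimension listed in flip_dims, and
// gather from the input at the offset computed with its original strides.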
template <typename scalar_t>
__global__
void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size,
int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
// Flip tensor given a list of dims
Tensor flip_cuda(const Tensor& self, IntArrayRef dims) {
auto in_tensor = self;
const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel();
flip_check_errors(total_dims, flip_dims_size, dims);
int64_t block_size = 512;
dim3 dim_block(block_size);
dim3 dim_grid((N + block_size - 1) / block_size);
auto out_tensor = at::empty_like(in_tensor);
if (out_tensor.numel() == 0) {
return out_tensor;
}
auto flip_dims = dims.vec();
wrap_all_dims(flip_dims, total_dims);
// use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work
if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, in_tensor.scalar_type(), "flip_cuda", [&] {
auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor);
auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor);
int flip_dim = in_tensor_info.collapseDims(flip_dims[0]);
out_tensor_info.collapseDims(flip_dims[0]);
hipLaunchKernelGGL(( kernel_pointwise_flip_apply2<scalar_t, int64_t>)
, dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor_info, out_tensor_info, N, flip_dim, total_dims);
});
return out_tensor;
}
auto flip_dims_t = at::CPU(kLong).tensorFromBlob(flip_dims.data(), {static_cast<int64_t>(flip_dims.size())});
auto shape = in_tensor.sizes().vec();
auto shape_t = at::CPU(kLong).tensorFromBlob(shape.data(), {static_cast<int64_t>(shape.size())});
auto strides = in_tensor.strides().vec();
auto strides_t = at::CPU(kLong).tensorFromBlob(strides.data(), {static_cast<int64_t>(strides.size())});
// stride_contiguous is the stride of non-contiguous tensor after calling contiguous(),
// it is used to compute indices for each element in non-contiguous tensor
Tensor stride_contiguous = at::zeros({total_dims}, kLong);
int64_t* stride_contiguous_d = stride_contiguous.data<int64_t>();
for (int64_t i = total_dims - 1; i >= 0; i--) {
if (i == total_dims - 1) {
stride_contiguous_d[i] = 1;
} else {
stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1];
}
}
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, in_tensor.scalar_type(), "flip_cuda", [&] {
hipLaunchKernelGGL(( flip_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, flip_dims_t.toType(CUDA(kLong)).data<int64_t>(), flip_dims_size,
strides_t.toType(CUDA(kLong)).data<int64_t>(), stride_contiguous.toType(CUDA(kLong)).data<int64_t>(), shape_t.toType(CUDA(kLong)).data<int64_t>(), total_dims);
});
return out_tensor;
}
template <typename scalar_t>
__global__
void roll_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N,
int64_t roll_dim, int64_t start,
int64_t size, int64_t stride, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
// roll dim idx is the index of linear_index along the rolling dimension.
int64_t roll_dim_idx = linear_index % (stride * size) / stride;
// index into the source data to find appropriate value.
int64_t source_idx = 0;
if( roll_dim_idx >= (size - start) ) {
source_idx = linear_index - ((size - start) * stride);
} else {
source_idx = linear_index + (start * stride);
}
out_tensor[linear_index] = in_tensor[source_idx];
}
// Roll a tensor along a dimension
Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
if (dims.size() != 1 || shifts.size() != 1) {
return roll_common(self, shifts, dims);
}
auto in_tensor = self;
if(!self.is_contiguous()) {
in_tensor = self.contiguous();
}
auto out_tensor = at::empty_like(in_tensor);
if (out_tensor.numel() == 0) {
return out_tensor;
}
const int64_t N = in_tensor.numel();
const int64_t dim = dims[0];
const int64_t size = in_tensor.size(dim);
int64_t start = (size - shifts[0]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
if( start < 0 ) start = start + size;
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
AT_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid");
auto total_dims = in_tensor.dim();
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, in_tensor.scalar_type(), "roll_cuda", [&] {
hipLaunchKernelGGL(( roll_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N,
dim, start,
size,
in_tensor.stride(dim),
total_dims);
});
return out_tensor;
}
}} // namespace at::native
| 0394f5e3b8bdab8563f6a2a7aa4533e8e61ffbc0.cu | #include <ATen/native/TensorTransformations.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <c10/macros/Macros.h>
#include <cstddef>
#include <vector>
namespace at {
namespace native {
constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
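// Generic flip kernel: decompose the linear index into per-dimension coordinates via the
// contiguous strides, mirror the coordinates of every dimension listed in flip_dims, and
// gather from the input at the offset computed with its original strides.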
template <typename scalar_t>
__global__
void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size,
int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
// Flip tensor given a list of dims
Tensor flip_cuda(const Tensor& self, IntArrayRef dims) {
auto in_tensor = self;
const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel();
flip_check_errors(total_dims, flip_dims_size, dims);
int64_t block_size = 512;
dim3 dim_block(block_size);
dim3 dim_grid((N + block_size - 1) / block_size);
auto out_tensor = at::empty_like(in_tensor);
if (out_tensor.numel() == 0) {
return out_tensor;
}
auto flip_dims = dims.vec();
wrap_all_dims(flip_dims, total_dims);
// use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work
if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, in_tensor.scalar_type(), "flip_cuda", [&] {
auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor);
auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor);
int flip_dim = in_tensor_info.collapseDims(flip_dims[0]);
out_tensor_info.collapseDims(flip_dims[0]);
kernel_pointwise_flip_apply2<scalar_t, int64_t>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor_info, out_tensor_info, N, flip_dim, total_dims);
});
return out_tensor;
}
auto flip_dims_t = at::CPU(kLong).tensorFromBlob(flip_dims.data(), {static_cast<int64_t>(flip_dims.size())});
auto shape = in_tensor.sizes().vec();
auto shape_t = at::CPU(kLong).tensorFromBlob(shape.data(), {static_cast<int64_t>(shape.size())});
auto strides = in_tensor.strides().vec();
auto strides_t = at::CPU(kLong).tensorFromBlob(strides.data(), {static_cast<int64_t>(strides.size())});
// stride_contiguous is the stride of non-contiguous tensor after calling contiguous(),
// it is used to compute indices for each element in non-contiguous tensor
Tensor stride_contiguous = at::zeros({total_dims}, kLong);
int64_t* stride_contiguous_d = stride_contiguous.data<int64_t>();
for (int64_t i = total_dims - 1; i >= 0; i--) {
if (i == total_dims - 1) {
stride_contiguous_d[i] = 1;
} else {
stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1];
}
}
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, in_tensor.scalar_type(), "flip_cuda", [&] {
flip_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, flip_dims_t.toType(CUDA(kLong)).data<int64_t>(), flip_dims_size,
strides_t.toType(CUDA(kLong)).data<int64_t>(), stride_contiguous.toType(CUDA(kLong)).data<int64_t>(), shape_t.toType(CUDA(kLong)).data<int64_t>(), total_dims);
});
return out_tensor;
}
template <typename scalar_t>
__global__
void roll_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N,
int64_t roll_dim, int64_t start,
int64_t size, int64_t stride, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
// roll dim idx is the index of linear_index along the rolling dimension.
int64_t roll_dim_idx = linear_index % (stride * size) / stride;
// index into the source data to find appropriate value.
int64_t source_idx = 0;
if( roll_dim_idx >= (size - start) ) {
source_idx = linear_index - ((size - start) * stride);
} else {
source_idx = linear_index + (start * stride);
}
out_tensor[linear_index] = in_tensor[source_idx];
}
// Roll a tensor along a dimension
Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
if (dims.size() != 1 || shifts.size() != 1) {
return roll_common(self, shifts, dims);
}
auto in_tensor = self;
if(!self.is_contiguous()) {
in_tensor = self.contiguous();
}
auto out_tensor = at::empty_like(in_tensor);
if (out_tensor.numel() == 0) {
return out_tensor;
}
const int64_t N = in_tensor.numel();
const int64_t dim = dims[0];
const int64_t size = in_tensor.size(dim);
int64_t start = (size - shifts[0]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
if( start < 0 ) start = start + size;
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
AT_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid");
auto total_dims = in_tensor.dim();
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, in_tensor.scalar_type(), "roll_cuda", [&] {
roll_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N,
dim, start,
size,
in_tensor.stride(dim),
total_dims);
});
return out_tensor;
}
}} // namespace at::native
|
3c4328dc7adb1ad05ccca6693e0becb4cb38e4f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaFlow.h"
__global__
void rgbaToGrayKernel(uchar3 *d_iRgb, float *d_iGray, int width, int height, int stride)
{
int r = blockIdx.y * blockDim.y + threadIdx.y; // current row
int c = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((r < height) && (c < width))
{
int idx = c + stride * r; // current pixel index
uchar3 pixel = d_iRgb[idx];
//d_iGray[idx] = 0.2126f * (float)pixel.x + 0.7152f * (float)pixel.y + 0.0722f * (float)pixel.z;
d_iGray[idx] = ((float)pixel.x + (float)pixel.y + (float)pixel.z)/3;
d_iGray[idx] = d_iGray[idx] / 256.0f;
}
}
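// The remaining conversion kernels differ only in input type and normalization:
// 8-bit values are divided by 256, 16-bit by 65536, Cv16uTo32f keeps the raw value,
// and Cv32fToGray copies the float input directly.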
__global__
void Cv8uToGrayKernel(uchar *d_iCv8u, float *d_iGray, int width, int height, int stride)
{
int r = blockIdx.y * blockDim.y + threadIdx.y; // current row
int c = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((r < height) && (c < width))
{
int idx = c + stride * r; // current pixel index
//d_iGray[idx] = 0.2126f * (float)pixel.x + 0.7152f * (float)pixel.y + 0.0722f * (float)pixel.z;
d_iGray[idx] = (float)d_iCv8u[idx] / 256.0f;
}
}
__global__
void Cv16uToGrayKernel(ushort *d_iCv16u, float *d_iGray, int width, int height, int stride)
{
int r = blockIdx.y * blockDim.y + threadIdx.y; // current row
int c = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((r < height) && (c < width))
{
int idx = c + stride * r; // current pixel index
//d_iGray[idx] = 0.2126f * (float)pixel.x + 0.7152f * (float)pixel.y + 0.0722f * (float)pixel.z;
d_iGray[idx] = (float) d_iCv16u[idx] / 65536.0f;
}
}
__global__
void Cv16uTo32fKernel(ushort *d_iCv16u, float *d_iGray, int width, int height, int stride)
{
int r = blockIdx.y * blockDim.y + threadIdx.y; // current row
int c = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((r < height) && (c < width))
{
int idx = c + stride * r; // current pixel index
//d_iGray[idx] = 0.2126f * (float)pixel.x + 0.7152f * (float)pixel.y + 0.0722f * (float)pixel.z;
d_iGray[idx] = (float)d_iCv16u[idx];
}
}
__global__
void Cv32fToGrayKernel(float *d_iCv32f, float *d_iGray, int width, int height, int stride)
{
int r = blockIdx.y * blockDim.y + threadIdx.y; // current row
int c = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((r < height) && (c < width))
{
int idx = c + stride * r; // current pixel index
d_iGray[idx] = d_iCv32f[idx];
}
}
//convert an RGB image (0,255) to floating-point grayscale (0,1), padded to fit the block model
void CudaFlow::rgbToGray(uchar3 * d_iRgb, float *d_iGray, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
hipLaunchKernelGGL(( rgbaToGrayKernel) , dim3(blocks), dim3(threads) , 0, 0, d_iRgb, d_iGray, w, h, s);
}
void CudaFlow::Cv8uToGray(uchar * d_iCv8u, float *d_iGray, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
Cv8uToGrayKernel << < blocks, threads >> >(d_iCv8u, d_iGray, w, h, s);
}
void CudaFlow::Cv16uToGray(ushort * d_iCv16u, float *d_iGray, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
Cv16uToGrayKernel << < blocks, threads >> >(d_iCv16u, d_iGray, w, h, s);
}
void CudaFlow::Cv16uTo32f(ushort * d_iCv16u, float *d_iGray, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
Cv16uTo32fKernel << < blocks, threads >> >(d_iCv16u, d_iGray, w, h, s);
}
void CudaFlow::Cv32fToGray(float * d_iCv32f, float *d_iGray, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
Cv32fToGrayKernel << < blocks, threads >> >(d_iCv32f, d_iGray, w, h, s);
} | 3c4328dc7adb1ad05ccca6693e0becb4cb38e4f7.cu | #include "CudaFlow.h"
__global__
void rgbaToGrayKernel(uchar3 *d_iRgb, float *d_iGray, int width, int height, int stride)
{
int r = blockIdx.y * blockDim.y + threadIdx.y; // current row
int c = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((r < height) && (c < width))
{
int idx = c + stride * r; // current pixel index
uchar3 pixel = d_iRgb[idx];
//d_iGray[idx] = 0.2126f * (float)pixel.x + 0.7152f * (float)pixel.y + 0.0722f * (float)pixel.z;
d_iGray[idx] = ((float)pixel.x + (float)pixel.y + (float)pixel.z)/3;
d_iGray[idx] = d_iGray[idx] / 256.0f;
}
}
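// The remaining conversion kernels differ only in input type and normalization:
// 8-bit values are divided by 256, 16-bit by 65536, Cv16uTo32f keeps the raw value,
// and Cv32fToGray copies the float input directly.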
__global__
void Cv8uToGrayKernel(uchar *d_iCv8u, float *d_iGray, int width, int height, int stride)
{
int r = blockIdx.y * blockDim.y + threadIdx.y; // current row
int c = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((r < height) && (c < width))
{
int idx = c + stride * r; // current pixel index
//d_iGray[idx] = 0.2126f * (float)pixel.x + 0.7152f * (float)pixel.y + 0.0722f * (float)pixel.z;
d_iGray[idx] = (float)d_iCv8u[idx] / 256.0f;
}
}
__global__
void Cv16uToGrayKernel(ushort *d_iCv16u, float *d_iGray, int width, int height, int stride)
{
int r = blockIdx.y * blockDim.y + threadIdx.y; // current row
int c = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((r < height) && (c < width))
{
int idx = c + stride * r; // current pixel index
//d_iGray[idx] = 0.2126f * (float)pixel.x + 0.7152f * (float)pixel.y + 0.0722f * (float)pixel.z;
d_iGray[idx] = (float) d_iCv16u[idx] / 65536.0f;
}
}
__global__
void Cv16uTo32fKernel(ushort *d_iCv16u, float *d_iGray, int width, int height, int stride)
{
int r = blockIdx.y * blockDim.y + threadIdx.y; // current row
int c = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((r < height) && (c < width))
{
int idx = c + stride * r; // current pixel index
//d_iGray[idx] = 0.2126f * (float)pixel.x + 0.7152f * (float)pixel.y + 0.0722f * (float)pixel.z;
d_iGray[idx] = (float)d_iCv16u[idx];
}
}
__global__
void Cv32fToGrayKernel(float *d_iCv32f, float *d_iGray, int width, int height, int stride)
{
int r = blockIdx.y * blockDim.y + threadIdx.y; // current row
int c = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((r < height) && (c < width))
{
int idx = c + stride * r; // current pixel index
d_iGray[idx] = d_iCv32f[idx];
}
}
//convert an RGB image (0,255) to floating-point grayscale (0,1), padded to fit the block model
void CudaFlow::rgbToGray(uchar3 * d_iRgb, float *d_iGray, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
rgbaToGrayKernel <<< blocks, threads >>>(d_iRgb, d_iGray, w, h, s);
}
void CudaFlow::Cv8uToGray(uchar * d_iCv8u, float *d_iGray, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
Cv8uToGrayKernel << < blocks, threads >> >(d_iCv8u, d_iGray, w, h, s);
}
void CudaFlow::Cv16uToGray(ushort * d_iCv16u, float *d_iGray, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
Cv16uToGrayKernel << < blocks, threads >> >(d_iCv16u, d_iGray, w, h, s);
}
void CudaFlow::Cv16uTo32f(ushort * d_iCv16u, float *d_iGray, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
Cv16uTo32fKernel << < blocks, threads >> >(d_iCv16u, d_iGray, w, h, s);
}
void CudaFlow::Cv32fToGray(float * d_iCv32f, float *d_iGray, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
Cv32fToGrayKernel << < blocks, threads >> >(d_iCv32f, d_iGray, w, h, s);
} |
f92056f141b0fbe2fd24cf6032fe3ba2d93a36fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
//#include "caffe/vision_layers.hpp"
#include "caffe/layers/loc_loss_layer.hpp"
namespace caffe {
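// LocLoss penalizes predicted locations that fall outside [-threshold, threshold]:
// the cost is half the squared distance to the nearest band edge, zero inside the band.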
template <typename Dtype>
__global__ void LocLossForwardGPU(const int nthreads, const Dtype* locs,
Dtype threshold, Dtype* loss_array) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype loss = (Dtype)0;
if(locs[index] < -threshold) {
loss += (locs[index] + threshold) * (locs[index] + threshold) / 2;
} else if(locs[index] > threshold) {
loss += (locs[index] - threshold) * (locs[index] - threshold) / 2;
}
loss_array[index] = loss;
}
}
template <typename Dtype>
void LocLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
string prefix = "LocLossLayer::Forward_gpu::\t";
const Dtype* locs = bottom[0]->gpu_data();
Dtype* loss_array = loss_.mutable_gpu_data();
caffe_gpu_set(loss_.count(), (Dtype)0, loss_array);
const int nthreads = N;
hipLaunchKernelGGL(( LocLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, locs, threshold, loss_array);
Dtype loss;
caffe_gpu_asum(nthreads, loss_array, &loss);
loss /= nthreads;
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
__global__ void LocLossBackwardGPU(const int nthreads, const Dtype* locs,
Dtype threshold, Dtype* dLocs) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(locs[index] < -threshold) {
dLocs[index] = locs[index] + threshold;
} else if(locs[index] > threshold) {
dLocs[index] = locs[index] - threshold;
}
}
}
template <typename Dtype>
void LocLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* locs = bottom[0]->gpu_data();
Dtype* dloc = bottom[0]->mutable_gpu_diff();
const int nthreads = N;
hipLaunchKernelGGL(( LocLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, locs, threshold, dloc);
caffe_gpu_scal(bottom[0]->count(), top[0]->cpu_diff()[0] / nthreads, dloc);
}
INSTANTIATE_LAYER_GPU_FUNCS(LocLossLayer);
} // namespace caffe
| f92056f141b0fbe2fd24cf6032fe3ba2d93a36fa.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
//#include "caffe/vision_layers.hpp"
#include "caffe/layers/loc_loss_layer.hpp"
namespace caffe {
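// LocLoss penalizes coordinates that drift outside [-threshold, threshold]: the
// elementwise loss is 0 inside the band and 0.5 * (|x| - threshold)^2 outside it,
// so the backward kernel's gradient is just the signed overshoot (x +/- threshold).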
template <typename Dtype>
__global__ void LocLossForwardGPU(const int nthreads, const Dtype* locs,
Dtype threshold, Dtype* loss_array) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype loss = (Dtype)0;
if(locs[index] < -threshold) {
loss += (locs[index] + threshold) * (locs[index] + threshold) / 2;
} else if(locs[index] > threshold) {
loss += (locs[index] - threshold) * (locs[index] - threshold) / 2;
}
loss_array[index] = loss;
}
}
template <typename Dtype>
void LocLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
string prefix = "LocLossLayer::Forward_gpu::\t";
const Dtype* locs = bottom[0]->gpu_data();
Dtype* loss_array = loss_.mutable_gpu_data();
caffe_gpu_set(loss_.count(), (Dtype)0, loss_array);
const int nthreads = N;
LocLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, locs, threshold, loss_array);
Dtype loss;
caffe_gpu_asum(nthreads, loss_array, &loss);
loss /= nthreads;
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
__global__ void LocLossBackwardGPU(const int nthreads, const Dtype* locs,
Dtype threshold, Dtype* dLocs) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(locs[index] < -threshold) {
dLocs[index] = locs[index] + threshold;
} else if(locs[index] > threshold) {
dLocs[index] = locs[index] - threshold;
}
}
}
template <typename Dtype>
void LocLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* locs = bottom[0]->gpu_data();
Dtype* dloc = bottom[0]->mutable_gpu_diff();
const int nthreads = N;
LocLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, locs, threshold, dloc);
caffe_gpu_scal(bottom[0]->count(), top[0]->cpu_diff()[0] / nthreads, dloc);
}
INSTANTIATE_LAYER_GPU_FUNCS(LocLossLayer);
} // namespace caffe
|
223b7429a4828b89f549fbba6339d7e00d92c6dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:ASSERTION_ERROR
//--gridDim=1 --blockDim=4 --no-inline
__constant__ int global_constant[4];
__global__ void constant(int *in) {
global_constant[threadIdx.x] = in[threadIdx.x];
}
| 223b7429a4828b89f549fbba6339d7e00d92c6dc.cu | //xfail:ASSERTION_ERROR
//--gridDim=1 --blockDim=4 --no-inline
__constant__ int global_constant[4];
__global__ void constant(int *in) {
global_constant[threadIdx.x] = in[threadIdx.x];
}
|
7a3c6662769402fed6ea7c3b342ac8f9fe8ee111.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#include <iomanip>
#include <iostream>
#include <hip/hip_complex.h>
#include <helper_cuda.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cufinufft/memtransfer.h>
#include <cufinufft/precision_independent.h>
#include <cufinufft/spreadinterp.h>
using namespace cufinufft::common;
using namespace cufinufft::memtransfer;
#include "spreadinterp3d.cuh"
namespace cufinufft {
namespace spreadinterp {
template <typename T>
int cufinufft_spread3d(int nf1, int nf2, int nf3, cuda_complex<T> *d_fw, int M, T *d_kx, T *d_ky, T *d_kz,
cuda_complex<T> *d_c, cufinufft_plan_t<T> *d_plan)
/*
    This C function performs 3D spreading only. See
test/spread3d_test.cu for usage.
Melody Shih 07/25/19
    Does not allocate, transfer, or free the caller's arrays on the GPU; they are
    assumed to already be device pointers. Shih 09/24/20
*/
{
int ier;
d_plan->kx = d_kx;
d_plan->ky = d_ky;
d_plan->kz = d_kz;
d_plan->c = d_c;
d_plan->fw = d_fw;
d_plan->nf1 = nf1;
d_plan->nf2 = nf2;
d_plan->nf3 = nf3;
d_plan->M = M;
d_plan->maxbatchsize = 1;
ier = allocgpumem3d_plan<T>(d_plan);
ier = allocgpumem3d_nupts<T>(d_plan);
if (d_plan->opts.gpu_method == 1) {
ier = cuspread3d_nuptsdriven_prop<T>(nf1, nf2, nf3, M, d_plan);
if (ier != 0) {
printf("error: cuspread3d_nuptsdriven_prop, method(%d)\n", d_plan->opts.gpu_method);
return ier;
}
}
if (d_plan->opts.gpu_method == 2) {
ier = cuspread3d_subprob_prop<T>(nf1, nf2, nf3, M, d_plan);
if (ier != 0) {
printf("error: cuspread3d_subprob_prop, method(%d)\n", d_plan->opts.gpu_method);
return ier;
}
}
if (d_plan->opts.gpu_method == 4) {
ier = cuspread3d_blockgather_prop<T>(nf1, nf2, nf3, M, d_plan);
if (ier != 0) {
printf("error: cuspread3d_blockgather_prop, method(%d)\n", d_plan->opts.gpu_method);
return ier;
}
}
ier = cuspread3d<T>(d_plan, 1);
freegpumemory3d<T>(d_plan);
return ier;
}
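/* Illustrative call (a sketch -- every pointer below is assumed to already be a
   device buffer: M nonuniform points in d_kx/d_ky/d_kz, strengths in d_c, and an
   nf1*nf2*nf3 fine grid in d_fw, with d_plan->opts / d_plan->spopts preconfigured):
       ier = cufinufft_spread3d<float>(nf1, nf2, nf3, d_fw, M,
                                       d_kx, d_ky, d_kz, d_c, d_plan);
*/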
template <typename T>
int cuspread3d(cufinufft_plan_t<T> *d_plan, int blksize)
/*
A wrapper for different spreading methods.
Methods available:
(1) Non-uniform points driven
(2) Subproblem
(4) Block gather
Melody Shih 07/25/19
*/
{
int nf1 = d_plan->nf1;
int nf2 = d_plan->nf2;
int nf3 = d_plan->nf3;
int M = d_plan->M;
int ier = 0;
switch (d_plan->opts.gpu_method) {
case 1: {
ier = cuspread3d_nuptsdriven<T>(nf1, nf2, nf3, M, d_plan, blksize);
if (ier != 0) {
std::cout << "error: cnufftspread3d_gpu_subprob" << std::endl;
return 1;
}
} break;
case 2: {
ier = cuspread3d_subprob<T>(nf1, nf2, nf3, M, d_plan, blksize);
if (ier != 0) {
std::cout << "error: cnufftspread3d_gpu_subprob" << std::endl;
return 1;
}
} break;
case 4: {
ier = cuspread3d_blockgather<T>(nf1, nf2, nf3, M, d_plan, blksize);
if (ier != 0) {
std::cout << "error: cnufftspread3d_gpu_subprob" << std::endl;
return 1;
}
} break;
default:
std::cerr << "error: incorrect method, should be 1,2,4" << std::endl;
return 2;
}
return ier;
}
template <typename T>
int cuspread3d_nuptsdriven_prop(int nf1, int nf2, int nf3, int M, cufinufft_plan_t<T> *d_plan) {
if (d_plan->opts.gpu_sort) {
int bin_size_x = d_plan->opts.gpu_binsizex;
int bin_size_y = d_plan->opts.gpu_binsizey;
int bin_size_z = d_plan->opts.gpu_binsizez;
if (bin_size_x < 0 || bin_size_y < 0 || bin_size_z < 0) {
std::cout << "error: invalid binsize (binsizex, binsizey, binsizez) = (";
std::cout << bin_size_x << "," << bin_size_y << "," << bin_size_z << ")" << std::endl;
return 1;
}
int numbins[3];
numbins[0] = ceil((T)nf1 / bin_size_x);
numbins[1] = ceil((T)nf2 / bin_size_y);
numbins[2] = ceil((T)nf3 / bin_size_z);
T *d_kx = d_plan->kx;
T *d_ky = d_plan->ky;
T *d_kz = d_plan->kz;
int *d_binsize = d_plan->binsize;
int *d_binstartpts = d_plan->binstartpts;
int *d_sortidx = d_plan->sortidx;
int *d_idxnupts = d_plan->idxnupts;
int pirange = d_plan->spopts.pirange;
checkCudaErrors(hipMemset(d_binsize, 0, numbins[0] * numbins[1] * numbins[2] * sizeof(int)));
hipLaunchKernelGGL(( calc_bin_size_noghost_3d), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0, M, nf1, nf2, nf3, bin_size_x, bin_size_y, bin_size_z,
numbins[0], numbins[1], numbins[2], d_binsize, d_kx,
d_ky, d_kz, d_sortidx, pirange);
int n = numbins[0] * numbins[1] * numbins[2];
thrust::device_ptr<int> d_ptr(d_binsize);
thrust::device_ptr<int> d_result(d_binstartpts);
thrust::exclusive_scan(d_ptr, d_ptr + n, d_result);
hipLaunchKernelGGL(( calc_inverse_of_global_sort_index_3d), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0,
M, bin_size_x, bin_size_y, bin_size_z, numbins[0], numbins[1], numbins[2], d_binstartpts, d_sortidx, d_kx,
d_ky, d_kz, d_idxnupts, pirange, nf1, nf2, nf3);
} else {
int *d_idxnupts = d_plan->idxnupts;
hipLaunchKernelGGL(( trivial_global_sort_index_3d), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0, M, d_idxnupts);
}
return 0;
}
template <typename T>
int cuspread3d_nuptsdriven(int nf1, int nf2, int nf3, int M, cufinufft_plan_t<T> *d_plan, int blksize) {
dim3 threadsPerBlock;
dim3 blocks;
int ns = d_plan->spopts.nspread; // psi's support in terms of number of cells
T sigma = d_plan->spopts.upsampfac;
T es_c = d_plan->spopts.ES_c;
T es_beta = d_plan->spopts.ES_beta;
int pirange = d_plan->spopts.pirange;
int *d_idxnupts = d_plan->idxnupts;
T *d_kx = d_plan->kx;
T *d_ky = d_plan->ky;
T *d_kz = d_plan->kz;
cuda_complex<T> *d_c = d_plan->c;
cuda_complex<T> *d_fw = d_plan->fw;
threadsPerBlock.x = 16;
threadsPerBlock.y = 1;
blocks.x = (M + threadsPerBlock.x - 1) / threadsPerBlock.x;
blocks.y = 1;
if (d_plan->opts.gpu_kerevalmeth == 1) {
for (int t = 0; t < blksize; t++) {
hipLaunchKernelGGL(( spread_3d_nupts_driven_horner), dim3(blocks), dim3(threadsPerBlock), 0, 0, d_kx, d_ky, d_kz, d_c + t * M,
d_fw + t * nf1 * nf2 * nf3, M, ns, nf1, nf2, nf3,
sigma, d_idxnupts, pirange);
}
} else {
for (int t = 0; t < blksize; t++) {
hipLaunchKernelGGL(( spread_3d_nupts_driven), dim3(blocks), dim3(threadsPerBlock), 0, 0, d_kx, d_ky, d_kz, d_c + t * M,
d_fw + t * nf1 * nf2 * nf3, M, ns, nf1, nf2, nf3, es_c,
es_beta, d_idxnupts, pirange);
}
}
return 0;
}
template <typename T>
int cuspread3d_blockgather_prop(int nf1, int nf2, int nf3, int M, cufinufft_plan_t<T> *d_plan) {
dim3 threadsPerBlock;
dim3 blocks;
int pirange = d_plan->spopts.pirange;
int maxsubprobsize = d_plan->opts.gpu_maxsubprobsize;
int o_bin_size_x = d_plan->opts.gpu_obinsizex;
int o_bin_size_y = d_plan->opts.gpu_obinsizey;
int o_bin_size_z = d_plan->opts.gpu_obinsizez;
int numobins[3];
if (nf1 % o_bin_size_x != 0 || nf2 % o_bin_size_y != 0 || nf3 % o_bin_size_z != 0) {
std::cout << "error: mod(nf1, opts.gpu_obinsizex) != 0" << std::endl;
std::cout << " mod(nf2, opts.gpu_obinsizey) != 0" << std::endl;
std::cout << " mod(nf3, opts.gpu_obinsizez) != 0" << std::endl;
std::cout << "error: (nf1, nf2, nf3) = (" << nf1 << ", " << nf2 << ", " << nf3 << ")" << std::endl;
std::cout << "error: (obinsizex, obinsizey, obinsizez) = (" << o_bin_size_x << ", " << o_bin_size_y << ", "
<< o_bin_size_z << ")" << std::endl;
return 1;
}
numobins[0] = ceil((T)nf1 / o_bin_size_x);
numobins[1] = ceil((T)nf2 / o_bin_size_y);
numobins[2] = ceil((T)nf3 / o_bin_size_z);
int bin_size_x = d_plan->opts.gpu_binsizex;
int bin_size_y = d_plan->opts.gpu_binsizey;
int bin_size_z = d_plan->opts.gpu_binsizez;
if (o_bin_size_x % bin_size_x != 0 || o_bin_size_y % bin_size_y != 0 || o_bin_size_z % bin_size_z != 0) {
std::cout << "error: mod(ops.gpu_obinsizex, opts.gpu_binsizex) != 0" << std::endl;
std::cout << " mod(ops.gpu_obinsizey, opts.gpu_binsizey) != 0" << std::endl;
std::cout << " mod(ops.gpu_obinsizez, opts.gpu_binsizez) != 0" << std::endl;
std::cout << "error: (binsizex, binsizey, binsizez) = (" << bin_size_x << ", " << bin_size_y << ", "
<< bin_size_z << ")" << std::endl;
std::cout << "error: (obinsizex, obinsizey, obinsizez) = (" << o_bin_size_x << ", " << o_bin_size_y << ", "
<< o_bin_size_z << ")" << std::endl;
return 1;
}
int binsperobinx, binsperobiny, binsperobinz;
int numbins[3];
binsperobinx = o_bin_size_x / bin_size_x + 2;
binsperobiny = o_bin_size_y / bin_size_y + 2;
binsperobinz = o_bin_size_z / bin_size_z + 2;
numbins[0] = numobins[0] * (binsperobinx);
numbins[1] = numobins[1] * (binsperobiny);
numbins[2] = numobins[2] * (binsperobinz);
T *d_kx = d_plan->kx;
T *d_ky = d_plan->ky;
T *d_kz = d_plan->kz;
int *d_binsize = d_plan->binsize;
int *d_sortidx = d_plan->sortidx;
int *d_binstartpts = d_plan->binstartpts;
int *d_numsubprob = d_plan->numsubprob;
int *d_idxnupts = NULL;
int *d_subprobstartpts = d_plan->subprobstartpts;
int *d_subprob_to_bin = NULL;
checkCudaErrors(hipMemset(d_binsize, 0, numbins[0] * numbins[1] * numbins[2] * sizeof(int)));
hipLaunchKernelGGL(( locate_nupts_to_bins_ghost), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0,
M, bin_size_x, bin_size_y, bin_size_z, numobins[0], numobins[1], numobins[2], binsperobinx, binsperobiny,
binsperobinz, d_binsize, d_kx, d_ky, d_kz, d_sortidx, pirange, nf1, nf2, nf3);
threadsPerBlock.x = 8;
threadsPerBlock.y = 8;
threadsPerBlock.z = 8;
blocks.x = (threadsPerBlock.x + numbins[0] - 1) / threadsPerBlock.x;
blocks.y = (threadsPerBlock.y + numbins[1] - 1) / threadsPerBlock.y;
blocks.z = (threadsPerBlock.z + numbins[2] - 1) / threadsPerBlock.z;
hipLaunchKernelGGL(( fill_ghost_bins), dim3(blocks), dim3(threadsPerBlock), 0, 0, binsperobinx, binsperobiny, binsperobinz, numobins[0], numobins[1],
numobins[2], d_binsize);
int n = numbins[0] * numbins[1] * numbins[2];
thrust::device_ptr<int> d_ptr(d_binsize);
thrust::device_ptr<int> d_result(d_binstartpts + 1);
thrust::inclusive_scan(d_ptr, d_ptr + n, d_result);
checkCudaErrors(hipMemset(d_binstartpts, 0, sizeof(int)));
int totalNUpts;
checkCudaErrors(hipMemcpy(&totalNUpts, &d_binstartpts[n], sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipMalloc(&d_idxnupts, totalNUpts * sizeof(int)));
hipLaunchKernelGGL(( calc_inverse_of_global_sort_index_ghost), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0,
M, bin_size_x, bin_size_y, bin_size_z, numobins[0], numobins[1], numobins[2], binsperobinx, binsperobiny,
binsperobinz, d_binstartpts, d_sortidx, d_kx, d_ky, d_kz, d_idxnupts, pirange, nf1, nf2, nf3);
threadsPerBlock.x = 2;
threadsPerBlock.y = 2;
threadsPerBlock.z = 2;
blocks.x = (threadsPerBlock.x + numbins[0] - 1) / threadsPerBlock.x;
blocks.y = (threadsPerBlock.y + numbins[1] - 1) / threadsPerBlock.y;
blocks.z = (threadsPerBlock.z + numbins[2] - 1) / threadsPerBlock.z;
hipLaunchKernelGGL(( ghost_bin_pts_index), dim3(blocks), dim3(threadsPerBlock), 0, 0, binsperobinx, binsperobiny, binsperobinz, numobins[0], numobins[1],
numobins[2], d_binsize, d_idxnupts, d_binstartpts, M);
if (d_plan->idxnupts != NULL)
hipFree(d_plan->idxnupts);
d_plan->idxnupts = d_idxnupts;
/* --------------------------------------------- */
// Determining Subproblem properties //
/* --------------------------------------------- */
n = numobins[0] * numobins[1] * numobins[2];
hipLaunchKernelGGL(( calc_subprob_3d_v1), dim3((n + 1024 - 1) / 1024), dim3(1024), 0, 0, binsperobinx, binsperobiny, binsperobinz, d_binsize,
d_numsubprob, maxsubprobsize,
numobins[0] * numobins[1] * numobins[2]);
n = numobins[0] * numobins[1] * numobins[2];
d_ptr = thrust::device_pointer_cast(d_numsubprob);
d_result = thrust::device_pointer_cast(d_subprobstartpts + 1);
thrust::inclusive_scan(d_ptr, d_ptr + n, d_result);
checkCudaErrors(hipMemset(d_subprobstartpts, 0, sizeof(int)));
int totalnumsubprob;
checkCudaErrors(hipMemcpy(&totalnumsubprob, &d_subprobstartpts[n], sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipMalloc(&d_subprob_to_bin, totalnumsubprob * sizeof(int)));
hipLaunchKernelGGL(( map_b_into_subprob_3d_v1), dim3((n + 1024 - 1) / 1024), dim3(1024), 0, 0, d_subprob_to_bin, d_subprobstartpts, d_numsubprob, n);
assert(d_subprob_to_bin != NULL);
if (d_plan->subprob_to_bin != NULL)
hipFree(d_plan->subprob_to_bin);
d_plan->subprob_to_bin = d_subprob_to_bin;
d_plan->totalnumsubprob = totalnumsubprob;
return 0;
}
template <typename T>
int cuspread3d_blockgather(int nf1, int nf2, int nf3, int M, cufinufft_plan_t<T> *d_plan, int blksize) {
int ns = d_plan->spopts.nspread;
T es_c = d_plan->spopts.ES_c;
T es_beta = d_plan->spopts.ES_beta;
T sigma = d_plan->spopts.upsampfac;
int pirange = d_plan->spopts.pirange;
int maxsubprobsize = d_plan->opts.gpu_maxsubprobsize;
int obin_size_x = d_plan->opts.gpu_obinsizex;
int obin_size_y = d_plan->opts.gpu_obinsizey;
int obin_size_z = d_plan->opts.gpu_obinsizez;
int bin_size_x = d_plan->opts.gpu_binsizex;
int bin_size_y = d_plan->opts.gpu_binsizey;
int bin_size_z = d_plan->opts.gpu_binsizez;
int numobins[3];
numobins[0] = ceil((T)nf1 / obin_size_x);
numobins[1] = ceil((T)nf2 / obin_size_y);
numobins[2] = ceil((T)nf3 / obin_size_z);
int binsperobinx, binsperobiny, binsperobinz;
binsperobinx = obin_size_x / bin_size_x + 2;
binsperobiny = obin_size_y / bin_size_y + 2;
binsperobinz = obin_size_z / bin_size_z + 2;
T *d_kx = d_plan->kx;
T *d_ky = d_plan->ky;
T *d_kz = d_plan->kz;
cuda_complex<T> *d_c = d_plan->c;
cuda_complex<T> *d_fw = d_plan->fw;
int *d_binstartpts = d_plan->binstartpts;
int *d_subprobstartpts = d_plan->subprobstartpts;
int *d_idxnupts = d_plan->idxnupts;
int totalnumsubprob = d_plan->totalnumsubprob;
int *d_subprob_to_bin = d_plan->subprob_to_bin;
for (int t = 0; t < blksize; t++) {
if (d_plan->opts.gpu_kerevalmeth == 1) {
size_t sharedplanorysize = obin_size_x * obin_size_y * obin_size_z * sizeof(cuda_complex<T>);
if (sharedplanorysize > 49152) {
std::cout << "error: not enough shared memory" << std::endl;
return 1;
}
hipLaunchKernelGGL(( spread_3d_block_gather_horner), dim3(totalnumsubprob), dim3(64), sharedplanorysize, 0,
d_kx, d_ky, d_kz, d_c + t * M, d_fw + t * nf1 * nf2 * nf3, M, ns, nf1, nf2, nf3, es_c, es_beta, sigma,
d_binstartpts, obin_size_x, obin_size_y, obin_size_z, binsperobinx * binsperobiny * binsperobinz,
d_subprob_to_bin, d_subprobstartpts, maxsubprobsize, numobins[0], numobins[1], numobins[2], d_idxnupts,
pirange);
} else {
size_t sharedplanorysize = obin_size_x * obin_size_y * obin_size_z * sizeof(cuda_complex<T>);
if (sharedplanorysize > 49152) {
std::cout << "error: not enough shared memory" << std::endl;
return 1;
}
hipLaunchKernelGGL(( spread_3d_block_gather), dim3(totalnumsubprob), dim3(64), sharedplanorysize, 0,
d_kx, d_ky, d_kz, d_c + t * M, d_fw + t * nf1 * nf2 * nf3, M, ns, nf1, nf2, nf3, es_c, es_beta, sigma,
d_binstartpts, obin_size_x, obin_size_y, obin_size_z, binsperobinx * binsperobiny * binsperobinz,
d_subprob_to_bin, d_subprobstartpts, maxsubprobsize, numobins[0], numobins[1], numobins[2], d_idxnupts,
pirange);
}
}
return 0;
}
template <typename T>
int cuspread3d_subprob_prop(int nf1, int nf2, int nf3, int M, cufinufft_plan_t<T> *d_plan) {
int maxsubprobsize = d_plan->opts.gpu_maxsubprobsize;
int bin_size_x = d_plan->opts.gpu_binsizex;
int bin_size_y = d_plan->opts.gpu_binsizey;
int bin_size_z = d_plan->opts.gpu_binsizez;
if (bin_size_x < 0 || bin_size_y < 0 || bin_size_z < 0) {
std::cout << "error: invalid binsize (binsizex, binsizey, binsizez) = (";
std::cout << bin_size_x << "," << bin_size_y << "," << bin_size_z << ")" << std::endl;
return 1;
}
int numbins[3];
numbins[0] = ceil((T)nf1 / bin_size_x);
numbins[1] = ceil((T)nf2 / bin_size_y);
numbins[2] = ceil((T)nf3 / bin_size_z);
T *d_kx = d_plan->kx;
T *d_ky = d_plan->ky;
T *d_kz = d_plan->kz;
int *d_binsize = d_plan->binsize;
int *d_binstartpts = d_plan->binstartpts;
int *d_sortidx = d_plan->sortidx;
int *d_numsubprob = d_plan->numsubprob;
int *d_subprobstartpts = d_plan->subprobstartpts;
int *d_idxnupts = d_plan->idxnupts;
int *d_subprob_to_bin = NULL;
int pirange = d_plan->spopts.pirange;
checkCudaErrors(hipMemset(d_binsize, 0, numbins[0] * numbins[1] * numbins[2] * sizeof(int)));
hipLaunchKernelGGL(( calc_bin_size_noghost_3d), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0, M, nf1, nf2, nf3, bin_size_x, bin_size_y, bin_size_z,
numbins[0], numbins[1], numbins[2], d_binsize, d_kx, d_ky,
d_kz, d_sortidx, pirange);
int n = numbins[0] * numbins[1] * numbins[2];
thrust::device_ptr<int> d_ptr(d_binsize);
thrust::device_ptr<int> d_result(d_binstartpts);
thrust::exclusive_scan(d_ptr, d_ptr + n, d_result);
hipLaunchKernelGGL(( calc_inverse_of_global_sort_index_3d), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0,
M, bin_size_x, bin_size_y, bin_size_z, numbins[0], numbins[1], numbins[2], d_binstartpts, d_sortidx, d_kx, d_ky,
d_kz, d_idxnupts, pirange, nf1, nf2, nf3);
/* --------------------------------------------- */
// Determining Subproblem properties //
/* --------------------------------------------- */
hipLaunchKernelGGL(( calc_subprob_3d_v2), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0, d_binsize, d_numsubprob, maxsubprobsize,
numbins[0] * numbins[1] * numbins[2]);
d_ptr = thrust::device_pointer_cast(d_numsubprob);
d_result = thrust::device_pointer_cast(d_subprobstartpts + 1);
thrust::inclusive_scan(d_ptr, d_ptr + n, d_result);
checkCudaErrors(hipMemset(d_subprobstartpts, 0, sizeof(int)));
int totalnumsubprob;
checkCudaErrors(hipMemcpy(&totalnumsubprob, &d_subprobstartpts[n], sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipMalloc(&d_subprob_to_bin, totalnumsubprob * sizeof(int)));
hipLaunchKernelGGL(( map_b_into_subprob_3d_v2), dim3((numbins[0] * numbins[1] + 1024 - 1) / 1024), dim3(1024), 0, 0,
d_subprob_to_bin, d_subprobstartpts, d_numsubprob, numbins[0] * numbins[1] * numbins[2]);
assert(d_subprob_to_bin != NULL);
if (d_plan->subprob_to_bin != NULL)
hipFree(d_plan->subprob_to_bin);
d_plan->subprob_to_bin = d_subprob_to_bin;
assert(d_plan->subprob_to_bin != NULL);
d_plan->totalnumsubprob = totalnumsubprob;
return 0;
}
template <typename T>
int cuspread3d_subprob(int nf1, int nf2, int nf3, int M, cufinufft_plan_t<T> *d_plan, int blksize) {
int ns = d_plan->spopts.nspread; // psi's support in terms of number of cells
int maxsubprobsize = d_plan->opts.gpu_maxsubprobsize;
// assume that bin_size_x > ns/2;
int bin_size_x = d_plan->opts.gpu_binsizex;
int bin_size_y = d_plan->opts.gpu_binsizey;
int bin_size_z = d_plan->opts.gpu_binsizez;
int numbins[3];
numbins[0] = ceil((T)nf1 / bin_size_x);
numbins[1] = ceil((T)nf2 / bin_size_y);
numbins[2] = ceil((T)nf3 / bin_size_z);
T *d_kx = d_plan->kx;
T *d_ky = d_plan->ky;
T *d_kz = d_plan->kz;
cuda_complex<T> *d_c = d_plan->c;
cuda_complex<T> *d_fw = d_plan->fw;
int *d_binsize = d_plan->binsize;
int *d_binstartpts = d_plan->binstartpts;
int *d_numsubprob = d_plan->numsubprob;
int *d_subprobstartpts = d_plan->subprobstartpts;
int *d_idxnupts = d_plan->idxnupts;
int totalnumsubprob = d_plan->totalnumsubprob;
int *d_subprob_to_bin = d_plan->subprob_to_bin;
T sigma = d_plan->spopts.upsampfac;
T es_c = d_plan->spopts.ES_c;
T es_beta = d_plan->spopts.ES_beta;
int pirange = d_plan->spopts.pirange;
size_t sharedplanorysize = (bin_size_x + 2 * ceil(ns / 2.0)) * (bin_size_y + 2 * ceil(ns / 2.0)) *
(bin_size_z + 2 * ceil(ns / 2.0)) * sizeof(cuda_complex<T>);
if (sharedplanorysize > 49152) {
std::cout << "error: not enough shared memory (" << sharedplanorysize << ")" << std::endl;
return 1;
}
for (int t = 0; t < blksize; t++) {
if (d_plan->opts.gpu_kerevalmeth) {
hipLaunchKernelGGL(( spread_3d_subprob_horner), dim3(totalnumsubprob), dim3(256), sharedplanorysize, 0,
d_kx, d_ky, d_kz, d_c + t * M, d_fw + t * nf1 * nf2 * nf3, M, ns, nf1, nf2, nf3, sigma, d_binstartpts,
d_binsize, bin_size_x, bin_size_y, bin_size_z, d_subprob_to_bin, d_subprobstartpts, d_numsubprob,
maxsubprobsize, numbins[0], numbins[1], numbins[2], d_idxnupts, pirange);
} else {
hipLaunchKernelGGL(( spread_3d_subprob), dim3(totalnumsubprob), dim3(256), sharedplanorysize, 0,
d_kx, d_ky, d_kz, d_c + t * M, d_fw + t * nf1 * nf2 * nf3, M, ns, nf1, nf2, nf3, es_c, es_beta,
d_binstartpts, d_binsize, bin_size_x, bin_size_y, bin_size_z, d_subprob_to_bin, d_subprobstartpts,
d_numsubprob, maxsubprobsize, numbins[0], numbins[1], numbins[2], d_idxnupts, pirange);
}
}
return 0;
}
template int cuspread3d<float>(cufinufft_plan_t<float> *d_plan, int blksize);
template int cuspread3d<double>(cufinufft_plan_t<double> *d_plan, int blksize);
template int cufinufft_spread3d<float>(int nf1, int nf2, int nf3, cuda_complex<float> *d_fw, int M, float *d_kx,
float *d_ky, float *d_kz, cuda_complex<float> *d_c,
cufinufft_plan_t<float> *d_plan);
template int cufinufft_spread3d<double>(int nf1, int nf2, int nf3, cuda_complex<double> *d_fw, int M, double *d_kx,
double *d_ky, double *d_kz, cuda_complex<double> *d_c,
cufinufft_plan_t<double> *d_plan);
} // namespace spreadinterp
} // namespace cufinufft
| 7a3c6662769402fed6ea7c3b342ac8f9fe8ee111.cu | #include <cassert>
#include <iomanip>
#include <iostream>
#include <cuComplex.h>
#include <helper_cuda.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cufinufft/memtransfer.h>
#include <cufinufft/precision_independent.h>
#include <cufinufft/spreadinterp.h>
using namespace cufinufft::common;
using namespace cufinufft::memtransfer;
#include "spreadinterp3d.cuh"
namespace cufinufft {
namespace spreadinterp {
template <typename T>
int cufinufft_spread3d(int nf1, int nf2, int nf3, cuda_complex<T> *d_fw, int M, T *d_kx, T *d_ky, T *d_kz,
cuda_complex<T> *d_c, cufinufft_plan_t<T> *d_plan)
/*
    This C function performs 3D spreading only. See
test/spread3d_test.cu for usage.
Melody Shih 07/25/19
    Does not allocate, transfer, or free the caller's arrays on the GPU; they are
    assumed to already be device pointers. Shih 09/24/20
*/
{
int ier;
d_plan->kx = d_kx;
d_plan->ky = d_ky;
d_plan->kz = d_kz;
d_plan->c = d_c;
d_plan->fw = d_fw;
d_plan->nf1 = nf1;
d_plan->nf2 = nf2;
d_plan->nf3 = nf3;
d_plan->M = M;
d_plan->maxbatchsize = 1;
ier = allocgpumem3d_plan<T>(d_plan);
ier = allocgpumem3d_nupts<T>(d_plan);
if (d_plan->opts.gpu_method == 1) {
ier = cuspread3d_nuptsdriven_prop<T>(nf1, nf2, nf3, M, d_plan);
if (ier != 0) {
printf("error: cuspread3d_nuptsdriven_prop, method(%d)\n", d_plan->opts.gpu_method);
return ier;
}
}
if (d_plan->opts.gpu_method == 2) {
ier = cuspread3d_subprob_prop<T>(nf1, nf2, nf3, M, d_plan);
if (ier != 0) {
printf("error: cuspread3d_subprob_prop, method(%d)\n", d_plan->opts.gpu_method);
return ier;
}
}
if (d_plan->opts.gpu_method == 4) {
ier = cuspread3d_blockgather_prop<T>(nf1, nf2, nf3, M, d_plan);
if (ier != 0) {
printf("error: cuspread3d_blockgather_prop, method(%d)\n", d_plan->opts.gpu_method);
return ier;
}
}
ier = cuspread3d<T>(d_plan, 1);
freegpumemory3d<T>(d_plan);
return ier;
}
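/* Illustrative call (a sketch -- every pointer below is assumed to already be a
   device buffer: M nonuniform points in d_kx/d_ky/d_kz, strengths in d_c, and an
   nf1*nf2*nf3 fine grid in d_fw, with d_plan->opts / d_plan->spopts preconfigured):
       ier = cufinufft_spread3d<float>(nf1, nf2, nf3, d_fw, M,
                                       d_kx, d_ky, d_kz, d_c, d_plan);
*/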
template <typename T>
int cuspread3d(cufinufft_plan_t<T> *d_plan, int blksize)
/*
A wrapper for different spreading methods.
Methods available:
(1) Non-uniform points driven
(2) Subproblem
(4) Block gather
Melody Shih 07/25/19
*/
{
int nf1 = d_plan->nf1;
int nf2 = d_plan->nf2;
int nf3 = d_plan->nf3;
int M = d_plan->M;
int ier = 0;
switch (d_plan->opts.gpu_method) {
case 1: {
ier = cuspread3d_nuptsdriven<T>(nf1, nf2, nf3, M, d_plan, blksize);
if (ier != 0) {
std::cout << "error: cnufftspread3d_gpu_subprob" << std::endl;
return 1;
}
} break;
case 2: {
ier = cuspread3d_subprob<T>(nf1, nf2, nf3, M, d_plan, blksize);
if (ier != 0) {
std::cout << "error: cnufftspread3d_gpu_subprob" << std::endl;
return 1;
}
} break;
case 4: {
ier = cuspread3d_blockgather<T>(nf1, nf2, nf3, M, d_plan, blksize);
if (ier != 0) {
std::cout << "error: cnufftspread3d_gpu_subprob" << std::endl;
return 1;
}
} break;
default:
std::cerr << "error: incorrect method, should be 1,2,4" << std::endl;
return 2;
}
return ier;
}
template <typename T>
int cuspread3d_nuptsdriven_prop(int nf1, int nf2, int nf3, int M, cufinufft_plan_t<T> *d_plan) {
if (d_plan->opts.gpu_sort) {
int bin_size_x = d_plan->opts.gpu_binsizex;
int bin_size_y = d_plan->opts.gpu_binsizey;
int bin_size_z = d_plan->opts.gpu_binsizez;
if (bin_size_x < 0 || bin_size_y < 0 || bin_size_z < 0) {
std::cout << "error: invalid binsize (binsizex, binsizey, binsizez) = (";
std::cout << bin_size_x << "," << bin_size_y << "," << bin_size_z << ")" << std::endl;
return 1;
}
int numbins[3];
numbins[0] = ceil((T)nf1 / bin_size_x);
numbins[1] = ceil((T)nf2 / bin_size_y);
numbins[2] = ceil((T)nf3 / bin_size_z);
T *d_kx = d_plan->kx;
T *d_ky = d_plan->ky;
T *d_kz = d_plan->kz;
int *d_binsize = d_plan->binsize;
int *d_binstartpts = d_plan->binstartpts;
int *d_sortidx = d_plan->sortidx;
int *d_idxnupts = d_plan->idxnupts;
int pirange = d_plan->spopts.pirange;
checkCudaErrors(cudaMemset(d_binsize, 0, numbins[0] * numbins[1] * numbins[2] * sizeof(int)));
calc_bin_size_noghost_3d<<<(M + 1024 - 1) / 1024, 1024>>>(M, nf1, nf2, nf3, bin_size_x, bin_size_y, bin_size_z,
numbins[0], numbins[1], numbins[2], d_binsize, d_kx,
d_ky, d_kz, d_sortidx, pirange);
int n = numbins[0] * numbins[1] * numbins[2];
thrust::device_ptr<int> d_ptr(d_binsize);
thrust::device_ptr<int> d_result(d_binstartpts);
thrust::exclusive_scan(d_ptr, d_ptr + n, d_result);
calc_inverse_of_global_sort_index_3d<<<(M + 1024 - 1) / 1024, 1024>>>(
M, bin_size_x, bin_size_y, bin_size_z, numbins[0], numbins[1], numbins[2], d_binstartpts, d_sortidx, d_kx,
d_ky, d_kz, d_idxnupts, pirange, nf1, nf2, nf3);
} else {
int *d_idxnupts = d_plan->idxnupts;
trivial_global_sort_index_3d<<<(M + 1024 - 1) / 1024, 1024>>>(M, d_idxnupts);
}
return 0;
}
template <typename T>
int cuspread3d_nuptsdriven(int nf1, int nf2, int nf3, int M, cufinufft_plan_t<T> *d_plan, int blksize) {
dim3 threadsPerBlock;
dim3 blocks;
int ns = d_plan->spopts.nspread; // psi's support in terms of number of cells
T sigma = d_plan->spopts.upsampfac;
T es_c = d_plan->spopts.ES_c;
T es_beta = d_plan->spopts.ES_beta;
int pirange = d_plan->spopts.pirange;
int *d_idxnupts = d_plan->idxnupts;
T *d_kx = d_plan->kx;
T *d_ky = d_plan->ky;
T *d_kz = d_plan->kz;
cuda_complex<T> *d_c = d_plan->c;
cuda_complex<T> *d_fw = d_plan->fw;
threadsPerBlock.x = 16;
threadsPerBlock.y = 1;
blocks.x = (M + threadsPerBlock.x - 1) / threadsPerBlock.x;
blocks.y = 1;
if (d_plan->opts.gpu_kerevalmeth == 1) {
for (int t = 0; t < blksize; t++) {
spread_3d_nupts_driven_horner<<<blocks, threadsPerBlock>>>(d_kx, d_ky, d_kz, d_c + t * M,
d_fw + t * nf1 * nf2 * nf3, M, ns, nf1, nf2, nf3,
sigma, d_idxnupts, pirange);
}
} else {
for (int t = 0; t < blksize; t++) {
spread_3d_nupts_driven<<<blocks, threadsPerBlock>>>(d_kx, d_ky, d_kz, d_c + t * M,
d_fw + t * nf1 * nf2 * nf3, M, ns, nf1, nf2, nf3, es_c,
es_beta, d_idxnupts, pirange);
}
}
return 0;
}
template <typename T>
int cuspread3d_blockgather_prop(int nf1, int nf2, int nf3, int M, cufinufft_plan_t<T> *d_plan) {
dim3 threadsPerBlock;
dim3 blocks;
int pirange = d_plan->spopts.pirange;
int maxsubprobsize = d_plan->opts.gpu_maxsubprobsize;
int o_bin_size_x = d_plan->opts.gpu_obinsizex;
int o_bin_size_y = d_plan->opts.gpu_obinsizey;
int o_bin_size_z = d_plan->opts.gpu_obinsizez;
int numobins[3];
if (nf1 % o_bin_size_x != 0 || nf2 % o_bin_size_y != 0 || nf3 % o_bin_size_z != 0) {
std::cout << "error: mod(nf1, opts.gpu_obinsizex) != 0" << std::endl;
std::cout << " mod(nf2, opts.gpu_obinsizey) != 0" << std::endl;
std::cout << " mod(nf3, opts.gpu_obinsizez) != 0" << std::endl;
std::cout << "error: (nf1, nf2, nf3) = (" << nf1 << ", " << nf2 << ", " << nf3 << ")" << std::endl;
std::cout << "error: (obinsizex, obinsizey, obinsizez) = (" << o_bin_size_x << ", " << o_bin_size_y << ", "
<< o_bin_size_z << ")" << std::endl;
return 1;
}
numobins[0] = ceil((T)nf1 / o_bin_size_x);
numobins[1] = ceil((T)nf2 / o_bin_size_y);
numobins[2] = ceil((T)nf3 / o_bin_size_z);
int bin_size_x = d_plan->opts.gpu_binsizex;
int bin_size_y = d_plan->opts.gpu_binsizey;
int bin_size_z = d_plan->opts.gpu_binsizez;
if (o_bin_size_x % bin_size_x != 0 || o_bin_size_y % bin_size_y != 0 || o_bin_size_z % bin_size_z != 0) {
std::cout << "error: mod(ops.gpu_obinsizex, opts.gpu_binsizex) != 0" << std::endl;
std::cout << " mod(ops.gpu_obinsizey, opts.gpu_binsizey) != 0" << std::endl;
std::cout << " mod(ops.gpu_obinsizez, opts.gpu_binsizez) != 0" << std::endl;
std::cout << "error: (binsizex, binsizey, binsizez) = (" << bin_size_x << ", " << bin_size_y << ", "
<< bin_size_z << ")" << std::endl;
std::cout << "error: (obinsizex, obinsizey, obinsizez) = (" << o_bin_size_x << ", " << o_bin_size_y << ", "
<< o_bin_size_z << ")" << std::endl;
return 1;
}
int binsperobinx, binsperobiny, binsperobinz;
int numbins[3];
binsperobinx = o_bin_size_x / bin_size_x + 2;
binsperobiny = o_bin_size_y / bin_size_y + 2;
binsperobinz = o_bin_size_z / bin_size_z + 2;
numbins[0] = numobins[0] * (binsperobinx);
numbins[1] = numobins[1] * (binsperobiny);
numbins[2] = numobins[2] * (binsperobinz);
T *d_kx = d_plan->kx;
T *d_ky = d_plan->ky;
T *d_kz = d_plan->kz;
int *d_binsize = d_plan->binsize;
int *d_sortidx = d_plan->sortidx;
int *d_binstartpts = d_plan->binstartpts;
int *d_numsubprob = d_plan->numsubprob;
int *d_idxnupts = NULL;
int *d_subprobstartpts = d_plan->subprobstartpts;
int *d_subprob_to_bin = NULL;
checkCudaErrors(cudaMemset(d_binsize, 0, numbins[0] * numbins[1] * numbins[2] * sizeof(int)));
locate_nupts_to_bins_ghost<<<(M + 1024 - 1) / 1024, 1024>>>(
M, bin_size_x, bin_size_y, bin_size_z, numobins[0], numobins[1], numobins[2], binsperobinx, binsperobiny,
binsperobinz, d_binsize, d_kx, d_ky, d_kz, d_sortidx, pirange, nf1, nf2, nf3);
threadsPerBlock.x = 8;
threadsPerBlock.y = 8;
threadsPerBlock.z = 8;
blocks.x = (threadsPerBlock.x + numbins[0] - 1) / threadsPerBlock.x;
blocks.y = (threadsPerBlock.y + numbins[1] - 1) / threadsPerBlock.y;
blocks.z = (threadsPerBlock.z + numbins[2] - 1) / threadsPerBlock.z;
fill_ghost_bins<<<blocks, threadsPerBlock>>>(binsperobinx, binsperobiny, binsperobinz, numobins[0], numobins[1],
numobins[2], d_binsize);
int n = numbins[0] * numbins[1] * numbins[2];
thrust::device_ptr<int> d_ptr(d_binsize);
thrust::device_ptr<int> d_result(d_binstartpts + 1);
thrust::inclusive_scan(d_ptr, d_ptr + n, d_result);
checkCudaErrors(cudaMemset(d_binstartpts, 0, sizeof(int)));
int totalNUpts;
checkCudaErrors(cudaMemcpy(&totalNUpts, &d_binstartpts[n], sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMalloc(&d_idxnupts, totalNUpts * sizeof(int)));
calc_inverse_of_global_sort_index_ghost<<<(M + 1024 - 1) / 1024, 1024>>>(
M, bin_size_x, bin_size_y, bin_size_z, numobins[0], numobins[1], numobins[2], binsperobinx, binsperobiny,
binsperobinz, d_binstartpts, d_sortidx, d_kx, d_ky, d_kz, d_idxnupts, pirange, nf1, nf2, nf3);
threadsPerBlock.x = 2;
threadsPerBlock.y = 2;
threadsPerBlock.z = 2;
blocks.x = (threadsPerBlock.x + numbins[0] - 1) / threadsPerBlock.x;
blocks.y = (threadsPerBlock.y + numbins[1] - 1) / threadsPerBlock.y;
blocks.z = (threadsPerBlock.z + numbins[2] - 1) / threadsPerBlock.z;
ghost_bin_pts_index<<<blocks, threadsPerBlock>>>(binsperobinx, binsperobiny, binsperobinz, numobins[0], numobins[1],
numobins[2], d_binsize, d_idxnupts, d_binstartpts, M);
if (d_plan->idxnupts != NULL)
cudaFree(d_plan->idxnupts);
d_plan->idxnupts = d_idxnupts;
/* --------------------------------------------- */
// Determining Subproblem properties //
/* --------------------------------------------- */
n = numobins[0] * numobins[1] * numobins[2];
calc_subprob_3d_v1<<<(n + 1024 - 1) / 1024, 1024>>>(binsperobinx, binsperobiny, binsperobinz, d_binsize,
d_numsubprob, maxsubprobsize,
numobins[0] * numobins[1] * numobins[2]);
n = numobins[0] * numobins[1] * numobins[2];
d_ptr = thrust::device_pointer_cast(d_numsubprob);
d_result = thrust::device_pointer_cast(d_subprobstartpts + 1);
thrust::inclusive_scan(d_ptr, d_ptr + n, d_result);
checkCudaErrors(cudaMemset(d_subprobstartpts, 0, sizeof(int)));
int totalnumsubprob;
checkCudaErrors(cudaMemcpy(&totalnumsubprob, &d_subprobstartpts[n], sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMalloc(&d_subprob_to_bin, totalnumsubprob * sizeof(int)));
map_b_into_subprob_3d_v1<<<(n + 1024 - 1) / 1024, 1024>>>(d_subprob_to_bin, d_subprobstartpts, d_numsubprob, n);
assert(d_subprob_to_bin != NULL);
if (d_plan->subprob_to_bin != NULL)
cudaFree(d_plan->subprob_to_bin);
d_plan->subprob_to_bin = d_subprob_to_bin;
d_plan->totalnumsubprob = totalnumsubprob;
return 0;
}
template <typename T>
int cuspread3d_blockgather(int nf1, int nf2, int nf3, int M, cufinufft_plan_t<T> *d_plan, int blksize) {
int ns = d_plan->spopts.nspread;
T es_c = d_plan->spopts.ES_c;
T es_beta = d_plan->spopts.ES_beta;
T sigma = d_plan->spopts.upsampfac;
int pirange = d_plan->spopts.pirange;
int maxsubprobsize = d_plan->opts.gpu_maxsubprobsize;
int obin_size_x = d_plan->opts.gpu_obinsizex;
int obin_size_y = d_plan->opts.gpu_obinsizey;
int obin_size_z = d_plan->opts.gpu_obinsizez;
int bin_size_x = d_plan->opts.gpu_binsizex;
int bin_size_y = d_plan->opts.gpu_binsizey;
int bin_size_z = d_plan->opts.gpu_binsizez;
int numobins[3];
numobins[0] = ceil((T)nf1 / obin_size_x);
numobins[1] = ceil((T)nf2 / obin_size_y);
numobins[2] = ceil((T)nf3 / obin_size_z);
int binsperobinx, binsperobiny, binsperobinz;
binsperobinx = obin_size_x / bin_size_x + 2;
binsperobiny = obin_size_y / bin_size_y + 2;
binsperobinz = obin_size_z / bin_size_z + 2;
T *d_kx = d_plan->kx;
T *d_ky = d_plan->ky;
T *d_kz = d_plan->kz;
cuda_complex<T> *d_c = d_plan->c;
cuda_complex<T> *d_fw = d_plan->fw;
int *d_binstartpts = d_plan->binstartpts;
int *d_subprobstartpts = d_plan->subprobstartpts;
int *d_idxnupts = d_plan->idxnupts;
int totalnumsubprob = d_plan->totalnumsubprob;
int *d_subprob_to_bin = d_plan->subprob_to_bin;
for (int t = 0; t < blksize; t++) {
if (d_plan->opts.gpu_kerevalmeth == 1) {
size_t sharedplanorysize = obin_size_x * obin_size_y * obin_size_z * sizeof(cuda_complex<T>);
if (sharedplanorysize > 49152) {
std::cout << "error: not enough shared memory" << std::endl;
return 1;
}
spread_3d_block_gather_horner<<<totalnumsubprob, 64, sharedplanorysize>>>(
d_kx, d_ky, d_kz, d_c + t * M, d_fw + t * nf1 * nf2 * nf3, M, ns, nf1, nf2, nf3, es_c, es_beta, sigma,
d_binstartpts, obin_size_x, obin_size_y, obin_size_z, binsperobinx * binsperobiny * binsperobinz,
d_subprob_to_bin, d_subprobstartpts, maxsubprobsize, numobins[0], numobins[1], numobins[2], d_idxnupts,
pirange);
} else {
size_t sharedplanorysize = obin_size_x * obin_size_y * obin_size_z * sizeof(cuda_complex<T>);
if (sharedplanorysize > 49152) {
std::cout << "error: not enough shared memory" << std::endl;
return 1;
}
spread_3d_block_gather<<<totalnumsubprob, 64, sharedplanorysize>>>(
d_kx, d_ky, d_kz, d_c + t * M, d_fw + t * nf1 * nf2 * nf3, M, ns, nf1, nf2, nf3, es_c, es_beta, sigma,
d_binstartpts, obin_size_x, obin_size_y, obin_size_z, binsperobinx * binsperobiny * binsperobinz,
d_subprob_to_bin, d_subprobstartpts, maxsubprobsize, numobins[0], numobins[1], numobins[2], d_idxnupts,
pirange);
}
}
return 0;
}
template <typename T>
int cuspread3d_subprob_prop(int nf1, int nf2, int nf3, int M, cufinufft_plan_t<T> *d_plan) {
int maxsubprobsize = d_plan->opts.gpu_maxsubprobsize;
int bin_size_x = d_plan->opts.gpu_binsizex;
int bin_size_y = d_plan->opts.gpu_binsizey;
int bin_size_z = d_plan->opts.gpu_binsizez;
if (bin_size_x < 0 || bin_size_y < 0 || bin_size_z < 0) {
std::cout << "error: invalid binsize (binsizex, binsizey, binsizez) = (";
std::cout << bin_size_x << "," << bin_size_y << "," << bin_size_z << ")" << std::endl;
return 1;
}
int numbins[3];
numbins[0] = ceil((T)nf1 / bin_size_x);
numbins[1] = ceil((T)nf2 / bin_size_y);
numbins[2] = ceil((T)nf3 / bin_size_z);
T *d_kx = d_plan->kx;
T *d_ky = d_plan->ky;
T *d_kz = d_plan->kz;
int *d_binsize = d_plan->binsize;
int *d_binstartpts = d_plan->binstartpts;
int *d_sortidx = d_plan->sortidx;
int *d_numsubprob = d_plan->numsubprob;
int *d_subprobstartpts = d_plan->subprobstartpts;
int *d_idxnupts = d_plan->idxnupts;
int *d_subprob_to_bin = NULL;
int pirange = d_plan->spopts.pirange;
checkCudaErrors(cudaMemset(d_binsize, 0, numbins[0] * numbins[1] * numbins[2] * sizeof(int)));
calc_bin_size_noghost_3d<<<(M + 1024 - 1) / 1024, 1024>>>(M, nf1, nf2, nf3, bin_size_x, bin_size_y, bin_size_z,
numbins[0], numbins[1], numbins[2], d_binsize, d_kx, d_ky,
d_kz, d_sortidx, pirange);
int n = numbins[0] * numbins[1] * numbins[2];
thrust::device_ptr<int> d_ptr(d_binsize);
thrust::device_ptr<int> d_result(d_binstartpts);
thrust::exclusive_scan(d_ptr, d_ptr + n, d_result);
calc_inverse_of_global_sort_index_3d<<<(M + 1024 - 1) / 1024, 1024>>>(
M, bin_size_x, bin_size_y, bin_size_z, numbins[0], numbins[1], numbins[2], d_binstartpts, d_sortidx, d_kx, d_ky,
d_kz, d_idxnupts, pirange, nf1, nf2, nf3);
/* --------------------------------------------- */
// Determining Subproblem properties //
/* --------------------------------------------- */
calc_subprob_3d_v2<<<(M + 1024 - 1) / 1024, 1024>>>(d_binsize, d_numsubprob, maxsubprobsize,
numbins[0] * numbins[1] * numbins[2]);
d_ptr = thrust::device_pointer_cast(d_numsubprob);
d_result = thrust::device_pointer_cast(d_subprobstartpts + 1);
thrust::inclusive_scan(d_ptr, d_ptr + n, d_result);
checkCudaErrors(cudaMemset(d_subprobstartpts, 0, sizeof(int)));
int totalnumsubprob;
checkCudaErrors(cudaMemcpy(&totalnumsubprob, &d_subprobstartpts[n], sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMalloc(&d_subprob_to_bin, totalnumsubprob * sizeof(int)));
map_b_into_subprob_3d_v2<<<(numbins[0] * numbins[1] + 1024 - 1) / 1024, 1024>>>(
d_subprob_to_bin, d_subprobstartpts, d_numsubprob, numbins[0] * numbins[1] * numbins[2]);
assert(d_subprob_to_bin != NULL);
if (d_plan->subprob_to_bin != NULL)
cudaFree(d_plan->subprob_to_bin);
d_plan->subprob_to_bin = d_subprob_to_bin;
assert(d_plan->subprob_to_bin != NULL);
d_plan->totalnumsubprob = totalnumsubprob;
return 0;
}
template <typename T>
int cuspread3d_subprob(int nf1, int nf2, int nf3, int M, cufinufft_plan_t<T> *d_plan, int blksize) {
int ns = d_plan->spopts.nspread; // psi's support in terms of number of cells
int maxsubprobsize = d_plan->opts.gpu_maxsubprobsize;
// assume that bin_size_x > ns/2;
int bin_size_x = d_plan->opts.gpu_binsizex;
int bin_size_y = d_plan->opts.gpu_binsizey;
int bin_size_z = d_plan->opts.gpu_binsizez;
int numbins[3];
numbins[0] = ceil((T)nf1 / bin_size_x);
numbins[1] = ceil((T)nf2 / bin_size_y);
numbins[2] = ceil((T)nf3 / bin_size_z);
T *d_kx = d_plan->kx;
T *d_ky = d_plan->ky;
T *d_kz = d_plan->kz;
cuda_complex<T> *d_c = d_plan->c;
cuda_complex<T> *d_fw = d_plan->fw;
int *d_binsize = d_plan->binsize;
int *d_binstartpts = d_plan->binstartpts;
int *d_numsubprob = d_plan->numsubprob;
int *d_subprobstartpts = d_plan->subprobstartpts;
int *d_idxnupts = d_plan->idxnupts;
int totalnumsubprob = d_plan->totalnumsubprob;
int *d_subprob_to_bin = d_plan->subprob_to_bin;
T sigma = d_plan->spopts.upsampfac;
T es_c = d_plan->spopts.ES_c;
T es_beta = d_plan->spopts.ES_beta;
int pirange = d_plan->spopts.pirange;
size_t sharedplanorysize = (bin_size_x + 2 * ceil(ns / 2.0)) * (bin_size_y + 2 * ceil(ns / 2.0)) *
(bin_size_z + 2 * ceil(ns / 2.0)) * sizeof(cuda_complex<T>);
if (sharedplanorysize > 49152) {
std::cout << "error: not enough shared memory (" << sharedplanorysize << ")" << std::endl;
return 1;
}
for (int t = 0; t < blksize; t++) {
if (d_plan->opts.gpu_kerevalmeth) {
spread_3d_subprob_horner<<<totalnumsubprob, 256, sharedplanorysize>>>(
d_kx, d_ky, d_kz, d_c + t * M, d_fw + t * nf1 * nf2 * nf3, M, ns, nf1, nf2, nf3, sigma, d_binstartpts,
d_binsize, bin_size_x, bin_size_y, bin_size_z, d_subprob_to_bin, d_subprobstartpts, d_numsubprob,
maxsubprobsize, numbins[0], numbins[1], numbins[2], d_idxnupts, pirange);
} else {
spread_3d_subprob<<<totalnumsubprob, 256, sharedplanorysize>>>(
d_kx, d_ky, d_kz, d_c + t * M, d_fw + t * nf1 * nf2 * nf3, M, ns, nf1, nf2, nf3, es_c, es_beta,
d_binstartpts, d_binsize, bin_size_x, bin_size_y, bin_size_z, d_subprob_to_bin, d_subprobstartpts,
d_numsubprob, maxsubprobsize, numbins[0], numbins[1], numbins[2], d_idxnupts, pirange);
}
}
return 0;
}
template int cuspread3d<float>(cufinufft_plan_t<float> *d_plan, int blksize);
template int cuspread3d<double>(cufinufft_plan_t<double> *d_plan, int blksize);
template int cufinufft_spread3d<float>(int nf1, int nf2, int nf3, cuda_complex<float> *d_fw, int M, float *d_kx,
float *d_ky, float *d_kz, cuda_complex<float> *d_c,
cufinufft_plan_t<float> *d_plan);
template int cufinufft_spread3d<double>(int nf1, int nf2, int nf3, cuda_complex<double> *d_fw, int M, double *d_kx,
double *d_ky, double *d_kz, cuda_complex<double> *d_c,
cufinufft_plan_t<double> *d_plan);
} // namespace spreadinterp
} // namespace cufinufft
|
0bfdb45785150f95f108871ff44a1d2c316f50b6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* A CUDA program that demonstrates how to compute a stereo disparity map using
* SIMD SAD (Sum of Absolute Difference) intrinsics
*/
/*
* The program's performance is dominated by the computation on the
* execution engine (EE) while memory copies between Host and Device
* using the copy engine (CE) are significantly less time consuming.
*
* This version uses a user allocated stream and asynchronous memory
* copy operations (hipMemcpyAsync()). Cuda kernel invocations on the
* stream are also asynchronous. hipStreamSynchronize() is used to
* synchronize with both the copy and kernel executions. Host pinned
* memory is not used because the copy operations are not a significant
* element of performance.
*
* The program depends on two input files containing the image
* representations for the left and right stereo images
* (stereo.im0.640x533.ppm and stereo.im1.640x533.ppm)
* which must be in the directory with the executable.
*
* Modified by Don Smith, Department of Computer Science,
* University of North Carolina at Chapel Hill
* 2015
*/
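/*
 * In sketch, every copy/compute step in stereoDisparity() below follows the pattern:
 *   hipMemcpyAsync(dst, src, memSize, kind, my_stream);
 *   hipStreamSynchronize(my_stream);   // wait for the copy engine (CE)
 *   hipLaunchKernelGGL(stereoDisparityKernel, numBlocks, numThreads, 0, my_stream, ...);
 *   hipStreamSynchronize(my_stream);   // wait for the execution engine (EE)
 * so each use of an engine on the user stream is bracketed by an explicit synchronization.
 */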
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/types.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
// includes, kernels
// For the CUDA runtime routines (prefixed with "cuda")
#include <hip/hip_runtime.h>
// the kernel code
#include "stereoDisparity_kernel.cuh"
// includes, project
#include <helper_functions.h> // helper for shared that are common to CUDA Samples
#include <helper_cuda.h> // helper for checking cuda initialization and error checking
#include <helper_string.h> // helper functions for string parsing
int iDivUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
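// Integer ceiling division, e.g. iDivUp(533, 8) == 67, so the launch grid fully covers the image.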
unsigned int numData;
dim3 numThreads;
dim3 numBlocks;
unsigned int *h_odata;
unsigned int *d_odata, *d_img0, *d_img1;
unsigned int memSize;
hipStream_t my_stream;
unsigned char *h_img0;
unsigned char *h_img1;
int minDisp;
int maxDisp;
unsigned int w, h;
void stereoDisparity() {
  // initialize the memory for output data to zeros
for (unsigned int i = 0; i < numData; i++)
h_odata[i] = 0;
// copy host memory with images to device
// this call is asynchronous so only the lock of CE can be handled in the wrapper
checkCudaErrors(hipMemcpyAsync(d_img0, h_img0, memSize, hipMemcpyHostToDevice, my_stream));
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
hipStreamSynchronize(my_stream);
// this call is asynchronous so only the lock of CE can be handled in the wrapper
checkCudaErrors(hipMemcpyAsync(d_img1, h_img1, memSize, hipMemcpyHostToDevice, my_stream));
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
hipStreamSynchronize(my_stream);
// copy host memory that was set to zero to initialize device output
// this call is asynchronous so only the lock of CE can be handled in the wrapper
checkCudaErrors(hipMemcpyAsync(d_odata, h_odata, memSize, hipMemcpyHostToDevice, my_stream));
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
hipStreamSynchronize(my_stream);
// First run the warmup kernel (which we'll use to get the GPU in the correct max power state)
// lock of EE is handled in wrapper for hipLaunch()
hipLaunchKernelGGL(( stereoDisparityKernel), dim3(numBlocks), dim3(numThreads), 0, my_stream, d_img0, d_img1, d_odata, w, h, minDisp/2, maxDisp);
// synchronize with the stream after kernel execution
// the wrapper for this function releases any lock held (EE here)
hipStreamSynchronize(my_stream);
// copy host memory that was set to zero to initialize device output
// this call is asynchronous so only the lock of CE can be handled in the wrapper
checkCudaErrors(hipMemcpyAsync(d_odata, h_odata, memSize, hipMemcpyHostToDevice, my_stream));
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
hipStreamSynchronize(my_stream);
// launch the stereoDisparity kernel
// lock of EE is handled in wrapper for hipLaunch()
hipLaunchKernelGGL(( stereoDisparityKernel), dim3(numBlocks), dim3(numThreads), 0, my_stream, d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
// synchronize with the stream after kernel execution
// the wrapper for this function releases any lock held (EE here)
hipStreamSynchronize(my_stream);
// Check to make sure the kernel didn't fail
getLastCudaError("Kernel execution failed");
//Copy result from device to host for verification
// this call is asynchronous so only the lock of CE can be handled in the wrapper
checkCudaErrors(hipMemcpyAsync(h_odata, d_odata, memSize, hipMemcpyDeviceToHost, my_stream));
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
hipStreamSynchronize(my_stream);
#ifdef PRINT_CHECKSUM
// calculate sum of resultant GPU image
// This verification is applied only to the
// last result computed
unsigned int checkSum = 0;
for (unsigned int i=0 ; i <w *h ; i++) {
checkSum += h_odata[i];
}
if (checkSum == 4293895789) //valid checksum only for these two images
printf("Test PASSED\n");
else {
fprintf(stderr, "Verification failed, GPU Checksum = %u, ", checkSum);
exit(-1);
}
#endif
#ifdef WRITE_DISPARITY
// write out the resulting disparity image.
// creates file in directory containing executable
unsigned char *dispOut = (unsigned char *)malloc(numData);
int mult = 20;
char fnameOut[50] = "";
strcat(fnameOut, "output_GPU.pgm");
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("GPU image: <%s>\n", fnameOut);
sdkSavePGM(fnameOut, dispOut, w, h);
if (dispOut != NULL) free(dispOut);
#endif
// prepare to clean up
// wrapper will release any lock held
hipDeviceSynchronize();
// cleanup device memory
checkCudaErrors(hipFree(d_odata));
checkCudaErrors(hipFree(d_img0));
checkCudaErrors(hipFree(d_img1));
// cleanup host memory
if (h_odata != NULL) free(h_odata);
if (h_img0 != NULL) free(h_img0);
if (h_img1 != NULL) free(h_img1);
// finish clean up with deleting the user-created stream
hipStreamDestroy(my_stream);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
}
int main(int argc, char **argv)
{
int sync_level = 2; //default -- process blocking
/*
* The only parameter is an integer that indicates the desired level of
* synchronization used by the GPU driver (values defined below). The
* specified level is used in hipSetDeviceFlags() to set the level
* prior to initialization.
*/
if (argc == 2)
sync_level = atoi(argv[1]);
// level 0 - spin polling (busy waiting) for GPU to finish
// level 1 - yield each time through the polling loop to let another thread run
// level 2 - block process waiting for GPU to finish
switch (sync_level)
{
case 0:
hipSetDeviceFlags(hipDeviceScheduleSpin);
break;
case 1:
hipSetDeviceFlags(hipDeviceScheduleYield);
break;
default:
hipSetDeviceFlags(hipDeviceScheduleBlockingSync);
}
// follow convention and initialize CUDA/GPU
// used here to invoke initialization of GPU locking
hipFree(0);
// use device 0, the only one on a TK1
hipSetDevice(0);
// create a user-defined stream
hipStreamCreate(&my_stream);
  // Search parameters
minDisp = -16;
maxDisp = 0;
// Load image data
// functions allocate memory for the images on host side
// initialize pointers to NULL to request lib call to allocate as needed
// PPM images are loaded into 4 byte/pixel memory (RGBX)
h_img0 = NULL;
h_img1 = NULL;
char *fname0 = sdkFindFilePath("stereo.im0.640x533.ppm", argv[0]);
char *fname1 = sdkFindFilePath("stereo.im1.640x533.ppm", argv[0]);
if (!sdkLoadPPM4ub(fname0, &h_img0, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname0);
exit(-1);
}
if (!sdkLoadPPM4ub(fname1, &h_img1, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname1);
exit(-1);
}
// set up parameters used in the rest of program
numThreads = dim3(blockSize_x, blockSize_y, 1);
numBlocks = dim3(iDivUp(w, numThreads.x), iDivUp(h, numThreads.y));
numData = w*h;
memSize = sizeof(int) * numData;
//allocate memory for the result on host side
h_odata = (unsigned int *)malloc(memSize);
// allocate device memory for inputs and result
checkCudaErrors(hipMalloc((void **) &d_odata, memSize));
checkCudaErrors(hipMalloc((void **) &d_img0, memSize));
checkCudaErrors(hipMalloc((void **) &d_img1, memSize));
// more setup for using the GPU
size_t offset = 0;
hipChannelFormatDesc ca_desc0 = hipCreateChannelDesc<unsigned int>();
hipChannelFormatDesc ca_desc1 = hipCreateChannelDesc<unsigned int>();
tex2Dleft.addressMode[0] = hipAddressModeClamp;
tex2Dleft.addressMode[1] = hipAddressModeClamp;
tex2Dleft.filterMode = hipFilterModePoint;
tex2Dleft.normalized = false;
tex2Dright.addressMode[0] = hipAddressModeClamp;
tex2Dright.addressMode[1] = hipAddressModeClamp;
tex2Dright.filterMode = hipFilterModePoint;
tex2Dright.normalized = false;
checkCudaErrors(hipBindTexture2D(&offset, tex2Dleft, d_img0, ca_desc0, w, h, w*4));
assert(offset == 0);
checkCudaErrors(hipBindTexture2D(&offset, tex2Dright, d_img1, ca_desc1, w, h, w*4));
assert(offset == 0);
// all setup and initialization complete, start iterations
stereoDisparity();
}
| 0bfdb45785150f95f108871ff44a1d2c316f50b6.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* A CUDA program that demonstrates how to compute a stereo disparity map using
* SIMD SAD (Sum of Absolute Difference) intrinsics
*/
/*
* The program's performance is dominated by the computation on the
* execution engine (EE) while memory copies between Host and Device
* using the copy engine (CE) are significantly less time consuming.
*
* This version uses a user allocated stream and asynchronous memory
* copy operations (cudaMemcpyAsync()). Cuda kernel invocations on the
* stream are also asynchronous. cudaStreamSynchronize() is used to
* synchronize with both the copy and kernel executions. Host pinned
* memory is not used because the copy operations are not a significant
* element of performance.
*
* The program depends on two input files containing the image
* representations for the left and right stereo images
* (stereo.im0.640x533.ppm and stereo.im1.640x533.ppm)
* which must be in the directory with the executable.
*
* Modified by Don Smith, Department of Computer Science,
* University of North Carolina at Chapel Hill
* 2015
*/
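/*
 * In sketch, every copy/compute step in stereoDisparity() below follows the pattern:
 *   cudaMemcpyAsync(dst, src, memSize, kind, my_stream);
 *   cudaStreamSynchronize(my_stream);   // wait for the copy engine (CE)
 *   stereoDisparityKernel<<<numBlocks, numThreads, 0, my_stream>>>(...);
 *   cudaStreamSynchronize(my_stream);   // wait for the execution engine (EE)
 * so each use of an engine on the user stream is bracketed by an explicit synchronization.
 */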
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/types.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
// includes, kernels
// For the CUDA runtime routines (prefixed with "cuda")
#include <cuda_runtime.h>
// the kernel code
#include "stereoDisparity_kernel.cuh"
// includes, project
#include <helper_functions.h> // helper for shared that are common to CUDA Samples
#include <helper_cuda.h> // helper for checking cuda initialization and error checking
#include <helper_string.h> // helper functions for string parsing
int iDivUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
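// Integer ceiling division, e.g. iDivUp(533, 8) == 67, so the launch grid fully covers the image.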
unsigned int numData;
dim3 numThreads;
dim3 numBlocks;
unsigned int *h_odata;
unsigned int *d_odata, *d_img0, *d_img1;
unsigned int memSize;
cudaStream_t my_stream;
unsigned char *h_img0;
unsigned char *h_img1;
int minDisp;
int maxDisp;
unsigned int w, h;
void stereoDisparity() {
  // initialize the memory for output data to zeros
for (unsigned int i = 0; i < numData; i++)
h_odata[i] = 0;
// copy host memory with images to device
// this call is asynchronous so only the lock of CE can be handled in the wrapper
checkCudaErrors(cudaMemcpyAsync(d_img0, h_img0, memSize, cudaMemcpyHostToDevice, my_stream));
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
cudaStreamSynchronize(my_stream);
// this call is asynchronous so only the lock of CE can be handled in the wrapper
checkCudaErrors(cudaMemcpyAsync(d_img1, h_img1, memSize, cudaMemcpyHostToDevice, my_stream));
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
cudaStreamSynchronize(my_stream);
// copy host memory that was set to zero to initialize device output
// this call is asynchronous so only the lock of CE can be handled in the wrapper
checkCudaErrors(cudaMemcpyAsync(d_odata, h_odata, memSize, cudaMemcpyHostToDevice, my_stream));
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
cudaStreamSynchronize(my_stream);
// First run the warmup kernel (which we'll use to get the GPU in the correct max power state)
// lock of EE is handled in wrapper for cudaLaunch()
stereoDisparityKernel<<<numBlocks, numThreads, 0, my_stream>>>(d_img0, d_img1, d_odata, w, h, minDisp/2, maxDisp);
// synchronize with the stream after kernel execution
// the wrapper for this function releases any lock held (EE here)
cudaStreamSynchronize(my_stream);
// copy host memory that was set to zero to initialize device output
// this call is asynchronous so only the lock of CE can be handled in the wrapper
checkCudaErrors(cudaMemcpyAsync(d_odata, h_odata, memSize, cudaMemcpyHostToDevice, my_stream));
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
cudaStreamSynchronize(my_stream);
// launch the stereoDisparity kernel
// lock of EE is handled in wrapper for cudaLaunch()
stereoDisparityKernel<<<numBlocks, numThreads, 0, my_stream>>>(d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
// synchronize with the stream after kernel execution
// the wrapper for this function releases any lock held (EE here)
cudaStreamSynchronize(my_stream);
// Check to make sure the kernel didn't fail
getLastCudaError("Kernel execution failed");
//Copy result from device to host for verification
// this call is asynchronous so only the lock of CE can be handled in the wrapper
checkCudaErrors(cudaMemcpyAsync(h_odata, d_odata, memSize, cudaMemcpyDeviceToHost, my_stream));
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
cudaStreamSynchronize(my_stream);
#ifdef PRINT_CHECKSUM
// calculate sum of resultant GPU image
// This verification is applied only to the
// last result computed
unsigned int checkSum = 0;
for (unsigned int i=0 ; i <w *h ; i++) {
checkSum += h_odata[i];
}
if (checkSum == 4293895789) //valid checksum only for these two images
printf("Test PASSED\n");
else {
fprintf(stderr, "Verification failed, GPU Checksum = %u, ", checkSum);
exit(-1);
}
#endif
#ifdef WRITE_DISPARITY
// write out the resulting disparity image.
// creates file in directory containing executable
unsigned char *dispOut = (unsigned char *)malloc(numData);
int mult = 20;
char fnameOut[50] = "";
strcat(fnameOut, "output_GPU.pgm");
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("GPU image: <%s>\n", fnameOut);
sdkSavePGM(fnameOut, dispOut, w, h);
if (dispOut != NULL) free(dispOut);
#endif
// prepare to clean up
// wrapper will release any lock held
cudaDeviceSynchronize();
// cleanup device memory
checkCudaErrors(cudaFree(d_odata));
checkCudaErrors(cudaFree(d_img0));
checkCudaErrors(cudaFree(d_img1));
// cleanup host memory
if (h_odata != NULL) free(h_odata);
if (h_img0 != NULL) free(h_img0);
if (h_img1 != NULL) free(h_img1);
// finish clean up with deleting the user-created stream
cudaStreamDestroy(my_stream);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
}
int main(int argc, char **argv)
{
int sync_level = 2; //default -- process blocking
/*
* The only parameter is an integer that indicates the desired level of
* synchronization used by the GPU driver (values defined below). The
* specified level is used in cudaSetDeviceFlags() to set the level
* prior to initialization.
*/
if (argc == 2)
sync_level = atoi(argv[1]);
// level 0 - spin polling (busy waiting) for GPU to finish
// level 1 - yield each time through the polling loop to let another thread run
// level 2 - block process waiting for GPU to finish
switch (sync_level)
{
case 0:
cudaSetDeviceFlags(cudaDeviceScheduleSpin);
break;
case 1:
cudaSetDeviceFlags(cudaDeviceScheduleYield);
break;
default:
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
}
// follow convention and initialize CUDA/GPU
// used here to invoke initialization of GPU locking
cudaFree(0);
// use device 0, the only one on a TK1
cudaSetDevice(0);
// create a user-defined stream
cudaStreamCreate(&my_stream);
// Search parameters
minDisp = -16;
maxDisp = 0;
// Load image data
// functions allocate memory for the images on host side
// initialize pointers to NULL to request lib call to allocate as needed
// PPM images are loaded into 4 byte/pixel memory (RGBX)
h_img0 = NULL;
h_img1 = NULL;
char *fname0 = sdkFindFilePath("stereo.im0.640x533.ppm", argv[0]);
char *fname1 = sdkFindFilePath("stereo.im1.640x533.ppm", argv[0]);
if (!sdkLoadPPM4ub(fname0, &h_img0, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname0);
exit(-1);
}
if (!sdkLoadPPM4ub(fname1, &h_img1, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname1);
exit(-1);
}
// set up parameters used in the rest of program
numThreads = dim3(blockSize_x, blockSize_y, 1);
numBlocks = dim3(iDivUp(w, numThreads.x), iDivUp(h, numThreads.y));
numData = w*h;
memSize = sizeof(int) * numData;
//allocate memory for the result on host side
h_odata = (unsigned int *)malloc(memSize);
// allocate device memory for inputs and result
checkCudaErrors(cudaMalloc((void **) &d_odata, memSize));
checkCudaErrors(cudaMalloc((void **) &d_img0, memSize));
checkCudaErrors(cudaMalloc((void **) &d_img1, memSize));
// more setup for using the GPU
size_t offset = 0;
cudaChannelFormatDesc ca_desc0 = cudaCreateChannelDesc<unsigned int>();
cudaChannelFormatDesc ca_desc1 = cudaCreateChannelDesc<unsigned int>();
tex2Dleft.addressMode[0] = cudaAddressModeClamp;
tex2Dleft.addressMode[1] = cudaAddressModeClamp;
tex2Dleft.filterMode = cudaFilterModePoint;
tex2Dleft.normalized = false;
tex2Dright.addressMode[0] = cudaAddressModeClamp;
tex2Dright.addressMode[1] = cudaAddressModeClamp;
tex2Dright.filterMode = cudaFilterModePoint;
tex2Dright.normalized = false;
checkCudaErrors(cudaBindTexture2D(&offset, tex2Dleft, d_img0, ca_desc0, w, h, w*4));
assert(offset == 0);
checkCudaErrors(cudaBindTexture2D(&offset, tex2Dright, d_img1, ca_desc1, w, h, w*4));
assert(offset == 0);
// all setup and initialization complete, start iterations
stereoDisparity();
}
|
3c5b08449be4417e4364a8474226ec00bbd45f57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////For the second iteration, after data size 1gb, L2 tlb misses sparsely appear.
///////////After data size 512MB, L1 tlb misses sparsely appear.
///////////For the first iteration, managed memory migrates pages on demand.
///////////After the migration, L1 and L2 tlbs of the page will be filled, L2 cache will also be prefetched.
///////////1700s and 1900s are coincidence, but 1600s is not.
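///////////The kernels below use pointer chasing: init_cpu_data fills A so that A[i] holds the index of the next
///////////element to visit ((i + stride) % mod), so every load depends on the previous one and the time measured
///////////per iteration is the latency of a single dependent global-memory access at the chosen stride.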
//typedef unsigned char byte;
void init_cpu_data(int* A, long long int size, int stride, long long int mod){
for (long long int i = 0; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
for (long long int i = 32; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
}
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
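	//timed load: read %clock64, issue the dependent ld.global of A[j] (address A + j*4), consume the result
	//through s_index[it] so the second %clock64 read cannot be reordered above it, then store the difference.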
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (long long int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
//for (long long int it = 0; it < iterations; it++){
// C[it] = s_index[it];
// D[it] = s_tvalue[it];
//}
}
__global__ void tlb_latency_test(int *A, long long int iterations, int *B, int *C, long long int *D, float clock_rate, long long int mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
    // This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
int value1 = 1;
checkCudaErrors(hipDeviceGetAttribute(&value1, hipDeviceAttributeConcurrentManagedAccess, dev_id));
printf("hipDeviceAttributeConcurrentManagedAccess = %d\n", value1);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
int counter = 0;
/////////change the data stride as to observe if the latency increase is caused by iteration(cache) or stride(tlb)
//for(int data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){
for(int data_stride = 1 * 4 * 1024; data_stride <= 1 * 4 * 1024; data_stride = data_stride * 2){
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
//for(long long int mod2 = 1 * 16 * 1024; mod2 <= 2147483648; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
for(long long int mod2 = 2147483648; mod2 <= 2147483648; mod2 = mod2 * 2){
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
long long int mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
long long int data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
int *CPU_data_in;
//CPU_data_in = (int*)malloc(sizeof(int) * data_size);
checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//int *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * reduced_iter, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
//for (long long int it = 0; it < reduced_iter; it++){
// fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
//}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
//checkCudaErrors(hipFree(GPU_data_in));
checkCudaErrors(hipFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(hipFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
| 3c5b08449be4417e4364a8474226ec00bbd45f57.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////For the second iteration, after data size 1gb, L2 tlb misses sparsely appear.
///////////After data size 512MB, L1 tlb misses sparsely appear.
///////////For the first iteration, managed memory migrates pages on demand.
///////////After the migration, L1 and L2 tlbs of the page will be filled, L2 cache will also be prefetched.
///////////1700s and 1900s are coincidence, but 1600s is not.
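///////////A[i] is initialized to the next index in the chain ((i + stride) % mod), so the loads in P_chasing2 form
///////////a dependent sequence; bracketing each load with %clock64 reads therefore times one access at a time.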
//typedef unsigned char byte;
void init_cpu_data(int* A, long long int size, int stride, long long int mod){
for (long long int i = 0; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
for (long long int i = 32; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
}
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
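	//each iteration: take a %clock64 sample, perform the dependent global load of A[j], force the loaded value
	//to be used via s_index[it], take a second %clock64 sample, and record the delta in s_tvalue[it].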
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (long long int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
//for (long long int it = 0; it < iterations; it++){
// C[it] = s_index[it];
// D[it] = s_tvalue[it];
//}
}
__global__ void tlb_latency_test(int *A, long long int iterations, int *B, int *C, long long int *D, float clock_rate, long long int mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
    // This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
int value1 = 1;
checkCudaErrors(cudaDeviceGetAttribute(&value1, cudaDevAttrConcurrentManagedAccess, dev_id));
printf("cudaDevAttrConcurrentManagedAccess = %d\n", value1);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
int counter = 0;
/////////change the data stride as to observe if the latency increase is caused by iteration(cache) or stride(tlb)
//for(int data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){
for(int data_stride = 1 * 4 * 1024; data_stride <= 1 * 4 * 1024; data_stride = data_stride * 2){
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
//for(long long int mod2 = 1 * 16 * 1024; mod2 <= 2147483648; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
for(long long int mod2 = 2147483648; mod2 <= 2147483648; mod2 = mod2 * 2){
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
long long int mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
long long int data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
int *CPU_data_in;
//CPU_data_in = (int*)malloc(sizeof(int) * data_size);
checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//int *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * reduced_iter, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
//for (long long int it = 0; it < reduced_iter; it++){
// fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
//}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
//checkCudaErrors(cudaFree(GPU_data_in));
checkCudaErrors(cudaFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(cudaFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
|
e46f239da7fb962a3f486616e02fdc58833440a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/// @file
////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Copyright (C) 2016/17 Christian Lessig, Otto-von-Guericke Universitaet Magdeburg
///
////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// module : tutorial 6
///
/// author : [email protected]
///
/// project : GPU Programming
///
/// description: CUDA matrix transpose
///
////////////////////////////////////////////////////////////////////////////////////////////////////
// includes, system
#include <iostream>
#include <vector>
#include <algorithm>
#include <chrono>
#include <cassert>
typedef std::chrono::time_point<std::chrono::high_resolution_clock> tpoint;
// includes, project
#include "cuda_util.h"
const unsigned int Tile_Size = 32;
////////////////////////////////////////////////////////////////////////////////////////////////////
//! Matrix transpose (no bank conflicts)
////////////////////////////////////////////////////////////////////////////////////////////////////
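// The tile is padded to Tile_Size+1 columns so consecutive rows start in different shared-memory banks;
// the column-wise reads sdata[threadIdx.x][threadIdx.y] during the transposed write then avoid bank conflicts.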
__global__
void
transposeMatrix3( float* data_in, float* data_out, unsigned int mat_size) {
int tid_col = blockIdx.x * blockDim.x + threadIdx.x;
int tid_row = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float sdata[Tile_Size][Tile_Size+1];
sdata[threadIdx.y][threadIdx.x] = data_in[tid_row * mat_size + tid_col];
__syncthreads();
tid_col = blockIdx.y * blockDim.x + threadIdx.x;
tid_row = blockIdx.x * blockDim.y + threadIdx.y;
data_out[tid_row * mat_size + tid_col] = sdata[threadIdx.x][threadIdx.y];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//! Matrix transpose (shared memory to ensure coalesced reads and writes)
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void
transposeMatrix2( float* data_in, float* data_out, unsigned int mat_size) {
int tid_col = blockIdx.x * blockDim.x + threadIdx.x;
int tid_row = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float sdata[Tile_Size][Tile_Size];
sdata[threadIdx.x][threadIdx.y] = data_in[tid_row * mat_size + tid_col];
__syncthreads();
tid_col = blockIdx.y * blockDim.y + threadIdx.x;
tid_row = blockIdx.x * blockDim.y + threadIdx.y;
data_out[tid_row * mat_size + tid_col] = sdata[threadIdx.y][threadIdx.x];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//! Matrix transpose (naive implementation)
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void
transposeMatrix1( float* data_in, float* data_out, unsigned int mat_size) {
int tid_col = blockIdx.x * blockDim.x + threadIdx.x;
int tid_row = blockIdx.y * blockDim.y + threadIdx.y;
data_out[tid_col * mat_size + tid_row] = data_in[tid_row * mat_size + tid_col];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//! Matrix copy (as reference for maximal attainable performance)
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void
copyMatrix( float* data_in, float* data_out, unsigned int mat_size) {
int tid_col = blockIdx.x * blockDim.x + threadIdx.x;
int tid_row = blockIdx.y * blockDim.y + threadIdx.y;
data_out[tid_row * mat_size + tid_col] = data_in[tid_row * mat_size + tid_col];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// initialize Cuda device
////////////////////////////////////////////////////////////////////////////////////////////////////
bool
initDevice( int& device_handle, int& max_threads_per_block) {
int deviceCount = 0;
checkErrorsCuda( hipGetDeviceCount(&deviceCount));
if( 0 == deviceCount) {
std::cerr << "initDevice() : No CUDA device found." << std::endl;
return false;
}
// one could implement more complex logic here to find the fastest device
if( deviceCount > 1) {
std::cerr << "initDevice() : Multiple CUDA devices found. Using first one." << std::endl;
}
// set the device
checkErrorsCuda( hipSetDevice( device_handle));
hipDeviceProp_t device_props;
checkErrorsCuda( hipGetDeviceProperties(&device_props, device_handle));
max_threads_per_block = device_props.maxThreadsPerBlock;
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// program entry point
////////////////////////////////////////////////////////////////////////////////////////////////////
int
main( int /*argc*/, char** /*argv*/ ) {
// check execution environment
int device_handle = 0;
int max_threads_per_block = 0;
if( ! initDevice( device_handle, max_threads_per_block)) {
return EXIT_FAILURE;
}
const int mat_size = 8192;
// input matrix
std::vector<float> mat_in( mat_size * mat_size);
std::generate( mat_in.begin(), mat_in.end(), std::rand);
// initialize memory
float* mat_in_device = nullptr;
float* mat_out_device = nullptr;
// allocate device memory
checkErrorsCuda( hipMalloc((void **) &mat_in_device, sizeof(float) * mat_size * mat_size));
checkErrorsCuda( hipMalloc((void **) &mat_out_device, sizeof(float) * mat_size * mat_size));
// copy device memory
checkErrorsCuda( hipMemcpy( (void*) mat_in_device, mat_in.data(),
sizeof(float) * mat_size * mat_size,
hipMemcpyHostToDevice ));
// determine thread layout
int max_threads_per_block_sqrt = std::sqrt( max_threads_per_block);
assert( max_threads_per_block_sqrt * max_threads_per_block_sqrt == max_threads_per_block);
assert( max_threads_per_block_sqrt == Tile_Size);
dim3 num_threads( ::min( mat_size, max_threads_per_block_sqrt),
::min( mat_size, max_threads_per_block_sqrt) );
dim3 num_blocks( mat_size / num_threads.x, mat_size / num_threads.y);
num_blocks.x += ( 0 == num_blocks.x) ? 1 : 0;
num_blocks.y += ( 0 == num_blocks.y) ? 1 : 0;
std::cout << "num_blocks = " << num_blocks.x << " / " << num_blocks.y << std::endl;
std::cout << "num_threads_per_block = " << num_threads.x << " / "
<< num_threads.y << std::endl;
// run kernel
hipDeviceSynchronize();
tpoint t_start = std::chrono::high_resolution_clock::now();
#if 0
hipLaunchKernelGGL(( transposeMatrix1), dim3(num_blocks), dim3(num_threads), 0, 0, mat_in_device, mat_out_device, mat_size);
#endif
#if 0
hipLaunchKernelGGL(( transposeMatrix2), dim3(num_blocks), dim3(num_threads), 0, 0, mat_in_device, mat_out_device, mat_size);
#endif
#if 1
hipLaunchKernelGGL(( transposeMatrix3), dim3(num_blocks), dim3(num_threads), 0, 0, mat_in_device, mat_out_device, mat_size);
#endif
hipDeviceSynchronize();
tpoint t_end = std::chrono::high_resolution_clock::now();
double wall_clock = std::chrono::duration<double, std::milli>(t_end-t_start).count();
std::cerr << "Execution time: " << wall_clock << " ms."<< std::endl;
checkLastCudaError("Kernel execution failed");
// copy result back to host
std::vector<float> mat_out( mat_size * mat_size);
checkErrorsCuda( hipMemcpy( mat_out.data(), mat_out_device,
sizeof(float) * mat_size * mat_size,
hipMemcpyDeviceToHost ));
#if 1
// check result
for( unsigned int row = 0; row < mat_size; ++row) {
for( unsigned int col = 0; col < mat_size; ++col) {
if( mat_out[col * mat_size + row] != mat_in[row * mat_size + col]) {
std::cerr << "Transpose error at (" << row << "," << col << ")" << std::endl;
}
}
}
#endif
// clean up device memory
checkErrorsCuda( hipFree( mat_in_device));
checkErrorsCuda( hipFree( mat_out_device));
return EXIT_SUCCESS;
}
| e46f239da7fb962a3f486616e02fdc58833440a5.cu | /// @file
////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Copyright (C) 2016/17 Christian Lessig, Otto-von-Guericke Universitaet Magdeburg
///
////////////////////////////////////////////////////////////////////////////////////////////////////
///
/// module : tutorial 6
///
/// author : [email protected]
///
/// project : GPU Programming
///
/// description: CUDA matrix transpose
///
////////////////////////////////////////////////////////////////////////////////////////////////////
// includes, system
#include <iostream>
#include <vector>
#include <algorithm>
#include <chrono>
#include <cassert>
typedef std::chrono::time_point<std::chrono::high_resolution_clock> tpoint;
// includes, project
#include "cuda_util.h"
const unsigned int Tile_Size = 32;
////////////////////////////////////////////////////////////////////////////////////////////////////
//! Matrix transpose (no bank conflicts)
////////////////////////////////////////////////////////////////////////////////////////////////////
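// Padding the shared-memory tile to Tile_Size+1 columns staggers its rows across banks, so reading the
// tile column-by-column for the transposed store no longer serializes on bank conflicts.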
__global__
void
transposeMatrix3( float* data_in, float* data_out, unsigned int mat_size) {
int tid_col = blockIdx.x * blockDim.x + threadIdx.x;
int tid_row = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float sdata[Tile_Size][Tile_Size+1];
sdata[threadIdx.y][threadIdx.x] = data_in[tid_row * mat_size + tid_col];
__syncthreads();
tid_col = blockIdx.y * blockDim.x + threadIdx.x;
tid_row = blockIdx.x * blockDim.y + threadIdx.y;
data_out[tid_row * mat_size + tid_col] = sdata[threadIdx.x][threadIdx.y];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//! Matrix transpose (shared memory to ensure coalesced reads and writes)
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void
transposeMatrix2( float* data_in, float* data_out, unsigned int mat_size) {
int tid_col = blockIdx.x * blockDim.x + threadIdx.x;
int tid_row = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float sdata[Tile_Size][Tile_Size];
sdata[threadIdx.x][threadIdx.y] = data_in[tid_row * mat_size + tid_col];
__syncthreads();
tid_col = blockIdx.y * blockDim.y + threadIdx.x;
tid_row = blockIdx.x * blockDim.y + threadIdx.y;
data_out[tid_row * mat_size + tid_col] = sdata[threadIdx.y][threadIdx.x];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//! Matrix transpose (naive implementation)
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void
transposeMatrix1( float* data_in, float* data_out, unsigned int mat_size) {
int tid_col = blockIdx.x * blockDim.x + threadIdx.x;
int tid_row = blockIdx.y * blockDim.y + threadIdx.y;
data_out[tid_col * mat_size + tid_row] = data_in[tid_row * mat_size + tid_col];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//! Matrix copy (as reference for maximal attainable performance)
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void
copyMatrix( float* data_in, float* data_out, unsigned int mat_size) {
int tid_col = blockIdx.x * blockDim.x + threadIdx.x;
int tid_row = blockIdx.y * blockDim.y + threadIdx.y;
data_out[tid_row * mat_size + tid_col] = data_in[tid_row * mat_size + tid_col];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// initialize Cuda device
////////////////////////////////////////////////////////////////////////////////////////////////////
bool
initDevice( int& device_handle, int& max_threads_per_block) {
int deviceCount = 0;
checkErrorsCuda( cudaGetDeviceCount(&deviceCount));
if( 0 == deviceCount) {
std::cerr << "initDevice() : No CUDA device found." << std::endl;
return false;
}
// one could implement more complex logic here to find the fastest device
if( deviceCount > 1) {
std::cerr << "initDevice() : Multiple CUDA devices found. Using first one." << std::endl;
}
// set the device
checkErrorsCuda( cudaSetDevice( device_handle));
cudaDeviceProp device_props;
checkErrorsCuda( cudaGetDeviceProperties(&device_props, device_handle));
max_threads_per_block = device_props.maxThreadsPerBlock;
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// program entry point
////////////////////////////////////////////////////////////////////////////////////////////////////
int
main( int /*argc*/, char** /*argv*/ ) {
// check execution environment
int device_handle = 0;
int max_threads_per_block = 0;
if( ! initDevice( device_handle, max_threads_per_block)) {
return EXIT_FAILURE;
}
const int mat_size = 8192;
// input matrix
std::vector<float> mat_in( mat_size * mat_size);
std::generate( mat_in.begin(), mat_in.end(), std::rand);
// initialize memory
float* mat_in_device = nullptr;
float* mat_out_device = nullptr;
// allocate device memory
checkErrorsCuda( cudaMalloc((void **) &mat_in_device, sizeof(float) * mat_size * mat_size));
checkErrorsCuda( cudaMalloc((void **) &mat_out_device, sizeof(float) * mat_size * mat_size));
// copy device memory
checkErrorsCuda( cudaMemcpy( (void*) mat_in_device, mat_in.data(),
sizeof(float) * mat_size * mat_size,
cudaMemcpyHostToDevice ));
// determine thread layout
int max_threads_per_block_sqrt = std::sqrt( max_threads_per_block);
assert( max_threads_per_block_sqrt * max_threads_per_block_sqrt == max_threads_per_block);
assert( max_threads_per_block_sqrt == Tile_Size);
dim3 num_threads( std::min( mat_size, max_threads_per_block_sqrt),
std::min( mat_size, max_threads_per_block_sqrt) );
dim3 num_blocks( mat_size / num_threads.x, mat_size / num_threads.y);
num_blocks.x += ( 0 == num_blocks.x) ? 1 : 0;
num_blocks.y += ( 0 == num_blocks.y) ? 1 : 0;
std::cout << "num_blocks = " << num_blocks.x << " / " << num_blocks.y << std::endl;
std::cout << "num_threads_per_block = " << num_threads.x << " / "
<< num_threads.y << std::endl;
// run kernel
cudaDeviceSynchronize();
tpoint t_start = std::chrono::high_resolution_clock::now();
#if 0
transposeMatrix1<<<num_blocks, num_threads>>>( mat_in_device, mat_out_device, mat_size);
#endif
#if 0
transposeMatrix2<<<num_blocks, num_threads>>>( mat_in_device, mat_out_device, mat_size);
#endif
#if 1
transposeMatrix3<<<num_blocks, num_threads>>>( mat_in_device, mat_out_device, mat_size);
#endif
cudaDeviceSynchronize();
tpoint t_end = std::chrono::high_resolution_clock::now();
double wall_clock = std::chrono::duration<double, std::milli>(t_end-t_start).count();
std::cerr << "Execution time: " << wall_clock << " ms."<< std::endl;
checkLastCudaError("Kernel execution failed");
// copy result back to host
std::vector<float> mat_out( mat_size * mat_size);
checkErrorsCuda( cudaMemcpy( mat_out.data(), mat_out_device,
sizeof(float) * mat_size * mat_size,
cudaMemcpyDeviceToHost ));
#if 1
// check result
for( unsigned int row = 0; row < mat_size; ++row) {
for( unsigned int col = 0; col < mat_size; ++col) {
if( mat_out[col * mat_size + row] != mat_in[row * mat_size + col]) {
std::cerr << "Transpose error at (" << row << "," << col << ")" << std::endl;
}
}
}
#endif
// clean up device memory
checkErrorsCuda( cudaFree( mat_in_device));
checkErrorsCuda( cudaFree( mat_out_device));
return EXIT_SUCCESS;
}
|
c4c2e1938a5227dec5dfbcd695944f768d3d5cfb.hip | // !!! This is a file automatically generated by hipify!!!
/*
* -----------------------------------------------------------------
* Programmer(s): Cody J. Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* SUNMATRIX_CUSPARSE unit tests.
* -----------------------------------------------------------------
*/
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <nvector/nvector_cuda.h>
#include <nvector/nvector_serial.h>
#include <sundials/sundials_math.h>
#include <sundials/sundials_matrix.h>
#include <sundials/sundials_types.h>
#include <sunmatrix/sunmatrix_cusparse.h>
#include <sunmatrix/sunmatrix_sparse.h>
#include "test_sunmatrix.h"
#include "dreadrb.h"
enum { IDENTITY, RANDOM, RBFILE };
/* Implementation specific test of SUNMatrix_cuSparse_SetKernelExecPolicy */
int Test_SetKernelExecPolicy(SUNMatrix A, int myid);
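/* Minimal execution policy used by Test_SetKernelExecPolicy: every kernel is launched with a single
   one-thread block on the default stream, which is enough to check that a user-supplied policy is
   honored by the cuSPARSE SUNMatrix operations. */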
class ATestExecPolicy : public SUNCudaExecPolicy
{
public:
ATestExecPolicy(){}
virtual size_t gridSize(size_t numWorkElements = 0, size_t blockDim = 0) const
{
return 1;
}
virtual size_t blockSize(size_t numWorkElements = 0, size_t gridDim = 0) const
{
return 1;
}
virtual hipStream_t stream() const
{
return 0;
}
virtual SUNCudaExecPolicy* clone() const
{
return static_cast<SUNCudaExecPolicy*>(new ATestExecPolicy());
}
};
/* ----------------------------------------------------------------------
* Main SUNMatrix Testing Routine
* --------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
int fails=0; /* counter for test failures */
sunindextype M, N; /* overall matrix dims */
sunindextype blkrows, blkcols; /* block matrix dims */
int nblocks; /* number of matrix blocks */
int block_nnz_max; /* max number of nnz in block */
int mattype; /* matrix storage type */
N_Vector x, y, d_x, d_y; /* test vectors */
realtype* vecdata; /* pointers to vector data */
SUNMatrix A, B, C, D, dA, dB, dI; /* test matrices */
realtype* matdata; /* pointer to matrix data */
int print_timing, square;
int matrix_to_use;
sunindextype i, j;
FILE* matrixfp;
char* filename;
hipsparseStatus_t cusp_status;
hipsparseHandle_t cusp_handle;
/* initialize some input variables */
blkrows = 0;
blkcols = 0;
nblocks = 0;
square = 0;
/* check input */
if (argc < 7) {
printf("ERROR: SIX (6) inputs required: matrix (filename|random|identity), matrix rows, matrix cols, number of blocks, matrix type (CSR/BCSR), print timing (0/1)\n");
return(-1);
}
/* determine what test matrix to use */
if (!strcmp(argv[1], "random")) {
matrix_to_use = RANDOM;
} else if (!strcmp(argv[1], "identity")) {
matrix_to_use = IDENTITY;
} else {
matrix_to_use = RBFILE;
filename = argv[1];
}
/* if we are not reading from a file, verify that the dimension args are legal */
if (matrix_to_use != RBFILE) {
blkrows = (sunindextype) atol(argv[2]);
if (blkrows <= 0) {
printf("ERROR: number of rows must be a positive integer\n");
return(-1);
}
blkcols = (sunindextype) atol(argv[3]);
if (blkcols <= 0) {
printf("ERROR: number of cols must be a positive integer\n");
return(-1);
}
square = (blkrows == blkcols) ? 1 : 0;
}
nblocks = (sunindextype) atol(argv[4]);
if (nblocks < 1) {
printf("ERROR: number of blocks must be a positive integer\n");
return(-1);
}
if (!strcmp(argv[5], "CSR")) {
mattype = SUNMAT_CUSPARSE_CSR;
if (nblocks != 1) {
printf("ERROR: the CSR format only supports 1 block\n");
return(-1);
}
} else if (!strcmp(argv[5], "BCSR")) {
mattype = SUNMAT_CUSPARSE_BCSR;
if (matrix_to_use == RBFILE) {
printf("ERROR: cannot read BCSR format from a file\n");
}
if (!square) {
printf("ERROR: the BCSR format only supports square block matrices\n");
return(-1);
}
} else {
printf("ERROR: matrix type must be CSR or BCSR\n");
return(-1);
}
print_timing = atoi(argv[6]);
SetTiming(print_timing);
/* Initialize cuSPARSE */
cusp_status = hipsparseCreate(&cusp_handle);
if (cusp_status != HIPSPARSE_STATUS_SUCCESS) {
printf("ERROR: could not create cuSPARSE handle\n");
return(-1);
}
/* Initialize vectors and matrices to NULL */
x = NULL;
y = NULL;
A = NULL;
B = NULL;
C = NULL;
D = NULL;
dA = NULL;
dB = NULL;
dI = NULL;
if (matrix_to_use == RANDOM) {
M = blkrows * nblocks;
N = blkcols * nblocks;
block_nnz_max = blkrows*blkcols / 2;
/* Create sparsity pattern for a block. */
sunindextype *cols = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));
sunindextype *rows = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));
for (i=0; i<block_nnz_max; i++) {
cols[i] = rand() % blkcols;
rows[i] = rand() % blkrows;
}
/* Fill matrix with uniform random data in [0,1/N] */
D = SUNDenseMatrix(M, N);
for (i=0; i<nblocks; i++) {
for (j=0; j<block_nnz_max; j++) {
sunindextype col = cols[j] + blkcols*i;
sunindextype row = rows[j] + blkrows*i;
matdata = SUNDenseMatrix_Column(D,col);
matdata[row] = (realtype) rand() / (realtype) RAND_MAX / N;
}
}
if (SUNMatScaleAddI(RCONST(1.0), D)) {
printf("ERROR: SUNMatScaleAddI failed for dense matrix D\n");
return(-1);
}
/* Fill matrix with uniform random data in [0,1/N] */
C = SUNDenseMatrix(M, N);
for (i=0; i<nblocks; i++) {
for (j=0; j<block_nnz_max; j++) {
sunindextype col = cols[j] + blkcols*i;
sunindextype row = rows[j] + blkrows*i;
matdata = SUNDenseMatrix_Column(C,col);
matdata[row] = (realtype) rand() / (realtype) RAND_MAX / N;
}
}
if (SUNMatScaleAddI(RCONST(1.0), C)) {
printf("ERROR: SUNMatScaleAddI failed for dense matrix C\n");
return(-1);
}
free(cols);
free(rows);
/* Create sparse matrices from dense */
A = SUNSparseFromDenseMatrix(C, ZERO, CSR_MAT);
if (A == NULL) {
printf("ERROR: SUNSparseFromDenseMatrix returned NULL for A\n");
return(-1);
}
B = SUNSparseFromDenseMatrix(D, ZERO, CSR_MAT);
if (B == NULL) {
printf("ERROR: SUNSparseFromDenseMatrix returned NULL B\n");
return(-1);
}
} else if (matrix_to_use == IDENTITY) {
M = blkrows * nblocks;
N = blkcols * nblocks;
D = SUNDenseMatrix(M, N);
SUNMatScaleAddI(RCONST(0.0), D);
if (SUNMatScaleAddI(RCONST(0.0), D)) {
printf("ERROR: SUNMatScaleAddI failed for dense matrix D\n");
return(-1);
}
C = SUNDenseMatrix(M, N);
if (SUNMatScaleAddI(RCONST(0.0), C)) {
printf("ERROR: SUNMatScaleAddI failed for dense matrix C\n");
return(-1);
}
/* Create sparse matrices from dense */
A = SUNSparseFromDenseMatrix(C, ZERO, CSR_MAT);
if (A == NULL) {
printf("ERROR: SUNSparseFromDenseMatrix returned NULL for A\n");
return(-1);
}
B = SUNSparseFromDenseMatrix(D, ZERO, CSR_MAT);
if (B == NULL) {
printf("ERROR: SUNSparseFromDenseMatrix returned NULL B\n");
return(-1);
}
} else {
SUNMatrix cscA;
matrixfp = fopen(filename, "r");
dreadrb_dist(0, matrixfp, &cscA);
fclose(matrixfp);
if (SUNSparseMatrix_ToCSR(cscA, &A)) {
printf("ERROR: cannot convert matrix that was read to CSR\n");
return(-1);
}
SUNMatDestroy(cscA);
if (SUNMatScaleAddI(RCONST(1.0), A)) {
printf("ERROR: SUNMatScaleAddI failed on matrix that read\n");
return(-1);
}
blkrows = SUNSparseMatrix_Rows(A);
blkcols = SUNSparseMatrix_Columns(A);
square = (blkrows == blkcols) ? 1 : 0;
nblocks = 1;
M = blkrows * nblocks;
N = blkcols * nblocks;
B = SUNMatClone(A);
if (B == NULL || (SUNMatCopy(A, B) != 0)) {
printf("ERROR: failed to SUNMatClone and SUNMatCopy\n");
return(-1);
}
}
printf("cuSPARSE SUNMatrix test: size %ld by %ld, nblocks %ld, block size %ld by %ld, format = %i\n\n",
(long int) M, (long int) N, (long int) nblocks, (long int) blkrows, (long int) blkcols, mattype);
if (mattype == SUNMAT_CUSPARSE_CSR) {
/* Create matrices that will be on the device */
dA = SUNMatrix_cuSparse_NewCSR(SM_ROWS_S(A), SM_COLUMNS_S(A), SM_NNZ_S(A), cusp_handle);
if (dA == NULL) {
printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dA\n");
return(-1);
}
dB = SUNMatrix_cuSparse_NewCSR(SM_ROWS_S(B), SM_COLUMNS_S(B), SM_NNZ_S(B), cusp_handle);
if (dB == NULL) {
printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dB\n");
return(-1);
}
} else if (mattype == SUNMAT_CUSPARSE_BCSR) {
sunindextype block_nnz;
/* Calculate actual number of nonzeros per block */
block_nnz = SUNSparseMatrix_NNZ(A) / nblocks;
/* Create matrices that will be on the device */
dA = SUNMatrix_cuSparse_NewBlockCSR(nblocks, blkrows, blkrows, block_nnz, cusp_handle);
if (dA == NULL) {
printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dA\n");
return(-1);
}
dB = SUNMatrix_cuSparse_NewBlockCSR(nblocks, blkrows, blkrows, block_nnz, cusp_handle);
if (dB == NULL) {
printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dB\n");
return(-1);
}
} else {
printf("ERROR: unknown mattype\n");
return(-1);
}
/* Copy data to device */
fails = SUNMatrix_cuSparse_CopyToDevice(dA, SM_DATA_S(A), SM_INDEXPTRS_S(A), SM_INDEXVALS_S(A));
if (fails != 0) {
printf("ERROR: could not copy A to the device\n");
return(-1);
}
fails = SUNMatrix_cuSparse_CopyToDevice(dB, SM_DATA_S(B), SM_INDEXPTRS_S(B), SM_INDEXVALS_S(B));
if (fails != 0) {
printf("ERROR: could not copy B to the device\n");
return(-1);
}
/* Create/fill I matrix */
dI = NULL;
if (square) {
dI = SUNMatClone_cuSparse(dA);
if (dI == NULL) {
printf("ERROR: SUNMatClone_cuSparse returned NULL\n");
return(-1);
}
if (SUNMatCopy_cuSparse(dA, dI)) {
printf("ERROR: SUNMatCopy_cuSparse failed\n");
return(-1);
}
if (SUNMatScaleAddI_cuSparse(ZERO, dI)) {
printf("ERROR: SUNMatScaleAddI_cuSparse failed\n");
return(-1);
}
}
/* Create vectors */
d_x = N_VNew_Cuda(N);
d_y = N_VNew_Cuda(M);
if (d_x == NULL || d_y == NULL) {
printf("ERROR: N_VNew_Cuda returned NULL\n");
return(-1);
}
x = N_VMake_Serial(N, N_VGetHostArrayPointer_Cuda(d_x));
y = N_VMake_Serial(M, N_VGetHostArrayPointer_Cuda(d_y));
if (x == NULL || y == NULL) {
printf("ERROR: N_VMake_Serial returned NULL\n");
return(-1);
}
/* Zero the vectors on the host */
N_VConst(ZERO, x);
N_VConst(ZERO, y);
/* Fill vector on the host */
vecdata = N_VGetArrayPointer(x);
for(i=0; i<N; i++)
vecdata[i] = (realtype) rand() / (realtype) RAND_MAX;
/* Compute reference y on the host */
if (SUNMatMatvec(A, x, y)) {
printf("FAIL: SUNSparseMatrix matvec failure \n \n");
SUNMatDestroy(A); SUNMatDestroy(B);
SUNMatDestroy(C); SUNMatDestroy(D);
SUNMatDestroy(dA); SUNMatDestroy(dB);
N_VDestroy(x); N_VDestroy(y);
N_VDestroy(d_x); N_VDestroy(d_y);
if (square) {
SUNMatDestroy(dI);
}
return(1);
}
/* Copy vectors to the device */
N_VCopyToDevice_Cuda(d_x);
N_VCopyToDevice_Cuda(d_y);
printf("Setup complete\n");
printf("Beginning tests\n\n");
/* SUNMatrix Tests */
fails += Test_SUNMatGetID(dA, SUNMATRIX_CUSPARSE, 0);
fails += Test_SUNMatClone(dA, 0);
fails += Test_SUNMatCopy(dA, 0);
fails += Test_SUNMatZero(dA, 0);
fails += Test_SUNMatScaleAdd(dA, dI, 0);
if (square) fails += Test_SUNMatScaleAddI(dA, dI, 0);
fails += Test_SUNMatMatvec(dA, d_x, d_y, 0);
if (square) fails += Test_SetKernelExecPolicy(dI, 0);
/* Print result */
if (fails) {
SUNMatrix_cuSparse_CopyFromDevice(dA, SM_DATA_S(A), NULL, NULL);
SUNMatrix_cuSparse_CopyFromDevice(dB, SM_DATA_S(B), NULL, NULL);
printf("\nA =\n");
SUNSparseMatrix_Print(A,stdout);
printf("\nB =\n");
SUNSparseMatrix_Print(B,stdout);
N_VCopyFromDevice_Cuda(d_x);
N_VCopyFromDevice_Cuda(d_y);
printf("\nx\n");
N_VPrint_Cuda(d_x);
printf("\ny = Ax (reference)\n");
N_VPrint_Cuda(d_y);
} else {
printf("SUCCESS: SUNMatrix module passed all tests \n \n");
}
printf("Beginning teardown\n");
/* Free vectors and matrices */
N_VDestroy(x);
N_VDestroy(y);
N_VDestroy(d_x);
N_VDestroy(d_y);
SUNMatDestroy(A);
SUNMatDestroy(B);
SUNMatDestroy(C);
SUNMatDestroy(D);
SUNMatDestroy(dA);
SUNMatDestroy(dB);
if (square) {
SUNMatDestroy(dI);
}
hipsparseDestroy(cusp_handle);
printf("Teardown complete\n");
return(fails);
}
/* ----------------------------------------------------------------------
* Test the SUNMatrix_cuSparse_SetKernelExecPolicy function.
* --------------------------------------------------------------------*/
int Test_SetKernelExecPolicy(SUNMatrix I, int myid)
{
printf("HERE\n");
int print_all_ranks = 0;
realtype tol = 100*UNIT_ROUNDOFF;
SUNMatrix B = SUNMatClone(I);
/* check cloned matrix */
if (B == NULL) {
TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
TEST_STATUS(" After SUNMatClone, B == NULL \n \n", myid);
return(1);
}
/* copy data */
if (SUNMatCopy(I, B)) {
TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
TEST_STATUS(" SUNMatCopy returned nonzero \n \n", myid);
SUNMatDestroy(B);
return(1);
}
/* set kernel exec policy */
ATestExecPolicy exec_policy;
SUNMatrix_cuSparse_SetKernelExecPolicy(B, &exec_policy);
/* try out an operation */
if (SUNMatScaleAddI(RCONST(-1.0), B)) {
TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
TEST_STATUS(" SUNMatScaleAddI returned nonzero \n \n", myid);
SUNMatDestroy(B);
return(1);
}
/* check matrix */
if (check_matrix_entry(B, ZERO, tol)) {
TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
TEST_STATUS(" check_matrix_entry returned nonzero \n \n", myid);
SUNMatDestroy(B);
return(1);
}
TEST_STATUS(" PASSED test -- SetKernelExecPolicy \n", myid);
SUNMatDestroy(B);
return 0;
}
/* ----------------------------------------------------------------------
* Check matrix
* --------------------------------------------------------------------*/
int check_matrix(SUNMatrix dA, SUNMatrix dB, realtype tol)
{
int failure = 0;
SUNMatrix A, B;
realtype *Adata, *Bdata;
sunindextype *Aindexptrs, *Bindexptrs;
sunindextype *Aindexvals, *Bindexvals;
sunindextype i, ANP, Annz, Bnnz;
/* copy matrix data to host for the checks */
A = SUNSparseMatrix(SUNMatrix_cuSparse_Rows(dA), SUNMatrix_cuSparse_Columns(dA),
SUNMatrix_cuSparse_NNZ(dA), CSR_MAT);
B = SUNSparseMatrix(SUNMatrix_cuSparse_Rows(dB), SUNMatrix_cuSparse_Columns(dB),
SUNMatrix_cuSparse_NNZ(dB), CSR_MAT);
failure = SUNMatrix_cuSparse_CopyFromDevice(dA, SM_DATA_S(A),
SM_INDEXPTRS_S(A),
SM_INDEXVALS_S(A));
failure = SUNMatrix_cuSparse_CopyFromDevice(dB, SM_DATA_S(B),
SM_INDEXPTRS_S(B),
SM_INDEXVALS_S(B));
hipDeviceSynchronize();
/* get matrix pointers */
Adata = SUNSparseMatrix_Data(A);
Aindexptrs = SUNSparseMatrix_IndexPointers(A);
Aindexvals = SUNSparseMatrix_IndexValues(A);
ANP = SUNSparseMatrix_NP(A);
Annz = SUNSparseMatrix_NNZ(A);
Bdata = SUNSparseMatrix_Data(B);
Bindexptrs = SUNSparseMatrix_IndexPointers(B);
Bindexvals = SUNSparseMatrix_IndexValues(B);
Bnnz = SUNSparseMatrix_NNZ(B);
/* matrices must have same sparsetype, shape and actual data lengths */
if (SUNMatGetID(dA) != SUNMatGetID(dB)) {
printf(">>> ERROR: check_matrix: Different storage types (%d vs %d)\n",
SUNMatGetID(dA), SUNMatGetID(dB));
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
if (SUNMatrix_cuSparse_SparseType(A) != SUNMatrix_cuSparse_SparseType(B)) {
printf(">>> ERROR: check_matrix: Different storage types (%d vs %d)\n",
SUNMatrix_cuSparse_SparseType(A), SUNMatrix_cuSparse_SparseType(B));
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
if (SUNMatrix_cuSparse_Rows(dA) != SUNMatrix_cuSparse_Rows(dB)) {
printf(">>> ERROR: check_matrix: Different numbers of rows (%ld vs %ld)\n",
(long int) SUNMatrix_cuSparse_Rows(dA), (long int) SUNMatrix_cuSparse_Rows(dB));
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
if (SUNMatrix_cuSparse_Columns(dA) != SUNMatrix_cuSparse_Columns(dB)) {
printf(">>> ERROR: check_matrix: Different numbers of columns (%ld vs %ld)\n",
(long int) SUNMatrix_cuSparse_Columns(dA),
(long int) SUNMatrix_cuSparse_Columns(dB));
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
if (Annz != Bnnz) {
printf(">>> ERROR: check_matrix: Different numbers of nonzeros (%ld vs %ld)\n",
(long int) Annz, (long int) Bnnz);
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
/* compare sparsity patterns */
for (i=0; i<ANP; i++)
failure += (Aindexptrs[i] != Bindexptrs[i]);
if (failure > ZERO) {
printf(">>> ERROR: check_matrix: Different indexptrs \n");
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
for (i=0; i<Annz; i++)
failure += (Aindexvals[i] != Bindexvals[i]);
if (failure > ZERO) {
printf(">>> ERROR: check_matrix: Different indexvals \n");
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
/* compare matrix values */
for(i=0; i<Annz; i++)
failure += FNEQ(Adata[i], Bdata[i], tol);
if (failure > ZERO) {
printf(">>> ERROR: check_matrix: Different entries \n");
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
SUNMatDestroy(A); SUNMatDestroy(B);
return(0);
}
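/* ----------------------------------------------------------------------
* Check that every stored entry of the device matrix equals val
* --------------------------------------------------------------------*/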
int check_matrix_entry(SUNMatrix dA, realtype val, realtype tol)
{
int failure = 0;
realtype *Adata;
sunindextype i;
/* copy matrix data to host for the checks */
Adata = (realtype*) malloc(SUNMatrix_cuSparse_NNZ(dA)*sizeof(realtype));
failure = SUNMatrix_cuSparse_CopyFromDevice(dA, Adata, NULL, NULL);
hipDeviceSynchronize();
/* compare data */
for(i=0; i < SUNMatrix_cuSparse_NNZ(dA); i++) {
failure += FNEQ(Adata[i], val, tol);
}
free(Adata);
if (failure > ZERO)
return(1);
else
return(0);
}
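/* ----------------------------------------------------------------------
* Compare two vectors element-by-element after copying them to the host
* --------------------------------------------------------------------*/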
int check_vector(N_Vector expected, N_Vector computed, realtype tol)
{
int failure = 0;
realtype *xdata, *ydata;
sunindextype xldata, yldata;
sunindextype i;
/* get vector data */
xdata = N_VGetHostArrayPointer_Cuda(expected);
ydata = N_VGetHostArrayPointer_Cuda(computed);
/* copy data to host */
N_VCopyFromDevice_Cuda(expected);
N_VCopyFromDevice_Cuda(computed);
hipDeviceSynchronize();
/* check data lengths */
xldata = N_VGetLength_Cuda(expected);
yldata = N_VGetLength_Cuda(computed);
if (xldata != yldata) {
printf(">>> ERROR: check_vector: Different data array lengths \n");
return(1);
}
/* check vector data */
for(i=0; i < xldata; i++){
failure += FNEQ(xdata[i], ydata[i], tol);
}
if (failure > ZERO)
return(1);
else
return(0);
}
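/* ----------------------------------------------------------------------
* Utility predicates required by the generic SUNMatrix test routines
* --------------------------------------------------------------------*/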
booleantype has_data(SUNMatrix A)
{
realtype *Adata = SUNMatrix_cuSparse_Data(A);
if (Adata == NULL)
return SUNFALSE;
else
return SUNTRUE;
}
booleantype is_square(SUNMatrix A)
{
if (SUNMatrix_cuSparse_Rows(A) == SUNMatrix_cuSparse_Columns(A))
return SUNTRUE;
else
return SUNFALSE;
}
| c4c2e1938a5227dec5dfbcd695944f768d3d5cfb.cu | /*
* -----------------------------------------------------------------
* Programmer(s): Cody J. Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* SUNMATRIX_CUSPARSE unit tests.
* -----------------------------------------------------------------
*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <nvector/nvector_cuda.h>
#include <nvector/nvector_serial.h>
#include <sundials/sundials_math.h>
#include <sundials/sundials_matrix.h>
#include <sundials/sundials_types.h>
#include <sunmatrix/sunmatrix_cusparse.h>
#include <sunmatrix/sunmatrix_sparse.h>
#include "test_sunmatrix.h"
#include "dreadrb.h"
enum { IDENTITY, RANDOM, RBFILE };
/* Implementation specific test of SUNMatrix_cuSparse_SetKernelExecPolicy */
int Test_SetKernelExecPolicy(SUNMatrix A, int myid);
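/* Trivial execution policy (a single block of a single thread) used by
Test_SetKernelExecPolicy to exercise SUNMatrix_cuSparse_SetKernelExecPolicy */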
class ATestExecPolicy : public SUNCudaExecPolicy
{
public:
ATestExecPolicy(){}
virtual size_t gridSize(size_t numWorkElements = 0, size_t blockDim = 0) const
{
return 1;
}
virtual size_t blockSize(size_t numWorkElements = 0, size_t gridDim = 0) const
{
return 1;
}
virtual cudaStream_t stream() const
{
return 0;
}
virtual SUNCudaExecPolicy* clone() const
{
return static_cast<SUNCudaExecPolicy*>(new ATestExecPolicy());
}
};
/* ----------------------------------------------------------------------
* Main SUNMatrix Testing Routine
* --------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
int fails=0; /* counter for test failures */
sunindextype M, N; /* overall matrix dims */
sunindextype blkrows, blkcols; /* block matrix dims */
int nblocks; /* number of matrix blocks */
int block_nnz_max; /* max number of nnz in block */
int mattype; /* matrix storage type */
N_Vector x, y, d_x, d_y; /* test vectors */
realtype* vecdata; /* pointers to vector data */
SUNMatrix A, B, C, D, dA, dB, dI; /* test matrices */
realtype* matdata; /* pointer to matrix data */
int print_timing, square;
int matrix_to_use;
sunindextype i, j;
FILE* matrixfp;
char* filename;
cusparseStatus_t cusp_status;
cusparseHandle_t cusp_handle;
/* initialize some input variables */
blkrows = 0;
blkcols = 0;
nblocks = 0;
square = 0;
/* check input */
if (argc < 7) {
printf("ERROR: SIX (6) inputs required: matrix (filename|random|identity), matrix rows, matrix cols, number of blocks, matrix type (CSR/BCSR), print timing (0/1)\n");
return(-1);
}
/* determine what test matrix to use */
if (!strcmp(argv[1], "random")) {
matrix_to_use = RANDOM;
} else if (!strcmp(argv[1], "identity")) {
matrix_to_use = IDENTITY;
} else {
matrix_to_use = RBFILE;
filename = argv[1];
}
/* if we are not reading from a file, verify that the dimension args are legal */
if (matrix_to_use != RBFILE) {
blkrows = (sunindextype) atol(argv[2]);
if (blkrows <= 0) {
printf("ERROR: number of rows must be a positive integer\n");
return(-1);
}
blkcols = (sunindextype) atol(argv[3]);
if (blkcols <= 0) {
printf("ERROR: number of cols must be a positive integer\n");
return(-1);
}
square = (blkrows == blkcols) ? 1 : 0;
}
nblocks = (sunindextype) atol(argv[4]);
if (nblocks < 1) {
printf("ERROR: number of blocks must be a positive integer\n");
return(-1);
}
if (!strcmp(argv[5], "CSR")) {
mattype = SUNMAT_CUSPARSE_CSR;
if (nblocks != 1) {
printf("ERROR: the CSR format only supports 1 block\n");
return(-1);
}
} else if (!strcmp(argv[5], "BCSR")) {
mattype = SUNMAT_CUSPARSE_BCSR;
if (matrix_to_use == RBFILE) {
printf("ERROR: cannot read BCSR format from a file\n");
}
if (!square) {
printf("ERROR: the BCSR format only supports square block matrices\n");
return(-1);
}
} else {
printf("ERROR: matrix type must be CSR or BCSR\n");
return(-1);
}
print_timing = atoi(argv[6]);
SetTiming(print_timing);
/* Initialize cuSPARSE */
cusp_status = cusparseCreate(&cusp_handle);
if (cusp_status != CUSPARSE_STATUS_SUCCESS) {
printf("ERROR: could not create cuSPARSE handle\n");
return(-1);
}
/* Initialize vectors and matrices to NULL */
x = NULL;
y = NULL;
A = NULL;
B = NULL;
C = NULL;
D = NULL;
dA = NULL;
dB = NULL;
dI = NULL;
if (matrix_to_use == RANDOM) {
M = blkrows * nblocks;
N = blkcols * nblocks;
block_nnz_max = blkrows*blkcols / 2;
/* Create sparsity pattern for a block. */
sunindextype *cols = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));
sunindextype *rows = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));
for (i=0; i<block_nnz_max; i++) {
cols[i] = rand() % blkcols;
rows[i] = rand() % blkrows;
}
/* Fill matrix with uniform random data in [0,1/N] */
D = SUNDenseMatrix(M, N);
for (i=0; i<nblocks; i++) {
for (j=0; j<block_nnz_max; j++) {
sunindextype col = cols[j] + blkcols*i;
sunindextype row = rows[j] + blkrows*i;
matdata = SUNDenseMatrix_Column(D,col);
matdata[row] = (realtype) rand() / (realtype) RAND_MAX / N;
}
}
if (SUNMatScaleAddI(RCONST(1.0), D)) {
printf("ERROR: SUNMatScaleAddI failed for dense matrix D\n");
return(-1);
}
/* Fill matrix with uniform random data in [0,1/N] */
C = SUNDenseMatrix(M, N);
for (i=0; i<nblocks; i++) {
for (j=0; j<block_nnz_max; j++) {
sunindextype col = cols[j] + blkcols*i;
sunindextype row = rows[j] + blkrows*i;
matdata = SUNDenseMatrix_Column(C,col);
matdata[row] = (realtype) rand() / (realtype) RAND_MAX / N;
}
}
if (SUNMatScaleAddI(RCONST(1.0), C)) {
printf("ERROR: SUNMatScaleAddI failed for dense matrix C\n");
return(-1);
}
free(cols);
free(rows);
/* Create sparse matrices from dense */
A = SUNSparseFromDenseMatrix(C, ZERO, CSR_MAT);
if (A == NULL) {
printf("ERROR: SUNSparseFromDenseMatrix returned NULL for A\n");
return(-1);
}
B = SUNSparseFromDenseMatrix(D, ZERO, CSR_MAT);
if (B == NULL) {
printf("ERROR: SUNSparseFromDenseMatrix returned NULL B\n");
return(-1);
}
} else if (matrix_to_use == IDENTITY) {
M = blkrows * nblocks;
N = blkcols * nblocks;
D = SUNDenseMatrix(M, N);
SUNMatScaleAddI(RCONST(0.0), D);
if (SUNMatScaleAddI(RCONST(0.0), D)) {
printf("ERROR: SUNMatScaleAddI failed for dense matrix D\n");
return(-1);
}
C = SUNDenseMatrix(M, N);
if (SUNMatScaleAddI(RCONST(0.0), C)) {
printf("ERROR: SUNMatScaleAddI failed for dense matrix C\n");
return(-1);
}
/* Create sparse matrices from dense */
A = SUNSparseFromDenseMatrix(C, ZERO, CSR_MAT);
if (A == NULL) {
printf("ERROR: SUNSparseFromDenseMatrix returned NULL for A\n");
return(-1);
}
B = SUNSparseFromDenseMatrix(D, ZERO, CSR_MAT);
if (B == NULL) {
printf("ERROR: SUNSparseFromDenseMatrix returned NULL B\n");
return(-1);
}
} else {
SUNMatrix cscA;
matrixfp = fopen(filename, "r");
dreadrb_dist(0, matrixfp, &cscA);
fclose(matrixfp);
if (SUNSparseMatrix_ToCSR(cscA, &A)) {
printf("ERROR: cannot convert matrix that was read to CSR\n");
return(-1);
}
SUNMatDestroy(cscA);
if (SUNMatScaleAddI(RCONST(1.0), A)) {
printf("ERROR: SUNMatScaleAddI failed on matrix that read\n");
return(-1);
}
blkrows = SUNSparseMatrix_Rows(A);
blkcols = SUNSparseMatrix_Columns(A);
square = (blkrows == blkcols) ? 1 : 0;
nblocks = 1;
M = blkrows * nblocks;
N = blkcols * nblocks;
B = SUNMatClone(A);
if (B == NULL || (SUNMatCopy(A, B) != 0)) {
printf("ERROR: failed to SUNMatClone and SUNMatCopy\n");
return(-1);
}
}
printf("cuSPARSE SUNMatrix test: size %ld by %ld, nblocks %ld, block size %ld by %ld, format = %i\n\n",
(long int) M, (long int) N, (long int) nblocks, (long int) blkrows, (long int) blkcols, mattype);
if (mattype == SUNMAT_CUSPARSE_CSR) {
/* Create matrices that will be on the device */
dA = SUNMatrix_cuSparse_NewCSR(SM_ROWS_S(A), SM_COLUMNS_S(A), SM_NNZ_S(A), cusp_handle);
if (dA == NULL) {
printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dA\n");
return(-1);
}
dB = SUNMatrix_cuSparse_NewCSR(SM_ROWS_S(B), SM_COLUMNS_S(B), SM_NNZ_S(B), cusp_handle);
if (dB == NULL) {
printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dB\n");
return(-1);
}
} else if (mattype == SUNMAT_CUSPARSE_BCSR) {
sunindextype block_nnz;
/* Calculate actual number of nonzeros per block */
block_nnz = SUNSparseMatrix_NNZ(A) / nblocks;
/* Create matrices that will be on the device */
dA = SUNMatrix_cuSparse_NewBlockCSR(nblocks, blkrows, blkrows, block_nnz, cusp_handle);
if (dA == NULL) {
printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dA\n");
return(-1);
}
dB = SUNMatrix_cuSparse_NewBlockCSR(nblocks, blkrows, blkrows, block_nnz, cusp_handle);
if (dB == NULL) {
printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dB\n");
return(-1);
}
} else {
printf("ERROR: unknown mattype\n");
return(-1);
}
/* Copy data to device */
fails = SUNMatrix_cuSparse_CopyToDevice(dA, SM_DATA_S(A), SM_INDEXPTRS_S(A), SM_INDEXVALS_S(A));
if (fails != 0) {
printf("ERROR: could not copy A to the device\n");
return(-1);
}
fails = SUNMatrix_cuSparse_CopyToDevice(dB, SM_DATA_S(B), SM_INDEXPTRS_S(B), SM_INDEXVALS_S(B));
if (fails != 0) {
printf("ERROR: could not copy B to the device\n");
return(-1);
}
/* Create/fill I matrix */
dI = NULL;
if (square) {
dI = SUNMatClone_cuSparse(dA);
if (dI == NULL) {
printf("ERROR: SUNMatClone_cuSparse returned NULL\n");
return(-1);
}
if (SUNMatCopy_cuSparse(dA, dI)) {
printf("ERROR: SUNMatCopy_cuSparse failed\n");
return(-1);
}
if (SUNMatScaleAddI_cuSparse(ZERO, dI)) {
printf("ERROR: SUNMatScaleAddI_cuSparse failed\n");
return(-1);
}
}
/* Create vectors */
d_x = N_VNew_Cuda(N);
d_y = N_VNew_Cuda(M);
if (d_x == NULL || d_y == NULL) {
printf("ERROR: N_VNew_Cuda returned NULL\n");
return(-1);
}
x = N_VMake_Serial(N, N_VGetHostArrayPointer_Cuda(d_x));
y = N_VMake_Serial(M, N_VGetHostArrayPointer_Cuda(d_y));
if (x == NULL || y == NULL) {
printf("ERROR: N_VMake_Serial returned NULL\n");
return(-1);
}
/* Zero the vectors on the host */
N_VConst(ZERO, x);
N_VConst(ZERO, y);
/* Fill vector on the host */
vecdata = N_VGetArrayPointer(x);
for(i=0; i<N; i++)
vecdata[i] = (realtype) rand() / (realtype) RAND_MAX;
/* Compute reference y on the host */
if (SUNMatMatvec(A, x, y)) {
printf("FAIL: SUNSparseMatrix matvec failure \n \n");
SUNMatDestroy(A); SUNMatDestroy(B);
SUNMatDestroy(C); SUNMatDestroy(D);
SUNMatDestroy(dA); SUNMatDestroy(dB);
N_VDestroy(x); N_VDestroy(y);
N_VDestroy(d_x); N_VDestroy(d_y);
if (square) {
SUNMatDestroy(dI);
}
return(1);
}
/* Copy vectors to the device */
N_VCopyToDevice_Cuda(d_x);
N_VCopyToDevice_Cuda(d_y);
printf("Setup complete\n");
printf("Beginning tests\n\n");
/* SUNMatrix Tests */
fails += Test_SUNMatGetID(dA, SUNMATRIX_CUSPARSE, 0);
fails += Test_SUNMatClone(dA, 0);
fails += Test_SUNMatCopy(dA, 0);
fails += Test_SUNMatZero(dA, 0);
fails += Test_SUNMatScaleAdd(dA, dI, 0);
if (square) fails += Test_SUNMatScaleAddI(dA, dI, 0);
fails += Test_SUNMatMatvec(dA, d_x, d_y, 0);
if (square) fails += Test_SetKernelExecPolicy(dI, 0);
/* Print result */
if (fails) {
SUNMatrix_cuSparse_CopyFromDevice(dA, SM_DATA_S(A), NULL, NULL);
SUNMatrix_cuSparse_CopyFromDevice(dB, SM_DATA_S(B), NULL, NULL);
printf("\nA =\n");
SUNSparseMatrix_Print(A,stdout);
printf("\nB =\n");
SUNSparseMatrix_Print(B,stdout);
N_VCopyFromDevice_Cuda(d_x);
N_VCopyFromDevice_Cuda(d_y);
printf("\nx\n");
N_VPrint_Cuda(d_x);
printf("\ny = Ax (reference)\n");
N_VPrint_Cuda(d_y);
} else {
printf("SUCCESS: SUNMatrix module passed all tests \n \n");
}
printf("Beginning teardown\n");
/* Free vectors and matrices */
N_VDestroy(x);
N_VDestroy(y);
N_VDestroy(d_x);
N_VDestroy(d_y);
SUNMatDestroy(A);
SUNMatDestroy(B);
SUNMatDestroy(C);
SUNMatDestroy(D);
SUNMatDestroy(dA);
SUNMatDestroy(dB);
if (square) {
SUNMatDestroy(dI);
}
cusparseDestroy(cusp_handle);
printf("Teardown complete\n");
return(fails);
}
/* ----------------------------------------------------------------------
* Test the SUNMatrix_cuSparse_SetKernelExecPolicy function.
* --------------------------------------------------------------------*/
int Test_SetKernelExecPolicy(SUNMatrix I, int myid)
{
printf("HERE\n");
int print_all_ranks = 0;
realtype tol = 100*UNIT_ROUNDOFF;
SUNMatrix B = SUNMatClone(I);
/* check cloned matrix */
if (B == NULL) {
TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
TEST_STATUS(" After SUNMatClone, B == NULL \n \n", myid);
return(1);
}
/* copy data */
if (SUNMatCopy(I, B)) {
TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
TEST_STATUS(" SUNMatCopy returned nonzero \n \n", myid);
SUNMatDestroy(B);
return(1);
}
/* set kernel exec policy */
ATestExecPolicy exec_policy;
SUNMatrix_cuSparse_SetKernelExecPolicy(B, &exec_policy);
/* try out an operation */
if (SUNMatScaleAddI(RCONST(-1.0), B)) {
TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
TEST_STATUS(" SUNMatScaleAddI returned nonzero \n \n", myid);
SUNMatDestroy(B);
return(1);
}
/* check matrix */
if (check_matrix_entry(B, ZERO, tol)) {
TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
TEST_STATUS(" check_matrix_entry returned nonzero \n \n", myid);
SUNMatDestroy(B);
return(1);
}
TEST_STATUS(" PASSED test -- SetKernelExecPolicy \n", myid);
SUNMatDestroy(B);
return 0;
}
/* ----------------------------------------------------------------------
* Check matrix
* --------------------------------------------------------------------*/
int check_matrix(SUNMatrix dA, SUNMatrix dB, realtype tol)
{
int failure = 0;
SUNMatrix A, B;
realtype *Adata, *Bdata;
sunindextype *Aindexptrs, *Bindexptrs;
sunindextype *Aindexvals, *Bindexvals;
sunindextype i, ANP, Annz, Bnnz;
/* copy matrix data to host for the checks */
A = SUNSparseMatrix(SUNMatrix_cuSparse_Rows(dA), SUNMatrix_cuSparse_Columns(dA),
SUNMatrix_cuSparse_NNZ(dA), CSR_MAT);
B = SUNSparseMatrix(SUNMatrix_cuSparse_Rows(dB), SUNMatrix_cuSparse_Columns(dB),
SUNMatrix_cuSparse_NNZ(dB), CSR_MAT);
failure = SUNMatrix_cuSparse_CopyFromDevice(dA, SM_DATA_S(A),
SM_INDEXPTRS_S(A),
SM_INDEXVALS_S(A));
failure = SUNMatrix_cuSparse_CopyFromDevice(dB, SM_DATA_S(B),
SM_INDEXPTRS_S(B),
SM_INDEXVALS_S(B));
cudaDeviceSynchronize();
/* get matrix pointers */
Adata = SUNSparseMatrix_Data(A);
Aindexptrs = SUNSparseMatrix_IndexPointers(A);
Aindexvals = SUNSparseMatrix_IndexValues(A);
ANP = SUNSparseMatrix_NP(A);
Annz = SUNSparseMatrix_NNZ(A);
Bdata = SUNSparseMatrix_Data(B);
Bindexptrs = SUNSparseMatrix_IndexPointers(B);
Bindexvals = SUNSparseMatrix_IndexValues(B);
Bnnz = SUNSparseMatrix_NNZ(B);
/* matrices must have same sparsetype, shape and actual data lengths */
if (SUNMatGetID(dA) != SUNMatGetID(dB)) {
printf(">>> ERROR: check_matrix: Different storage types (%d vs %d)\n",
SUNMatGetID(dA), SUNMatGetID(dB));
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
if (SUNMatrix_cuSparse_SparseType(A) != SUNMatrix_cuSparse_SparseType(B)) {
printf(">>> ERROR: check_matrix: Different storage types (%d vs %d)\n",
SUNMatrix_cuSparse_SparseType(A), SUNMatrix_cuSparse_SparseType(B));
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
if (SUNMatrix_cuSparse_Rows(dA) != SUNMatrix_cuSparse_Rows(dB)) {
printf(">>> ERROR: check_matrix: Different numbers of rows (%ld vs %ld)\n",
(long int) SUNMatrix_cuSparse_Rows(dA), (long int) SUNMatrix_cuSparse_Rows(dB));
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
if (SUNMatrix_cuSparse_Columns(dA) != SUNMatrix_cuSparse_Columns(dB)) {
printf(">>> ERROR: check_matrix: Different numbers of columns (%ld vs %ld)\n",
(long int) SUNMatrix_cuSparse_Columns(dA),
(long int) SUNMatrix_cuSparse_Columns(dB));
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
if (Annz != Bnnz) {
printf(">>> ERROR: check_matrix: Different numbers of nonzeros (%ld vs %ld)\n",
(long int) Annz, (long int) Bnnz);
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
/* compare sparsity patterns */
for (i=0; i<ANP; i++)
failure += (Aindexptrs[i] != Bindexptrs[i]);
if (failure > ZERO) {
printf(">>> ERROR: check_matrix: Different indexptrs \n");
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
for (i=0; i<Annz; i++)
failure += (Aindexvals[i] != Bindexvals[i]);
if (failure > ZERO) {
printf(">>> ERROR: check_matrix: Different indexvals \n");
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
/* compare matrix values */
for(i=0; i<Annz; i++)
failure += FNEQ(Adata[i], Bdata[i], tol);
if (failure > ZERO) {
printf(">>> ERROR: check_matrix: Different entries \n");
SUNMatDestroy(A); SUNMatDestroy(B);
return(1);
}
SUNMatDestroy(A); SUNMatDestroy(B);
return(0);
}
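/* ----------------------------------------------------------------------
* Check that every stored entry of the device matrix equals val
* --------------------------------------------------------------------*/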
int check_matrix_entry(SUNMatrix dA, realtype val, realtype tol)
{
int failure = 0;
realtype *Adata;
sunindextype i;
/* copy matrix data to host for the checks */
Adata = (realtype*) malloc(SUNMatrix_cuSparse_NNZ(dA)*sizeof(realtype));
failure = SUNMatrix_cuSparse_CopyFromDevice(dA, Adata, NULL, NULL);
cudaDeviceSynchronize();
/* compare data */
for(i=0; i < SUNMatrix_cuSparse_NNZ(dA); i++) {
failure += FNEQ(Adata[i], val, tol);
}
free(Adata);
if (failure > ZERO)
return(1);
else
return(0);
}
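/* ----------------------------------------------------------------------
* Compare two vectors element-by-element after copying them to the host
* --------------------------------------------------------------------*/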
int check_vector(N_Vector expected, N_Vector computed, realtype tol)
{
int failure = 0;
realtype *xdata, *ydata;
sunindextype xldata, yldata;
sunindextype i;
/* get vector data */
xdata = N_VGetHostArrayPointer_Cuda(expected);
ydata = N_VGetHostArrayPointer_Cuda(computed);
/* copy data to host */
N_VCopyFromDevice_Cuda(expected);
N_VCopyFromDevice_Cuda(computed);
cudaDeviceSynchronize();
/* check data lengths */
xldata = N_VGetLength_Cuda(expected);
yldata = N_VGetLength_Cuda(computed);
if (xldata != yldata) {
printf(">>> ERROR: check_vector: Different data array lengths \n");
return(1);
}
/* check vector data */
for(i=0; i < xldata; i++){
failure += FNEQ(xdata[i], ydata[i], tol);
}
if (failure > ZERO)
return(1);
else
return(0);
}
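/* ----------------------------------------------------------------------
* Utility predicates required by the generic SUNMatrix test routines
* --------------------------------------------------------------------*/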
booleantype has_data(SUNMatrix A)
{
realtype *Adata = SUNMatrix_cuSparse_Data(A);
if (Adata == NULL)
return SUNFALSE;
else
return SUNTRUE;
}
booleantype is_square(SUNMatrix A)
{
if (SUNMatrix_cuSparse_Rows(A) == SUNMatrix_cuSparse_Columns(A))
return SUNTRUE;
else
return SUNFALSE;
}
|
b483d05686ae9d4e9584b857d53b8020063e0add.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/random/rng.cuh>
#include "../test_utils.h"
#include "matrix_vector_op.cuh"
namespace raft {
namespace linalg {
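/* Test parameters: comparison tolerance, matrix shape, storage layout and
broadcast direction, whether the two-vector overload is exercised, and the
RNG seed used to generate the inputs. */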
template <typename T, typename IdxType = int>
struct MatVecOpInputs {
T tolerance;
IdxType rows, cols;
bool rowMajor, bcastAlongRows, useTwoVectors;
unsigned long long int seed;
};
template <typename T, typename IdxType>
::std::ostream &operator<<(::std::ostream &os,
const MatVecOpInputs<T, IdxType> &dims) {
return os;
}
// Or else, we get the following compilation error: the enclosing parent function
// for an extended __device__ lambda cannot have private or protected access
// within its class
template <typename T, typename IdxType>
void matrixVectorOpLaunch(T *out, const T *in, const T *vec1, const T *vec2,
IdxType D, IdxType N, bool rowMajor,
bool bcastAlongRows, bool useTwoVectors,
hipStream_t stream) {
if (useTwoVectors) {
matrixVectorOp(
out, in, vec1, vec2, D, N, rowMajor, bcastAlongRows,
[] __device__(T a, T b, T c) { return a + b + c; }, stream);
} else {
matrixVectorOp(
out, in, vec1, D, N, rowMajor, bcastAlongRows,
[] __device__(T a, T b) { return a + b; }, stream);
}
}
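/* Fixture: generates random inputs, computes a reference result with
naiveMatVec and the result under test with matrixVectorOp; the TEST_P
bodies then compare the two device arrays. */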
template <typename T, typename IdxType>
class MatVecOpTest
: public ::testing::TestWithParam<MatVecOpInputs<T, IdxType>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MatVecOpInputs<T, IdxType>>::GetParam();
raft::random::Rng r(params.seed);
IdxType N = params.rows, D = params.cols;
IdxType len = N * D;
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
IdxType vecLen = params.bcastAlongRows ? D : N;
allocate(vec1, vecLen);
allocate(vec2, vecLen);
r.uniform(in, len, (T)-1.0, (T)1.0, stream);
r.uniform(vec1, vecLen, (T)-1.0, (T)1.0, stream);
r.uniform(vec2, vecLen, (T)-1.0, (T)1.0, stream);
if (params.useTwoVectors) {
naiveMatVec(out_ref, in, vec1, vec2, D, N, params.rowMajor,
params.bcastAlongRows, (T)1.0);
} else {
naiveMatVec(out_ref, in, vec1, D, N, params.rowMajor,
params.bcastAlongRows, (T)1.0);
}
matrixVectorOpLaunch(out, in, vec1, vec2, D, N, params.rowMajor,
params.bcastAlongRows, params.useTwoVectors, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(vec1));
CUDA_CHECK(hipFree(vec2));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(in));
}
protected:
MatVecOpInputs<T, IdxType> params;
T *in, *out, *out_ref, *vec1, *vec2;
};
const std::vector<MatVecOpInputs<float, int>> inputsf_i32 = {
{0.00001f, 1024, 32, true, true, false, 1234ULL},
{0.00001f, 1024, 64, true, true, false, 1234ULL},
{0.00001f, 1024, 32, true, false, false, 1234ULL},
{0.00001f, 1024, 64, true, false, false, 1234ULL},
{0.00001f, 1024, 32, false, true, false, 1234ULL},
{0.00001f, 1024, 64, false, true, false, 1234ULL},
{0.00001f, 1024, 32, false, false, false, 1234ULL},
{0.00001f, 1024, 64, false, false, false, 1234ULL},
{0.00001f, 1024, 32, true, true, true, 1234ULL},
{0.00001f, 1024, 64, true, true, true, 1234ULL},
{0.00001f, 1024, 32, true, false, true, 1234ULL},
{0.00001f, 1024, 64, true, false, true, 1234ULL},
{0.00001f, 1024, 32, false, true, true, 1234ULL},
{0.00001f, 1024, 64, false, true, true, 1234ULL},
{0.00001f, 1024, 32, false, false, true, 1234ULL},
{0.00001f, 1024, 64, false, false, true, 1234ULL}};
typedef MatVecOpTest<float, int> MatVecOpTestF_i32;
TEST_P(MatVecOpTestF_i32, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.rows * params.cols,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MatVecOpTests, MatVecOpTestF_i32,
::testing::ValuesIn(inputsf_i32));
const std::vector<MatVecOpInputs<float, size_t>> inputsf_i64 = {
{0.00001f, 2500, 250, false, false, false, 1234ULL},
{0.00001f, 2500, 250, false, false, true, 1234ULL}};
typedef MatVecOpTest<float, size_t> MatVecOpTestF_i64;
TEST_P(MatVecOpTestF_i64, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.rows * params.cols,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MatVecOpTests, MatVecOpTestF_i64,
::testing::ValuesIn(inputsf_i64));
const std::vector<MatVecOpInputs<double, int>> inputsd_i32 = {
{0.0000001, 1024, 32, true, true, false, 1234ULL},
{0.0000001, 1024, 64, true, true, false, 1234ULL},
{0.0000001, 1024, 32, true, false, false, 1234ULL},
{0.0000001, 1024, 64, true, false, false, 1234ULL},
{0.0000001, 1024, 32, false, true, false, 1234ULL},
{0.0000001, 1024, 64, false, true, false, 1234ULL},
{0.0000001, 1024, 32, false, false, false, 1234ULL},
{0.0000001, 1024, 64, false, false, false, 1234ULL},
{0.0000001, 1024, 32, true, true, true, 1234ULL},
{0.0000001, 1024, 64, true, true, true, 1234ULL},
{0.0000001, 1024, 32, true, false, true, 1234ULL},
{0.0000001, 1024, 64, true, false, true, 1234ULL},
{0.0000001, 1024, 32, false, true, true, 1234ULL},
{0.0000001, 1024, 64, false, true, true, 1234ULL},
{0.0000001, 1024, 32, false, false, true, 1234ULL},
{0.0000001, 1024, 64, false, false, true, 1234ULL}};
typedef MatVecOpTest<double, int> MatVecOpTestD_i32;
TEST_P(MatVecOpTestD_i32, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.rows * params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MatVecOpTests, MatVecOpTestD_i32,
::testing::ValuesIn(inputsd_i32));
const std::vector<MatVecOpInputs<double, size_t>> inputsd_i64 = {
{0.0000001, 2500, 250, false, false, false, 1234ULL},
{0.0000001, 2500, 250, false, false, true, 1234ULL}};
typedef MatVecOpTest<double, size_t> MatVecOpTestD_i64;
TEST_P(MatVecOpTestD_i64, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.rows * params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MatVecOpTests, MatVecOpTestD_i64,
::testing::ValuesIn(inputsd_i64));
} // end namespace linalg
} // end namespace raft
| b483d05686ae9d4e9584b857d53b8020063e0add.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/random/rng.cuh>
#include "../test_utils.h"
#include "matrix_vector_op.cuh"
namespace raft {
namespace linalg {
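/* Test parameters: comparison tolerance, matrix shape, storage layout and
broadcast direction, whether the two-vector overload is exercised, and the
RNG seed used to generate the inputs. */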
template <typename T, typename IdxType = int>
struct MatVecOpInputs {
T tolerance;
IdxType rows, cols;
bool rowMajor, bcastAlongRows, useTwoVectors;
unsigned long long int seed;
};
template <typename T, typename IdxType>
::std::ostream &operator<<(::std::ostream &os,
const MatVecOpInputs<T, IdxType> &dims) {
return os;
}
// Or else, we get the following compilation error: the enclosing parent function
// for an extended __device__ lambda cannot have private or protected access
// within its class
template <typename T, typename IdxType>
void matrixVectorOpLaunch(T *out, const T *in, const T *vec1, const T *vec2,
IdxType D, IdxType N, bool rowMajor,
bool bcastAlongRows, bool useTwoVectors,
cudaStream_t stream) {
if (useTwoVectors) {
matrixVectorOp(
out, in, vec1, vec2, D, N, rowMajor, bcastAlongRows,
[] __device__(T a, T b, T c) { return a + b + c; }, stream);
} else {
matrixVectorOp(
out, in, vec1, D, N, rowMajor, bcastAlongRows,
[] __device__(T a, T b) { return a + b; }, stream);
}
}
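/* Fixture: generates random inputs, computes a reference result with
naiveMatVec and the result under test with matrixVectorOp; the TEST_P
bodies then compare the two device arrays. */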
template <typename T, typename IdxType>
class MatVecOpTest
: public ::testing::TestWithParam<MatVecOpInputs<T, IdxType>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MatVecOpInputs<T, IdxType>>::GetParam();
raft::random::Rng r(params.seed);
IdxType N = params.rows, D = params.cols;
IdxType len = N * D;
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
IdxType vecLen = params.bcastAlongRows ? D : N;
allocate(vec1, vecLen);
allocate(vec2, vecLen);
r.uniform(in, len, (T)-1.0, (T)1.0, stream);
r.uniform(vec1, vecLen, (T)-1.0, (T)1.0, stream);
r.uniform(vec2, vecLen, (T)-1.0, (T)1.0, stream);
if (params.useTwoVectors) {
naiveMatVec(out_ref, in, vec1, vec2, D, N, params.rowMajor,
params.bcastAlongRows, (T)1.0);
} else {
naiveMatVec(out_ref, in, vec1, D, N, params.rowMajor,
params.bcastAlongRows, (T)1.0);
}
matrixVectorOpLaunch(out, in, vec1, vec2, D, N, params.rowMajor,
params.bcastAlongRows, params.useTwoVectors, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(vec1));
CUDA_CHECK(cudaFree(vec2));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(in));
}
protected:
MatVecOpInputs<T, IdxType> params;
T *in, *out, *out_ref, *vec1, *vec2;
};
const std::vector<MatVecOpInputs<float, int>> inputsf_i32 = {
{0.00001f, 1024, 32, true, true, false, 1234ULL},
{0.00001f, 1024, 64, true, true, false, 1234ULL},
{0.00001f, 1024, 32, true, false, false, 1234ULL},
{0.00001f, 1024, 64, true, false, false, 1234ULL},
{0.00001f, 1024, 32, false, true, false, 1234ULL},
{0.00001f, 1024, 64, false, true, false, 1234ULL},
{0.00001f, 1024, 32, false, false, false, 1234ULL},
{0.00001f, 1024, 64, false, false, false, 1234ULL},
{0.00001f, 1024, 32, true, true, true, 1234ULL},
{0.00001f, 1024, 64, true, true, true, 1234ULL},
{0.00001f, 1024, 32, true, false, true, 1234ULL},
{0.00001f, 1024, 64, true, false, true, 1234ULL},
{0.00001f, 1024, 32, false, true, true, 1234ULL},
{0.00001f, 1024, 64, false, true, true, 1234ULL},
{0.00001f, 1024, 32, false, false, true, 1234ULL},
{0.00001f, 1024, 64, false, false, true, 1234ULL}};
typedef MatVecOpTest<float, int> MatVecOpTestF_i32;
TEST_P(MatVecOpTestF_i32, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.rows * params.cols,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MatVecOpTests, MatVecOpTestF_i32,
::testing::ValuesIn(inputsf_i32));
const std::vector<MatVecOpInputs<float, size_t>> inputsf_i64 = {
{0.00001f, 2500, 250, false, false, false, 1234ULL},
{0.00001f, 2500, 250, false, false, true, 1234ULL}};
typedef MatVecOpTest<float, size_t> MatVecOpTestF_i64;
TEST_P(MatVecOpTestF_i64, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.rows * params.cols,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MatVecOpTests, MatVecOpTestF_i64,
::testing::ValuesIn(inputsf_i64));
const std::vector<MatVecOpInputs<double, int>> inputsd_i32 = {
{0.0000001, 1024, 32, true, true, false, 1234ULL},
{0.0000001, 1024, 64, true, true, false, 1234ULL},
{0.0000001, 1024, 32, true, false, false, 1234ULL},
{0.0000001, 1024, 64, true, false, false, 1234ULL},
{0.0000001, 1024, 32, false, true, false, 1234ULL},
{0.0000001, 1024, 64, false, true, false, 1234ULL},
{0.0000001, 1024, 32, false, false, false, 1234ULL},
{0.0000001, 1024, 64, false, false, false, 1234ULL},
{0.0000001, 1024, 32, true, true, true, 1234ULL},
{0.0000001, 1024, 64, true, true, true, 1234ULL},
{0.0000001, 1024, 32, true, false, true, 1234ULL},
{0.0000001, 1024, 64, true, false, true, 1234ULL},
{0.0000001, 1024, 32, false, true, true, 1234ULL},
{0.0000001, 1024, 64, false, true, true, 1234ULL},
{0.0000001, 1024, 32, false, false, true, 1234ULL},
{0.0000001, 1024, 64, false, false, true, 1234ULL}};
typedef MatVecOpTest<double, int> MatVecOpTestD_i32;
TEST_P(MatVecOpTestD_i32, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.rows * params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MatVecOpTests, MatVecOpTestD_i32,
::testing::ValuesIn(inputsd_i32));
const std::vector<MatVecOpInputs<double, size_t>> inputsd_i64 = {
{0.0000001, 2500, 250, false, false, false, 1234ULL},
{0.0000001, 2500, 250, false, false, true, 1234ULL}};
typedef MatVecOpTest<double, size_t> MatVecOpTestD_i64;
TEST_P(MatVecOpTestD_i64, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.rows * params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MatVecOpTests, MatVecOpTestD_i64,
::testing::ValuesIn(inputsd_i64));
} // end namespace linalg
} // end namespace raft
|
0f8407b8e4dadb0edea78cc4d5a3d14ef2a5d6e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "DeviceLiMapSv3.cuh"
#include <iostream>
#include "cuda_shared.h"
#include <hip/hip_cooperative_groups.h>
#include <cuda/std/functional>
#include "cublas_shared.h"
#include "kernels/misc.cuh"
#include "kernels/square_sum.cuh"
#include "kernels/matrix2vector.cuh"
#include "kernels/threshold.cuh"
static __device__ float* _solutionD;
static __device__ float* _signalD;
static __device__ float* _dictionaryD;
static __device__ float* _dictionaryInverseD;
static __device__ float* _alphaD;
static __device__ float* _alphaNewD;
static __device__ float* _beta;
static __device__ float* _intermD;
static __device__ float _signalSquareSum;
static __device__ float _alphaDiffSquareSum;
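// Computes the starting alpha = DINV * signal: each row idy accumulates an
// unrolled partial dot product of the dictionary inverse with the signal, and
// the block-reduced sums are atomically added into both _alphaD and _alphaNewD.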
template<int unrollFactor>
__global__ void GetAlphaImprv(size_t dictionaryWords, size_t signalSize) {
size_t idx = blockIdx.x * (blockDim.x * unrollFactor) + threadIdx.x;
size_t idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idy >= dictionaryWords) return;
float data = 0.0f;
#pragma unroll
for (size_t i = 0; i < unrollFactor; i++)
{
size_t vOffset = idx + i * blockDim.x;
float dicInverse = vOffset < signalSize ? _dictionaryInverseD[idy * signalSize + vOffset] : 0.0f;
float signal = vOffset < signalSize ? _signalD[vOffset] : 0.0f;
//data += (dicInverse * signal);
data = fmaf(dicInverse, signal, data);
}
KernelReduce<void(float*, float*, float), float*, float*>(data, [](float* ptr1, float* ptr2, float sum) {
atomicAdd(ptr1, sum);
atomicAdd(ptr2, sum);
}, &_alphaD[idy], &_alphaNewD[idy]);
}
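// Entry-point kernel, launched with a single thread: it drives the whole LiMapS
// iteration from the device, enqueueing the child kernels on the default stream.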
__global__ void LiMapS2(size_t dictionaryWords, size_t signalSize) {
// 1) The first step of the LiMapS algorithm is to calculate the starting lambda coefficient. In order to do so, we need to calculate
// the signal norm. So we enqueue the SquareSum operation on the default stream and then we wait for it.
// The norm is fundamental for the next steps, so there is nothing we can do to avoid the sync time waste
_signalSquareSum = 0.0f;
dim3 blocks(128);
dim3 red8DicGridSize = GetGridSize(blocks, dictionaryWords, 8);
dim3 red8SignalGridSize = GetGridSize(blocks, signalSize, 8);
SquareSumKrnlUnroll<8> << <red8SignalGridSize, blocks, blocks.x / warpSize >> > (_signalD, signalSize, &_signalSquareSum);
CUDA_CHECKD(hipDeviceSynchronize());
assert(_signalSquareSum >= 0.0f);
float t = sqrtf(_signalSquareSum);
float lambda = 1.0f / t;
_beta = new float[dictionaryWords];
_intermD = new float[signalSize];
// 2) The second step of the algorithm is to prepare the starting alpha vector, so here too we
// launch the kernel calculation and synchronize with the device
// Is it necessary??
//FillZero<1> << <gridSize, blocks >> > (_alphaD, dictionaryWords);
//FillZero<1> << <gridSize, blocks >> > (_alphaOldD, dictionaryWords);
dim3 gridSize = red8SignalGridSize;
gridSize.y = dictionaryWords;
int sharedMemSize = blocks.x / warpSize;
GetAlphaImprv<8> << <gridSize, blocks, sharedMemSize >> > (dictionaryWords, signalSize);
CUDA_CHECKD(hipPeekAtLastError());
CUDA_CHECKD(hipDeviceSynchronize());
int i = 0;
for (i = 0; i < 1000; i++)
{
// We set the alphaOld as the current alpha. We can do this by just swapping the pointer, avoiding
// useless data transfer
cuda::std::swap(_alphaD, _alphaNewD);
// From here, we split the computation of the next alpha into different steps. This is necessary since some calculations
// depend on data that should be accessed after a global sync point (e.g. after calculating the intermediate (dic * beta - sig) vector).
// Since global sync CANNOT be achieved (at least on old devices that do not support the grid_group::sync() method), we can do better:
// we just queue the split work on the default stream, and then we sync with the device at the end from this kernel.
// In this way, the work is executed with all data dependencies respected
// 3.1) We need to compute the beta vector for this iteration
CalculateBeta<8> << <red8DicGridSize, blocks, 0 >> > (_alphaD, _beta, lambda, dictionaryWords);
// 3.2) We need to compute the intermediate (dic * beta - sig) vector
CopyTo<8> << <red8SignalGridSize, blocks, 0 >> > (_signalD, signalSize, _intermD, true);
gridSize = red8DicGridSize;
//gridSize.x = 80;
gridSize.y = signalSize;
int sharedMemSize = blocks.x / warpSize;
Matrix2Vector<8, false> << <gridSize, blocks, sharedMemSize >> > (_dictionaryD, _beta, _intermD, dictionaryWords, signalSize);
CUDA_CHECKD(hipPeekAtLastError());
// 3.3) We compute the new alpha with the thresholding at the end
CopyTo<8> << <red8DicGridSize, blocks, 0 >> > (_beta, dictionaryWords, _alphaNewD, false);
blocks.y = 1;
gridSize = red8SignalGridSize;
//gridSize.x = 80;
gridSize.y = (dictionaryWords + 1) / 2;
gridSize.y = dictionaryWords;
sharedMemSize = blocks.x / warpSize;
Matrix2Vector<8, true> << <gridSize, blocks, sharedMemSize >> > (_dictionaryInverseD, _intermD, _alphaNewD, signalSize, dictionaryWords);
CUDA_CHECKD(hipPeekAtLastError());
// NB: Benchmarks say that 128 threads per block should result in the best occupancy for the
// threshold kernel
blocks.y = 1;
ThresholdVector<8> << <red8DicGridSize, blocks >> > (_alphaNewD, dictionaryWords);
lambda = 1.01f * lambda;
// 3.4) We see how much alpha is changed
_alphaDiffSquareSum = 0.0f;
SquareDiffSumKrnlUnroll<8> << <red8DicGridSize, blocks, sharedMemSize >> > (_alphaNewD, _alphaD, dictionaryWords, &_alphaDiffSquareSum);
CUDA_CHECKD(hipDeviceSynchronize());
float norm = sqrtf(_alphaDiffSquareSum);
if (norm < 1e-5f) {
break;
}
}
printf("kernel iterations: %d\r\n", i);
delete[] _beta;
delete[] _intermD;
}
DeviceLiMapSv3::DeviceLiMapSv3(const float* solution, const float* signal, const float* D, const float* DINV, size_t dictionaryWords, size_t signalSize)
: BaseLiMapS(solution, signal, D, DINV, dictionaryWords, signalSize)
{
_alphaH.resize(_dictionaryWords);
// We create the cuda pointers here and then we copy the pointer values to the device symbols. In this way
// memory disposal should be automatically handled by the class
_solutionPtr = make_cuda<float>(dictionaryWords);
_signalPtr = make_cuda<float>(signalSize);
_dictionaryPtr = make_cuda<float>(dictionaryWords * signalSize);
_dictionaryInversePtr = make_cuda<float>(dictionaryWords * signalSize);
_alphaPtr = make_cuda<float>(dictionaryWords);
_alphaOldPtr = make_cuda<float>(dictionaryWords);
float* dummyPtr = _solutionPtr.get();
CUDA_CHECK(hipMemcpyToSymbol(_solutionD, &dummyPtr, sizeof(void*)));
dummyPtr = _signalPtr.get();
CUDA_CHECK(hipMemcpyToSymbol(_signalD, &dummyPtr, sizeof(void*)));
dummyPtr = _dictionaryPtr.get();
CUDA_CHECK(hipMemcpyToSymbol(_dictionaryD, &dummyPtr, sizeof(void*)));
dummyPtr = _dictionaryInversePtr.get();
CUDA_CHECK(hipMemcpyToSymbol(_dictionaryInverseD, &dummyPtr, sizeof(void*)));
dummyPtr = _alphaPtr.get();
CUDA_CHECK(hipMemcpyToSymbol(_alphaD, &dummyPtr, sizeof(void*)));
dummyPtr = _alphaOldPtr.get();
CUDA_CHECK(hipMemcpyToSymbol(_alphaNewD, &dummyPtr, sizeof(void*)));
}
void DeviceLiMapSv3::Execute(int iterations)
{
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// We launch the memory copies asynchronously here and then we wait on the sync point at the end of the function.
// In this way we first enqueue all the work on the NULL stream and then we wait, minimizing the "wasted" time in CPU-GPU
// command execution
CUDA_CHECK(hipMemcpyAsync(_signalPtr.get(), _signalHost, sizeof(float) * _signalSize, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyAsync(_dictionaryInversePtr.get(), _dictionaryInverseHost, sizeof(float) * _dictionaryWords * _signalSize, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyAsync(_dictionaryPtr.get(), _dictionaryHost, sizeof(float) * _dictionaryWords * _signalSize, hipMemcpyHostToDevice));
// The LiMapS kernel will dynamically launch its own kernels, so only one thread is necessary.
// By doing this, we avoid the CPU-GPU communication time for launching kernels
hipEventRecord(start);
LiMapS2 << < 1, 1 >> > (_dictionaryWords, _signalSize);
hipEventRecord(stop);
CUDA_CHECK(hipMemcpyAsync(_alphaH.data(), _alphaPtr.get(), sizeof(float) * _dictionaryWords, hipMemcpyDeviceToHost));
CUDA_CHECK(hipDeviceSynchronize());
// Let's just also measure the kernel exec time to see the mem copies/sync overhead
float ms;
hipEventElapsedTime(&ms, start, stop);
std::cout << "Event elapsed: " << ms << " ms" << std::endl;
}
| 0f8407b8e4dadb0edea78cc4d5a3d14ef2a5d6e1.cu | #include "DeviceLiMapSv3.cuh"
#include <iostream>
#include "cuda_shared.h"
#include <cooperative_groups.h>
#include <cuda/std/functional>
#include "cublas_shared.h"
#include "kernels/misc.cuh"
#include "kernels/square_sum.cuh"
#include "kernels/matrix2vector.cuh"
#include "kernels/threshold.cuh"
static __device__ float* _solutionD;
static __device__ float* _signalD;
static __device__ float* _dictionaryD;
static __device__ float* _dictionaryInverseD;
static __device__ float* _alphaD;
static __device__ float* _alphaNewD;
static __device__ float* _beta;
static __device__ float* _intermD;
static __device__ float _signalSquareSum;
static __device__ float _alphaDiffSquareSum;
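// Computes the starting alpha = DINV * signal: each row idy accumulates an
// unrolled partial dot product of the dictionary inverse with the signal, and
// the block-reduced sums are atomically added into both _alphaD and _alphaNewD.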
template<int unrollFactor>
__global__ void GetAlphaImprv(size_t dictionaryWords, size_t signalSize) {
size_t idx = blockIdx.x * (blockDim.x * unrollFactor) + threadIdx.x;
size_t idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idy >= dictionaryWords) return;
float data = 0.0f;
#pragma unroll
for (size_t i = 0; i < unrollFactor; i++)
{
size_t vOffset = idx + i * blockDim.x;
float dicInverse = vOffset < signalSize ? _dictionaryInverseD[idy * signalSize + vOffset] : 0.0f;
float signal = vOffset < signalSize ? _signalD[vOffset] : 0.0f;
//data += (dicInverse * signal);
data = fmaf(dicInverse, signal, data);
}
KernelReduce<void(float*, float*, float), float*, float*>(data, [](float* ptr1, float* ptr2, float sum) {
atomicAdd(ptr1, sum);
atomicAdd(ptr2, sum);
}, &_alphaD[idy], &_alphaNewD[idy]);
}
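// Entry-point kernel, launched with a single thread: it drives the whole LiMapS
// iteration from the device, enqueueing the child kernels on the default stream.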
__global__ void LiMapS2(size_t dictionaryWords, size_t signalSize) {
// 1) The first step of the LiMapS algorithm is to calculate the starting lambda coefficient. In order to do so, we need to calculate
// the signal norm. So we enqueue the SquareSum operation on the default stream and then we wait for it.
// The norm is fundamental for the next steps, so there is nothing we can do to avoid the sync time waste
_signalSquareSum = 0.0f;
dim3 blocks(128);
dim3 red8DicGridSize = GetGridSize(blocks, dictionaryWords, 8);
dim3 red8SignalGridSize = GetGridSize(blocks, signalSize, 8);
SquareSumKrnlUnroll<8> << <red8SignalGridSize, blocks, blocks.x / warpSize >> > (_signalD, signalSize, &_signalSquareSum);
CUDA_CHECKD(cudaDeviceSynchronize());
assert(_signalSquareSum >= 0.0f);
float t = sqrtf(_signalSquareSum);
float lambda = 1.0f / t;
_beta = new float[dictionaryWords];
_intermD = new float[signalSize];
// 2) The second step of the algorithm is to prepare the starting alpha vector, so here too we
// launch the kernel calculation and synchronize with the device
// Is it necessary??
//FillZero<1> << <gridSize, blocks >> > (_alphaD, dictionaryWords);
//FillZero<1> << <gridSize, blocks >> > (_alphaOldD, dictionaryWords);
dim3 gridSize = red8SignalGridSize;
gridSize.y = dictionaryWords;
int sharedMemSize = blocks.x / warpSize;
GetAlphaImprv<8> << <gridSize, blocks, sharedMemSize >> > (dictionaryWords, signalSize);
CUDA_CHECKD(cudaPeekAtLastError());
CUDA_CHECKD(cudaDeviceSynchronize());
int i = 0;
for (i = 0; i < 1000; i++)
{
// We set the alphaOld as the current alpha. We can do this by just swapping the pointer, avoiding
// useless data transfer
cuda::std::swap(_alphaD, _alphaNewD);
// From here, we split the computation of the next alpha into different steps. This is necessary since some calculations
// depend on data that should be accessed after a global sync point (e.g. after calculating the intermediate (dic * beta - sig) vector).
// Since global sync CANNOT be achieved (at least on old devices that do not support the grid_group::sync() method), we can do better:
// we just queue the split work on the default stream, and then we sync with the device at the end from this kernel.
// In this way, the work is executed with all data dependencies respected
// 3.1) We need to compute the beta vector for this iteration
CalculateBeta<8> << <red8DicGridSize, blocks, 0 >> > (_alphaD, _beta, lambda, dictionaryWords);
// 3.2) We need to compute the intermediate (dic * beta - sig) vector
CopyTo<8> << <red8SignalGridSize, blocks, 0 >> > (_signalD, signalSize, _intermD, true);
gridSize = red8DicGridSize;
//gridSize.x = 80;
gridSize.y = signalSize;
int sharedMemSize = blocks.x / warpSize;
Matrix2Vector<8, false> << <gridSize, blocks, sharedMemSize >> > (_dictionaryD, _beta, _intermD, dictionaryWords, signalSize);
CUDA_CHECKD(cudaPeekAtLastError());
// 3.3) We compute the new alpha with the thresholding at the end
CopyTo<8> << <red8DicGridSize, blocks, 0 >> > (_beta, dictionaryWords, _alphaNewD, false);
blocks.y = 1;
gridSize = red8SignalGridSize;
//gridSize.x = 80;
gridSize.y = (dictionaryWords + 1) / 2;
gridSize.y = dictionaryWords;
sharedMemSize = blocks.x / warpSize;
Matrix2Vector<8, true> << <gridSize, blocks, sharedMemSize >> > (_dictionaryInverseD, _intermD, _alphaNewD, signalSize, dictionaryWords);
CUDA_CHECKD(cudaPeekAtLastError());
// NB: Benchmarks say that 128 threads per block should result in the best occupancy for the
// threshold kernel
blocks.y = 1;
ThresholdVector<8> << <red8DicGridSize, blocks >> > (_alphaNewD, dictionaryWords);
lambda = 1.01f * lambda;
// 3.4) We see how much alpha is changed
_alphaDiffSquareSum = 0.0f;
SquareDiffSumKrnlUnroll<8> << <red8DicGridSize, blocks, sharedMemSize >> > (_alphaNewD, _alphaD, dictionaryWords, &_alphaDiffSquareSum);
CUDA_CHECKD(cudaDeviceSynchronize());
float norm = sqrtf(_alphaDiffSquareSum);
if (norm < 1e-5f) {
break;
}
}
printf("kernel iterations: %d\r\n", i);
delete[] _beta;
delete[] _intermD;
}
DeviceLiMapSv3::DeviceLiMapSv3(const float* solution, const float* signal, const float* D, const float* DINV, size_t dictionaryWords, size_t signalSize)
: BaseLiMapS(solution, signal, D, DINV, dictionaryWords, signalSize)
{
_alphaH.resize(_dictionaryWords);
// We create the cuda pointers here and then we copy the pointer values to the device symbols. In this way
// memory disposal should be automatically handled by the class
_solutionPtr = make_cuda<float>(dictionaryWords);
_signalPtr = make_cuda<float>(signalSize);
_dictionaryPtr = make_cuda<float>(dictionaryWords * signalSize);
_dictionaryInversePtr = make_cuda<float>(dictionaryWords * signalSize);
_alphaPtr = make_cuda<float>(dictionaryWords);
_alphaOldPtr = make_cuda<float>(dictionaryWords);
float* dummyPtr = _solutionPtr.get();
CUDA_CHECK(cudaMemcpyToSymbol(_solutionD, &dummyPtr, sizeof(void*)));
dummyPtr = _signalPtr.get();
CUDA_CHECK(cudaMemcpyToSymbol(_signalD, &dummyPtr, sizeof(void*)));
dummyPtr = _dictionaryPtr.get();
CUDA_CHECK(cudaMemcpyToSymbol(_dictionaryD, &dummyPtr, sizeof(void*)));
dummyPtr = _dictionaryInversePtr.get();
CUDA_CHECK(cudaMemcpyToSymbol(_dictionaryInverseD, &dummyPtr, sizeof(void*)));
dummyPtr = _alphaPtr.get();
CUDA_CHECK(cudaMemcpyToSymbol(_alphaD, &dummyPtr, sizeof(void*)));
dummyPtr = _alphaOldPtr.get();
CUDA_CHECK(cudaMemcpyToSymbol(_alphaNewD, &dummyPtr, sizeof(void*)));
}
void DeviceLiMapSv3::Execute(int iterations)
{
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// We launch the memory copies asynchronously here and then we wait on the sync point at the end of the function.
// In this way we first enqueue all the work on the NULL stream and then we wait, minimizing the "wasted" time in CPU-GPU
// command execution
CUDA_CHECK(cudaMemcpyAsync(_signalPtr.get(), _signalHost, sizeof(float) * _signalSize, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyAsync(_dictionaryInversePtr.get(), _dictionaryInverseHost, sizeof(float) * _dictionaryWords * _signalSize, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyAsync(_dictionaryPtr.get(), _dictionaryHost, sizeof(float) * _dictionaryWords * _signalSize, cudaMemcpyHostToDevice));
// The LiMapS kernel will dynamically launch its own kernels, so only one thread is necessary.
// By doing this, we avoid the CPU-GPU communication time for launching kernels
cudaEventRecord(start);
LiMapS2 << < 1, 1 >> > (_dictionaryWords, _signalSize);
cudaEventRecord(stop);
CUDA_CHECK(cudaMemcpyAsync(_alphaH.data(), _alphaPtr.get(), sizeof(float) * _dictionaryWords, cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaDeviceSynchronize());
// Let's just also measure the kernel exec time to see the mem copies/sync overhead
float ms;
cudaEventElapsedTime(&ms, start, stop);
std::cout << "Event elapsed: " << ms << " ms" << std::endl;
}
|
de5991694b86958ea95842b01ddcd39f7a5afece.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/search.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/search.hpp>
#include <hash/unordered_multiset.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <cudf/strings/detail/utilities.hpp>
#include <thrust/binary_search.h>
#include <thrust/logical.h>
namespace cudf {
namespace {
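// Dispatches the vectorized search to thrust::lower_bound or thrust::upper_bound
// depending on whether the first or the last insertion point is requested.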
template <typename DataIterator,
typename ValuesIterator,
typename OutputIterator,
typename Comparator>
void launch_search(DataIterator it_data,
ValuesIterator it_vals,
size_type data_size,
size_type values_size,
OutputIterator it_output,
Comparator comp,
bool find_first,
hipStream_t stream)
{
if (find_first) {
thrust::lower_bound(rmm::exec_policy(stream)->on(stream),
it_data,
it_data + data_size,
it_vals,
it_vals + values_size,
it_output,
comp);
} else {
thrust::upper_bound(rmm::exec_policy(stream)->on(stream),
it_data,
it_data + data_size,
it_vals,
it_vals + values_size,
it_output,
comp);
}
}
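// Searches every row of `values` within the ordered table `t`, writing the
// lower-bound (find_first) or upper-bound insertion index for each row.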
std::unique_ptr<column> search_ordered(table_view const& t,
table_view const& values,
bool find_first,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
hipStream_t stream = 0)
{
// Allocate result column
std::unique_ptr<column> result = make_numeric_column(
data_type{type_to_id<size_type>()}, values.num_rows(), mask_state::UNALLOCATED, stream, mr);
mutable_column_view result_view = result.get()->mutable_view();
// Handle empty inputs
if (t.num_rows() == 0) {
CUDA_TRY(hipMemset(result_view.data<size_type>(), 0, values.num_rows() * sizeof(size_type)));
return result;
}
if (not column_order.empty()) {
CUDF_EXPECTS(static_cast<std::size_t>(t.num_columns()) == column_order.size(),
"Mismatch between number of columns and column order.");
}
if (not null_precedence.empty()) {
CUDF_EXPECTS(static_cast<std::size_t>(t.num_columns()) == null_precedence.size(),
"Mismatch between number of columns and null precedence.");
}
auto d_t = table_device_view::create(t, stream);
auto d_values = table_device_view::create(values, stream);
auto count_it = thrust::make_counting_iterator<size_type>(0);
rmm::device_vector<order> d_column_order(column_order.begin(), column_order.end());
rmm::device_vector<null_order> d_null_precedence(null_precedence.begin(), null_precedence.end());
if (has_nulls(t) or has_nulls(values)) {
auto ineq_op =
(find_first)
? row_lexicographic_comparator<true>(
*d_t, *d_values, d_column_order.data().get(), d_null_precedence.data().get())
: row_lexicographic_comparator<true>(
*d_values, *d_t, d_column_order.data().get(), d_null_precedence.data().get());
launch_search(count_it,
count_it,
t.num_rows(),
values.num_rows(),
result_view.data<size_type>(),
ineq_op,
find_first,
stream);
} else {
auto ineq_op =
(find_first)
? row_lexicographic_comparator<false>(
*d_t, *d_values, d_column_order.data().get(), d_null_precedence.data().get())
: row_lexicographic_comparator<false>(
*d_values, *d_t, d_column_order.data().get(), d_null_precedence.data().get());
launch_search(count_it,
count_it,
t.num_rows(),
values.num_rows(),
result_view.data<size_type>(),
ineq_op,
find_first,
stream);
}
return result;
}
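// Type-dispatched functor returning true when the scalar `value` occurs in
// `col`; when the column has nulls, the (value, validity) pair iterator is
// used so that null elements can never match.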
struct contains_scalar_dispatch {
template <typename Element>
bool operator()(column_view const& col,
scalar const& value,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
using ScalarType = cudf::scalar_type_t<Element>;
auto d_col = column_device_view::create(col, stream);
auto s = static_cast<const ScalarType*>(&value);
if (col.has_nulls()) {
auto found_iter = thrust::find(rmm::exec_policy(stream)->on(stream),
d_col->pair_begin<Element, true>(),
d_col->pair_end<Element, true>(),
thrust::make_pair(s->value(), true));
return found_iter != d_col->pair_end<Element, true>();
} else {
auto found_iter = thrust::find(rmm::exec_policy(stream)->on(stream),
d_col->begin<Element>(),
d_col->end<Element>(),
s->value());
return found_iter != d_col->end<Element>();
}
}
};
template <>
bool contains_scalar_dispatch::operator()<cudf::dictionary32>(column_view const& col,
scalar const& value,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("dictionary type not supported yet");
}
template <>
bool contains_scalar_dispatch::operator()<cudf::list_view>(column_view const& col,
scalar const& value,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("list_view type not supported yet");
}
} // namespace
namespace detail {
bool contains(column_view const& col,
scalar const& value,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_EXPECTS(col.type() == value.type(), "DTYPE mismatch");
if (col.size() == 0) { return false; }
if (not value.is_valid()) { return col.has_nulls(); }
return cudf::type_dispatcher(col.type(), contains_scalar_dispatch{}, col, value, stream, mr);
}
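// Builds a BOOL8 column that marks, for each element of `haystack`, whether it
// is present in `needles` (membership is tested against an unordered_multiset
// built from the needles column).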
struct multi_contains_dispatch {
template <typename Element>
std::unique_ptr<column> operator()(column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
std::unique_ptr<column> result = make_numeric_column(data_type{type_to_id<bool>()},
haystack.size(),
copy_bitmask(haystack),
haystack.null_count(),
stream,
mr);
if (haystack.size() == 0) { return result; }
mutable_column_view result_view = result.get()->mutable_view();
if (needles.size() == 0) {
thrust::fill(rmm::exec_policy(stream)->on(stream),
result_view.begin<bool>(),
result_view.end<bool>(),
false);
return result;
}
auto hash_set = cudf::detail::unordered_multiset<Element>::create(needles, stream);
auto device_hash_set = hash_set.to_device();
auto d_haystack_ptr = column_device_view::create(haystack, stream);
auto d_haystack = *d_haystack_ptr;
if (haystack.has_nulls()) {
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(haystack.size()),
result_view.begin<bool>(),
[device_hash_set, d_haystack] __device__(size_t index) {
return d_haystack.is_null_nocheck(index) ||
device_hash_set.contains(d_haystack.element<Element>(index));
});
} else {
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(haystack.size()),
result_view.begin<bool>(),
[device_hash_set, d_haystack] __device__(size_t index) {
return device_hash_set.contains(d_haystack.element<Element>(index));
});
}
return result;
}
};
template <>
std::unique_ptr<column> multi_contains_dispatch::operator()<dictionary32>(
column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("dictionary type not supported");
}
template <>
std::unique_ptr<column> multi_contains_dispatch::operator()<list_view>(
column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("list_view type not supported");
}
std::unique_ptr<column> contains(column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_EXPECTS(haystack.type() == needles.type(), "DTYPE mismatch");
return cudf::type_dispatcher(
haystack.type(), multi_contains_dispatch{}, haystack, needles, mr, stream);
}
std::unique_ptr<column> lower_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
return search_ordered(t, values, true, column_order, null_precedence, mr, stream);
}
std::unique_ptr<column> upper_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
return search_ordered(t, values, false, column_order, null_precedence, mr, stream);
}
} // namespace detail
// external APIs
std::unique_ptr<column> lower_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::lower_bound(t, values, column_order, null_precedence, mr);
}
std::unique_ptr<column> upper_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::upper_bound(t, values, column_order, null_precedence, mr);
}
bool contains(column_view const& col, scalar const& value, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::contains(col, value, mr);
}
std::unique_ptr<column> contains(column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::contains(haystack, needles, mr);
}
} // namespace cudf
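// ---------------------------------------------------------------------------
// Usage sketch (illustrative only; `haystack`, `probe` and `mr` are assumed to
// be existing cudf::column_view objects and an rmm::mr::device_memory_resource*
// obtained elsewhere):
//
//   cudf::numeric_scalar<int32_t> needle{42};
//   bool has_needle = cudf::contains(haystack, needle, mr);
//
//   auto insert_points = cudf::lower_bound(cudf::table_view{{haystack}},
//                                          cudf::table_view{{probe}},
//                                          {cudf::order::ASCENDING},
//                                          {cudf::null_order::BEFORE},
//                                          mr);
// ---------------------------------------------------------------------------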
| de5991694b86958ea95842b01ddcd39f7a5afece.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/search.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/search.hpp>
#include <hash/unordered_multiset.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <cudf/strings/detail/utilities.hpp>
#include <thrust/binary_search.h>
#include <thrust/logical.h>
namespace cudf {
namespace {
template <typename DataIterator,
typename ValuesIterator,
typename OutputIterator,
typename Comparator>
void launch_search(DataIterator it_data,
ValuesIterator it_vals,
size_type data_size,
size_type values_size,
OutputIterator it_output,
Comparator comp,
bool find_first,
cudaStream_t stream)
{
if (find_first) {
thrust::lower_bound(rmm::exec_policy(stream)->on(stream),
it_data,
it_data + data_size,
it_vals,
it_vals + values_size,
it_output,
comp);
} else {
thrust::upper_bound(rmm::exec_policy(stream)->on(stream),
it_data,
it_data + data_size,
it_vals,
it_vals + values_size,
it_output,
comp);
}
}
std::unique_ptr<column> search_ordered(table_view const& t,
table_view const& values,
bool find_first,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream = 0)
{
// Allocate result column
std::unique_ptr<column> result = make_numeric_column(
data_type{type_to_id<size_type>()}, values.num_rows(), mask_state::UNALLOCATED, stream, mr);
mutable_column_view result_view = result.get()->mutable_view();
// Handle empty inputs
if (t.num_rows() == 0) {
CUDA_TRY(cudaMemset(result_view.data<size_type>(), 0, values.num_rows() * sizeof(size_type)));
return result;
}
if (not column_order.empty()) {
CUDF_EXPECTS(static_cast<std::size_t>(t.num_columns()) == column_order.size(),
"Mismatch between number of columns and column order.");
}
if (not null_precedence.empty()) {
CUDF_EXPECTS(static_cast<std::size_t>(t.num_columns()) == null_precedence.size(),
"Mismatch between number of columns and null precedence.");
}
auto d_t = table_device_view::create(t, stream);
auto d_values = table_device_view::create(values, stream);
auto count_it = thrust::make_counting_iterator<size_type>(0);
rmm::device_vector<order> d_column_order(column_order.begin(), column_order.end());
rmm::device_vector<null_order> d_null_precedence(null_precedence.begin(), null_precedence.end());
if (has_nulls(t) or has_nulls(values)) {
auto ineq_op =
(find_first)
? row_lexicographic_comparator<true>(
*d_t, *d_values, d_column_order.data().get(), d_null_precedence.data().get())
: row_lexicographic_comparator<true>(
*d_values, *d_t, d_column_order.data().get(), d_null_precedence.data().get());
launch_search(count_it,
count_it,
t.num_rows(),
values.num_rows(),
result_view.data<size_type>(),
ineq_op,
find_first,
stream);
} else {
auto ineq_op =
(find_first)
? row_lexicographic_comparator<false>(
*d_t, *d_values, d_column_order.data().get(), d_null_precedence.data().get())
: row_lexicographic_comparator<false>(
*d_values, *d_t, d_column_order.data().get(), d_null_precedence.data().get());
launch_search(count_it,
count_it,
t.num_rows(),
values.num_rows(),
result_view.data<size_type>(),
ineq_op,
find_first,
stream);
}
return result;
}
struct contains_scalar_dispatch {
template <typename Element>
bool operator()(column_view const& col,
scalar const& value,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
using ScalarType = cudf::scalar_type_t<Element>;
auto d_col = column_device_view::create(col, stream);
auto s = static_cast<const ScalarType*>(&value);
if (col.has_nulls()) {
auto found_iter = thrust::find(rmm::exec_policy(stream)->on(stream),
d_col->pair_begin<Element, true>(),
d_col->pair_end<Element, true>(),
thrust::make_pair(s->value(), true));
return found_iter != d_col->pair_end<Element, true>();
} else {
auto found_iter = thrust::find(rmm::exec_policy(stream)->on(stream),
d_col->begin<Element>(),
d_col->end<Element>(),
s->value());
return found_iter != d_col->end<Element>();
}
}
};
template <>
bool contains_scalar_dispatch::operator()<cudf::dictionary32>(column_view const& col,
scalar const& value,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("dictionary type not supported yet");
}
template <>
bool contains_scalar_dispatch::operator()<cudf::list_view>(column_view const& col,
scalar const& value,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FAIL("list_view type not supported yet");
}
} // namespace
namespace detail {
bool contains(column_view const& col,
scalar const& value,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_EXPECTS(col.type() == value.type(), "DTYPE mismatch");
if (col.size() == 0) { return false; }
if (not value.is_valid()) { return col.has_nulls(); }
return cudf::type_dispatcher(col.type(), contains_scalar_dispatch{}, col, value, stream, mr);
}
struct multi_contains_dispatch {
template <typename Element>
std::unique_ptr<column> operator()(column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
std::unique_ptr<column> result = make_numeric_column(data_type{type_to_id<bool>()},
haystack.size(),
copy_bitmask(haystack),
haystack.null_count(),
stream,
mr);
if (haystack.size() == 0) { return result; }
mutable_column_view result_view = result.get()->mutable_view();
if (needles.size() == 0) {
thrust::fill(rmm::exec_policy(stream)->on(stream),
result_view.begin<bool>(),
result_view.end<bool>(),
false);
return result;
}
auto hash_set = cudf::detail::unordered_multiset<Element>::create(needles, stream);
auto device_hash_set = hash_set.to_device();
auto d_haystack_ptr = column_device_view::create(haystack, stream);
auto d_haystack = *d_haystack_ptr;
if (haystack.has_nulls()) {
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(haystack.size()),
result_view.begin<bool>(),
[device_hash_set, d_haystack] __device__(size_t index) {
return d_haystack.is_null_nocheck(index) ||
device_hash_set.contains(d_haystack.element<Element>(index));
});
} else {
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(haystack.size()),
result_view.begin<bool>(),
[device_hash_set, d_haystack] __device__(size_t index) {
return device_hash_set.contains(d_haystack.element<Element>(index));
});
}
return result;
}
};
template <>
std::unique_ptr<column> multi_contains_dispatch::operator()<dictionary32>(
column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("dictionary type not supported");
}
template <>
std::unique_ptr<column> multi_contains_dispatch::operator()<list_view>(
column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("list_view type not supported");
}
std::unique_ptr<column> contains(column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_EXPECTS(haystack.type() == needles.type(), "DTYPE mismatch");
return cudf::type_dispatcher(
haystack.type(), multi_contains_dispatch{}, haystack, needles, mr, stream);
}
std::unique_ptr<column> lower_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
return search_ordered(t, values, true, column_order, null_precedence, mr, stream);
}
std::unique_ptr<column> upper_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
return search_ordered(t, values, false, column_order, null_precedence, mr, stream);
}
} // namespace detail
// external APIs
std::unique_ptr<column> lower_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::lower_bound(t, values, column_order, null_precedence, mr);
}
std::unique_ptr<column> upper_bound(table_view const& t,
table_view const& values,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::upper_bound(t, values, column_order, null_precedence, mr);
}
bool contains(column_view const& col, scalar const& value, rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::contains(col, value, mr);
}
std::unique_ptr<column> contains(column_view const& haystack,
column_view const& needles,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::contains(haystack, needles, mr);
}
} // namespace cudf
|
9ae16e3c551bffa1218b92b856c3685821320e9c.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef MODEL_BUILDER
#define MODEL_BUILDER
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <sstream>
#include <vector>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip_util.hip"
using namespace std;
class ModelBuilder
{
int length[DIM];
// DEVICE VARIABLE DECLARATION
OutputController *output_controller_d;
Lattice *lattice_d;
Domain *domain_d;
DomainConstant *domain_constants_d;
double **f_d, *rho_d, **u_d, *geometry_d, **force_d;
int *micro_bc_d;
int *macro_bc_d;
// HOST VARIABLE DECLARATION
Timing *time_t;
ProjectStrings *project_t;
OutputController *output_controller_h;
Lattice *lattice_h, *lattice_d_prototype;
Domain *domain_h;
DomainConstant *domain_constants_h;
double **f_h, *rho_h, **u_h, *geometry_h, **force_h;
int *micro_bc_h;
int *macro_bc_h;
// SCALAR DECLARATION (PLATFORM AGNOSTIC)
double tau, residual;
double tolerance;
int domain_size, maxT, saveT, steadyT, collision_type;
ptrdiff_t palabos_domain_size, palabos_maxT, palabos_saveT, palabos_steadyT, palabos_collision_type;
// CONFIG FLAGS AND STRINGS
char *fname_config;
bool zhou_he;
bool forcing;
bool is2D;
//!!!Grid's information
ptrdiff_t nx;
ptrdiff_t ny;
ptrdiff_t nz;
double palabos_omega;
// Allocates memory for variables which are constant in size
void constant_size_allocator()
{
// Allocate container structures
//combi_malloc<Lattice>(&lattice_h, &lattice_d, sizeof(Lattice));
//combi_malloc<Domain>(&domain_h, &domain_d, sizeof(Domain));
//combi_malloc<DomainConstant>(&domain_constants_h, &domain_constants_d, sizeof(DomainConstant));
//combi_malloc<OutputController>(&output_controller_h, &output_controller_d, sizeof(OutputController));
//domain_constants_h = (DomainConstant *)malloc(sizeof(DomainConstant));
//time_t = (Timing *)malloc(sizeof(Timing));
//project_t = (ProjectStrings *)malloc(sizeof(ProjectStrings));
}
void constant_loader()
{
// LOAD CONSTANTS FROM FILE
// LOAD LATTICE CONSTANTS
LOAD_E(domain_constants_h->e);
LOAD_OMEGA(domain_constants_h->omega);
LOAD_OPP(domain_constants_h->opp);
LOAD_M(domain_constants_h->M);
LOAD_M_INV(domain_constants_h->M_inv);
for(int i =0;i<NUM_RESIDS;i++)
{
domain_constants_h->residual[i] = 1;
}
//transfer domain_constants to device (can't think of a better place to put this)
//cudasafe(hipMemcpy(domain_constants_d, domain_constants_h, sizeof(DomainConstant),hipMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
cudasafe(hipMemcpyToSymbol("domain_constants", domain_constants_h, sizeof(DomainConstant)),"Model Builder: Copy to device memory failed!");
cudasafe(hipMemcpy(output_controller_d, output_controller_h, sizeof(OutputController),hipMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
//Allocate the variable-sized arrays, but using the grid dimensions supplied by Palabos rather than the original domain lengths
void palabos_variable_size_allocator(){
palabos_domain_size = 1;
//!!!!
palabos_domain_size = nx*ny*nz;
ptrdiff_t palabos_domain_data_size;
palabos_domain_data_size = palabos_domain_size*sizeof(double);
// Allocate required arrays
// PDFS
double *f_tmp[Q];
combi_malloc<double*>(&f_h, &f_d, sizeof(double*)*Q);
for(int i=0;i<Q;i++)
{
combi_malloc<double>(&f_h[i], &f_tmp[i], palabos_domain_data_size);
}
cudasafe(hipMemcpy(f_d,f_tmp,sizeof(double*)*Q,hipMemcpyHostToDevice), "Model Builder: Device memory allocation failed!");
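// combi_malloc (from the bundled hip/cuda utility file) appears to allocate a
// matching host/device buffer pair per call. The per-distribution device
// pointers are first gathered in f_tmp on the host and the pointer table is
// then copied into f_d, so kernels receive a device-resident array of Q
// device pointers.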
// RHO
combi_malloc<double>(&rho_h, &rho_d, palabos_domain_data_size);
// VELOCITY
double *u_tmp[DIM];
combi_malloc<double*>(&u_h, &u_d, sizeof(double*)*DIM);
for(int i=0;i<DIM;i++)
{
combi_malloc<double>(&u_h[i], &u_tmp[i], palabos_domain_data_size);
}
cudasafe(hipMemcpy(u_d,u_tmp,sizeof(double*)*DIM, hipMemcpyHostToDevice), "Model Builder: Device memory allocation failed!");
// GEOMETRY
combi_malloc<double>(&geometry_h, &geometry_d, palabos_domain_data_size);
// ALLOCATE OPTION ARRAYS
// FORCING
if(domain_constants_h->forcing == true)
{
double *force_tmp[DIM];
combi_malloc<double*>(&force_h, &force_d, sizeof(double*)*DIM);
for(int i=0;i<DIM;i++)
{
combi_malloc<double>(&force_h[i], &force_tmp[i], palabos_domain_data_size);
}
cudasafe(hipMemcpy(force_d,force_tmp,sizeof(double*)*DIM, hipMemcpyHostToDevice), "Model Builder: Device memory allocation failed!");
}
// MICRO BC
if(domain_constants_h->micro_bc == true)
{
combi_malloc<int>(&micro_bc_h, &micro_bc_d, palabos_domain_data_size);
}
// MACRO BC
if(domain_constants_h->macro_bc == true)
{
combi_malloc<int>(&macro_bc_h, &macro_bc_d, palabos_domain_data_size);
}
}
// Allocates memory for variables which have variable size due to problem geometry
/*void variable_size_allocator()
{
domain_size = 1;
for(int d = 0; d<DIM; d++)
{
domain_size = domain_size*domain_constants_h->length[d];
}
//!!!!
//domain_size = this->nx*this->ny*this->this.nz;
int domain_data_size;
domain_data_size = domain_size*sizeof(double);
// Allocate required arrays
// PDFS
double *f_tmp[Q];
combi_malloc<double*>(&f_h, &f_d, sizeof(double*)*Q);
for(int i=0;i<Q;i++)
{
combi_malloc<double>(&f_h[i], &f_tmp[i], domain_data_size);
}
cudasafe(hipMemcpy(f_d,f_tmp,sizeof(double*)*Q,hipMemcpyHostToDevice), "Model Builder: Device memory allocation failed!");
// RHO
combi_malloc<double>(&rho_h, &rho_d, domain_data_size);
// VELOCITY
double *u_tmp[DIM];
combi_malloc<double*>(&u_h, &u_d, sizeof(double*)*DIM);
for(int i=0;i<DIM;i++)
{
combi_malloc<double>(&u_h[i], &u_tmp[i], domain_data_size);
}
cudasafe(hipMemcpy(u_d,u_tmp,sizeof(double*)*DIM, hipMemcpyHostToDevice), "Model Builder: Device memory allocation failed!");
// GEOMETRY
combi_malloc<double>(&geometry_h, &geometry_d, domain_data_size);
// ALLOCATE OPTION ARRAYS
// FORCING
if(domain_constants_h->forcing == true)
{
double *force_tmp[DIM];
combi_malloc<double*>(&force_h, &force_d, sizeof(double*)*DIM);
for(int i=0;i<DIM;i++)
{
combi_malloc<double>(&force_h[i], &force_tmp[i], domain_data_size);
}
cudasafe(hipMemcpy(force_d,force_tmp,sizeof(double*)*DIM, hipMemcpyHostToDevice), "Model Builder: Device memory allocation failed!");
}
// MICRO BC
if(domain_constants_h->micro_bc == true)
{
combi_malloc<int>(&micro_bc_h, &micro_bc_d, domain_data_size);
}
// MACRO BC
if(domain_constants_h->macro_bc == true)
{
combi_malloc<int>(&macro_bc_h, &macro_bc_d, domain_data_size);
}
}*/
void variable_assembler()
{
lattice_h->f = f_h;
Lattice *lattice_d_tmp = (Lattice *)malloc(sizeof(Lattice));
lattice_d_tmp->f = f_d;
cudasafe(hipMemcpy(lattice_d, lattice_d_tmp, sizeof(Lattice),hipMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
domain_h->micro_bc = micro_bc_h;
domain_h->macro_bc = macro_bc_h;
domain_h->geometry = geometry_h;
domain_h->force = force_h;
domain_h->u = u_h;
domain_h->rho = rho_h;
Domain *domain_d_tmp = (Domain *)malloc(sizeof(Domain));
domain_d_tmp->micro_bc = micro_bc_d;
domain_d_tmp->macro_bc = macro_bc_d;
domain_d_tmp->geometry = geometry_d;
domain_d_tmp->force = force_d;
domain_d_tmp->u = u_d;
domain_d_tmp->rho = rho_d;
cudasafe(hipMemcpy(domain_d, domain_d_tmp, sizeof(Domain),hipMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
void variable_loader()
{
// LOAD GEOMETRY
cudasafe(hipMemcpy(geometry_d, geometry_h, sizeof(double)*domain_size,hipMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
// LOAD FORCES IF REQUIRED
if(domain_constants_h->forcing == true)
{
char force_labels[3][33];
strcpy(force_labels[0], "ForceX");
strcpy(force_labels[1], "ForceY");
strcpy(force_labels[2], "ForceZ");
double *force_d_tmp[DIM];
cudasafe(hipMemcpy(force_d_tmp, force_d, sizeof(double*)*DIM,hipMemcpyDeviceToHost),"Model Builder: Copy from device memory failed!");
for(int d=0;d<DIM;d++)
{
cudasafe(hipMemcpy(force_d_tmp[d], force_h[d], sizeof(double)*domain_size,hipMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
}
// LOAD MICRO BOUNDARY CONDITIONS IF REQUIRED
if(domain_constants_h->micro_bc == true)
{
cudasafe(hipMemcpy(micro_bc_d, micro_bc_h, sizeof(int)*domain_size,hipMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
// LOAD MACRO BOUNDARY CONDITIONS IF REQUIRED
if(domain_constants_h->macro_bc == true)
{
cudasafe(hipMemcpy(macro_bc_d, macro_bc_h, sizeof(int)*domain_size,hipMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
char vel_labels[3][33];
strcpy(vel_labels[0], "VelocityX");
strcpy(vel_labels[1], "VelocityY");
strcpy(vel_labels[2], "VelocityZ");
double *u_d_tmp[DIM];
cudasafe(hipMemcpy(u_d_tmp, u_d, sizeof(double*)*DIM,hipMemcpyDeviceToHost),"Model Builder: Copy from device memory failed!");
for(int d=0;d<DIM;d++)
{
cudasafe(hipMemcpy(u_d_tmp[d], u_h[d], sizeof(double)*domain_size,hipMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
cudasafe(hipMemcpy(rho_d, rho_h, sizeof(double)*domain_size,hipMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
if(domain_constants_h->init_type == 0)
{
load_static_IC();
}
}
void load_static_IC()
{
double *f_d_tmp[Q];
cudasafe(hipMemcpy(f_d_tmp, f_d, sizeof(double*)*Q,hipMemcpyDeviceToHost),"Model Builder: Copy from device memory failed!");
double omega[Q];
LOAD_OMEGA(omega);
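// The loop below seeds every distribution with its lattice weight
// (f_i = 1.0 * w_i), which appears to correspond to the zero-velocity
// equilibrium at unit density.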
for(int i=0;i<Q;i++)
{
for(int index=0;index<(domain_size);index++)
{
lattice_h->f[i][index] = 1.0*omega[i];
}
cudasafe(hipMemcpy(f_d_tmp[i], f_h[i], sizeof(double)*domain_size,hipMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
}
public:
ModelBuilder (char *, Lattice*, Lattice*, DomainConstant*, DomainConstant*, Domain*, Domain*, OutputController*, OutputController*, Timing*, ProjectStrings*);
ModelBuilder (char *, Lattice*, Lattice*, DomainConstant*, DomainConstant*, Domain*, Domain*, OutputController*, OutputController*, Timing*, ProjectStrings*, ptrdiff_t, ptrdiff_t, ptrdiff_t, double);
ModelBuilder ();
};
//Erhardt Markus : load host_value
/*void load_value(double omega, double nx,double ny, double nz, double *velocities, *densities)
{
}*/
/*ModelBuilder::ModelBuilder (char *input_filename, Lattice *lattice_host, Lattice *lattice_device, DomainConstant *domain_constants_host, DomainConstant *domain_constants_device, Domain *domain_host, Domain *domain_device, OutputController *output_controller_host, OutputController *output_controller_device, Timing *time, ProjectStrings *project)
{
lattice_h= lattice_host;
lattice_d= lattice_device;
domain_constants_h= domain_constants_host;
domain_constants_d= domain_constants_device;
domain_h= domain_host;
domain_d= domain_device;
output_controller_h= output_controller_host;
output_controller_d = output_controller_device;
time_t = time;
project_t = project;
fname_config = input_filename;
constant_size_allocator();
constant_loader();
variable_size_allocator();
variable_assembler();
cout << "variable assembler complete" << endl;
variable_loader();
cout << "variable loader complete" << endl;
}
*/
//Add these parameters later : double **pop, double **vel, double **dens
ModelBuilder::ModelBuilder (char *input_filename, Lattice *lattice_host, Lattice *lattice_device, DomainConstant *domain_constants_host, DomainConstant *domain_constants_device, Domain *domain_host, Domain *domain_device, OutputController *output_controller_host, OutputController *output_controller_device, Timing *time, ProjectStrings *project, ptrdiff_t nx, ptrdiff_t ny, ptrdiff_t nz, double omega)
{
//Get the grid's parameters
this->nx = nx;
this->ny = ny;
this->nz = nz;
this->palabos_omega = omega;
lattice_h= lattice_host;
lattice_d= lattice_device;
domain_constants_h= domain_constants_host;
domain_constants_d= domain_constants_device;
domain_h= domain_host;
domain_d= domain_device;
output_controller_h= output_controller_host;
output_controller_d = output_controller_device;
time_t = time;
project_t = project;
fname_config = input_filename;
constant_size_allocator();
constant_loader();
palabos_variable_size_allocator();
variable_assembler();
cout << "variable assembler complete" << endl;
variable_loader();
cout << "variable loader complete" << endl;
}
ModelBuilder::ModelBuilder (){}
#endif
| 9ae16e3c551bffa1218b92b856c3685821320e9c.cu | #ifndef MODEL_BUILDER
#define MODEL_BUILDER
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <sstream>
#include <vector>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_util.cu"
using namespace std;
class ModelBuilder
{
int length[DIM];
// DEVICE VARIABLE DECLARATION
OutputController *output_controller_d;
Lattice *lattice_d;
Domain *domain_d;
DomainConstant *domain_constants_d;
double **f_d, *rho_d, **u_d, *geometry_d, **force_d;
int *micro_bc_d;
int *macro_bc_d;
// HOST VARIABLE DECLARATION
Timing *time_t;
ProjectStrings *project_t;
OutputController *output_controller_h;
Lattice *lattice_h, *lattice_d_prototype;
Domain *domain_h;
DomainConstant *domain_constants_h;
double **f_h, *rho_h, **u_h, *geometry_h, **force_h;
int *micro_bc_h;
int *macro_bc_h;
// SCALAR DECLARATION (PLATFORM AGNOSTIC)
double tau, residual;
double tolerance;
int domain_size, maxT, saveT, steadyT, collision_type;
ptrdiff_t palabos_domain_size, palabos_maxT, palabos_saveT, palabos_steadyT, palabos_collision_type;
// CONFIG FLAGS AND STRINGS
char *fname_config;
bool zhou_he;
bool forcing;
bool is2D;
//!!!Grid's information
ptrdiff_t nx;
ptrdiff_t ny;
ptrdiff_t nz;
double palabos_omega;
// Allocates memory for variables which are constant in size
void constant_size_allocator()
{
// Allocate container structures
//combi_malloc<Lattice>(&lattice_h, &lattice_d, sizeof(Lattice));
//combi_malloc<Domain>(&domain_h, &domain_d, sizeof(Domain));
//combi_malloc<DomainConstant>(&domain_constants_h, &domain_constants_d, sizeof(DomainConstant));
//combi_malloc<OutputController>(&output_controller_h, &output_controller_d, sizeof(OutputController));
//domain_constants_h = (DomainConstant *)malloc(sizeof(DomainConstant));
//time_t = (Timing *)malloc(sizeof(Timing));
//project_t = (ProjectStrings *)malloc(sizeof(ProjectStrings));
}
void constant_loader()
{
// LOAD CONSTANTS FROM FILE
// LOAD LATTICE CONSTANTS
LOAD_E(domain_constants_h->e);
LOAD_OMEGA(domain_constants_h->omega);
LOAD_OPP(domain_constants_h->opp);
LOAD_M(domain_constants_h->M);
LOAD_M_INV(domain_constants_h->M_inv);
for(int i =0;i<NUM_RESIDS;i++)
{
domain_constants_h->residual[i] = 1;
}
//transfer domain_constants to device (can't think of a better place to put this)
//cudasafe(cudaMemcpy(domain_constants_d, domain_constants_h, sizeof(DomainConstant),cudaMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
cudasafe(cudaMemcpyToSymbol("domain_constants", domain_constants_h, sizeof(DomainConstant)),"Model Builder: Copy to device memory failed!");
cudasafe(cudaMemcpy(output_controller_d, output_controller_h, sizeof(OutputController),cudaMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
//Allocate the variable-sized arrays, but using the grid dimensions supplied by Palabos rather than the original domain lengths
void palabos_variable_size_allocator(){
palabos_domain_size = 1;
//!!!!
palabos_domain_size = nx*ny*nz;
ptrdiff_t palabos_domain_data_size;
palabos_domain_data_size = palabos_domain_size*sizeof(double);
// Allocate required arrays
// PDFS
double *f_tmp[Q];
combi_malloc<double*>(&f_h, &f_d, sizeof(double*)*Q);
for(int i=0;i<Q;i++)
{
combi_malloc<double>(&f_h[i], &f_tmp[i], palabos_domain_data_size);
}
cudasafe(cudaMemcpy(f_d,f_tmp,sizeof(double*)*Q,cudaMemcpyHostToDevice), "Model Builder: Device memory allocation failed!");
// RHO
combi_malloc<double>(&rho_h, &rho_d, palabos_domain_data_size);
// VELOCITY
double *u_tmp[DIM];
combi_malloc<double*>(&u_h, &u_d, sizeof(double*)*DIM);
for(int i=0;i<DIM;i++)
{
combi_malloc<double>(&u_h[i], &u_tmp[i], palabos_domain_data_size);
}
cudasafe(cudaMemcpy(u_d,u_tmp,sizeof(double*)*DIM, cudaMemcpyHostToDevice), "Model Builder: Device memory allocation failed!");
// GEOMETRY
combi_malloc<double>(&geometry_h, &geometry_d, palabos_domain_data_size);
// ALLOCATE OPTION ARRAYS
// FORCING
if(domain_constants_h->forcing == true)
{
double *force_tmp[DIM];
combi_malloc<double*>(&force_h, &force_d, sizeof(double*)*DIM);
for(int i=0;i<DIM;i++)
{
combi_malloc<double>(&force_h[i], &force_tmp[i], palabos_domain_data_size);
}
cudasafe(cudaMemcpy(force_d,force_tmp,sizeof(double*)*DIM, cudaMemcpyHostToDevice), "Model Builder: Device memory allocation failed!");
}
// MICRO BC
if(domain_constants_h->micro_bc == true)
{
combi_malloc<int>(&micro_bc_h, &micro_bc_d, palabos_domain_data_size);
}
// MACRO BC
if(domain_constants_h->macro_bc == true)
{
combi_malloc<int>(&macro_bc_h, &macro_bc_d, palabos_domain_data_size);
}
}
// Allocates memory for variables which have variable size due to problem geometry
/*void variable_size_allocator()
{
domain_size = 1;
for(int d = 0; d<DIM; d++)
{
domain_size = domain_size*domain_constants_h->length[d];
}
//!!!!
//domain_size = this->nx*this->ny*this->this.nz;
int domain_data_size;
domain_data_size = domain_size*sizeof(double);
// Allocate required arrays
// PDFS
double *f_tmp[Q];
combi_malloc<double*>(&f_h, &f_d, sizeof(double*)*Q);
for(int i=0;i<Q;i++)
{
combi_malloc<double>(&f_h[i], &f_tmp[i], domain_data_size);
}
cudasafe(cudaMemcpy(f_d,f_tmp,sizeof(double*)*Q,cudaMemcpyHostToDevice), "Model Builder: Device memory allocation failed!");
// RHO
combi_malloc<double>(&rho_h, &rho_d, domain_data_size);
// VELOCITY
double *u_tmp[DIM];
combi_malloc<double*>(&u_h, &u_d, sizeof(double*)*DIM);
for(int i=0;i<DIM;i++)
{
combi_malloc<double>(&u_h[i], &u_tmp[i], domain_data_size);
}
cudasafe(cudaMemcpy(u_d,u_tmp,sizeof(double*)*DIM, cudaMemcpyHostToDevice), "Model Builder: Device memory allocation failed!");
// GEOMETRY
combi_malloc<double>(&geometry_h, &geometry_d, domain_data_size);
// ALLOCATE OPTION ARRAYS
// FORCING
if(domain_constants_h->forcing == true)
{
double *force_tmp[DIM];
combi_malloc<double*>(&force_h, &force_d, sizeof(double*)*DIM);
for(int i=0;i<DIM;i++)
{
combi_malloc<double>(&force_h[i], &force_tmp[i], domain_data_size);
}
cudasafe(cudaMemcpy(force_d,force_tmp,sizeof(double*)*DIM, cudaMemcpyHostToDevice), "Model Builder: Device memory allocation failed!");
}
// MICRO BC
if(domain_constants_h->micro_bc == true)
{
combi_malloc<int>(&micro_bc_h, &micro_bc_d, domain_data_size);
}
// MACRO BC
if(domain_constants_h->macro_bc == true)
{
combi_malloc<int>(&macro_bc_h, &macro_bc_d, domain_data_size);
}
}*/
void variable_assembler()
{
lattice_h->f = f_h;
Lattice *lattice_d_tmp = (Lattice *)malloc(sizeof(Lattice));
lattice_d_tmp->f = f_d;
cudasafe(cudaMemcpy(lattice_d, lattice_d_tmp, sizeof(Lattice),cudaMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
domain_h->micro_bc = micro_bc_h;
domain_h->macro_bc = macro_bc_h;
domain_h->geometry = geometry_h;
domain_h->force = force_h;
domain_h->u = u_h;
domain_h->rho = rho_h;
Domain *domain_d_tmp = (Domain *)malloc(sizeof(Domain));
domain_d_tmp->micro_bc = micro_bc_d;
domain_d_tmp->macro_bc = macro_bc_d;
domain_d_tmp->geometry = geometry_d;
domain_d_tmp->force = force_d;
domain_d_tmp->u = u_d;
domain_d_tmp->rho = rho_d;
cudasafe(cudaMemcpy(domain_d, domain_d_tmp, sizeof(Domain),cudaMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
void variable_loader()
{
// LOAD GEOMETRY
cudasafe(cudaMemcpy(geometry_d, geometry_h, sizeof(double)*domain_size,cudaMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
// LOAD FORCES IF REQUIRED
if(domain_constants_h->forcing == true)
{
char force_labels[3][33];
strcpy(force_labels[0], "ForceX");
strcpy(force_labels[1], "ForceY");
strcpy(force_labels[2], "ForceZ");
double *force_d_tmp[DIM];
cudasafe(cudaMemcpy(force_d_tmp, force_d, sizeof(double*)*DIM,cudaMemcpyDeviceToHost),"Model Builder: Copy from device memory failed!");
for(int d=0;d<DIM;d++)
{
cudasafe(cudaMemcpy(force_d_tmp[d], force_h[d], sizeof(double)*domain_size,cudaMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
}
// LOAD MICRO BOUNDARY CONDITIONS IF REQUIRED
if(domain_constants_h->micro_bc == true)
{
cudasafe(cudaMemcpy(micro_bc_d, micro_bc_h, sizeof(int)*domain_size,cudaMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
// LOAD MACRO BOUNDARY CONDITIONS IF REQUIRED
if(domain_constants_h->macro_bc == true)
{
cudasafe(cudaMemcpy(macro_bc_d, macro_bc_h, sizeof(int)*domain_size,cudaMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
char vel_labels[3][33];
strcpy(vel_labels[0], "VelocityX");
strcpy(vel_labels[1], "VelocityY");
strcpy(vel_labels[2], "VelocityZ");
double *u_d_tmp[DIM];
cudasafe(cudaMemcpy(u_d_tmp, u_d, sizeof(double*)*DIM,cudaMemcpyDeviceToHost),"Model Builder: Copy from device memory failed!");
for(int d=0;d<DIM;d++)
{
cudasafe(cudaMemcpy(u_d_tmp[d], u_h[d], sizeof(double)*domain_size,cudaMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
cudasafe(cudaMemcpy(rho_d, rho_h, sizeof(double)*domain_size,cudaMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
if(domain_constants_h->init_type == 0)
{
load_static_IC();
}
}
void load_static_IC()
{
double *f_d_tmp[Q];
cudasafe(cudaMemcpy(f_d_tmp, f_d, sizeof(double*)*Q,cudaMemcpyDeviceToHost),"Model Builder: Copy from device memory failed!");
double omega[Q];
LOAD_OMEGA(omega);
for(int i=0;i<Q;i++)
{
for(int index=0;index<(domain_size);index++)
{
lattice_h->f[i][index] = 1.0*omega[i];
}
cudasafe(cudaMemcpy(f_d_tmp[i], f_h[i], sizeof(double)*domain_size,cudaMemcpyHostToDevice),"Model Builder: Copy to device memory failed!");
}
}
public:
ModelBuilder (char *, Lattice*, Lattice*, DomainConstant*, DomainConstant*, Domain*, Domain*, OutputController*, OutputController*, Timing*, ProjectStrings*);
ModelBuilder (char *, Lattice*, Lattice*, DomainConstant*, DomainConstant*, Domain*, Domain*, OutputController*, OutputController*, Timing*, ProjectStrings*, ptrdiff_t, ptrdiff_t, ptrdiff_t, double);
ModelBuilder ();
};
//Erhardt Markus : load host_value
/*void load_value(double omega, double nx,double ny, double nz, double *velocities, *densities)
{
}*/
/*ModelBuilder::ModelBuilder (char *input_filename, Lattice *lattice_host, Lattice *lattice_device, DomainConstant *domain_constants_host, DomainConstant *domain_constants_device, Domain *domain_host, Domain *domain_device, OutputController *output_controller_host, OutputController *output_controller_device, Timing *time, ProjectStrings *project)
{
lattice_h= lattice_host;
lattice_d= lattice_device;
domain_constants_h= domain_constants_host;
domain_constants_d= domain_constants_device;
domain_h= domain_host;
domain_d= domain_device;
output_controller_h= output_controller_host;
output_controller_d = output_controller_device;
time_t = time;
project_t = project;
fname_config = input_filename;
constant_size_allocator();
constant_loader();
variable_size_allocator();
variable_assembler();
cout << "variable assembler complete" << endl;
variable_loader();
cout << "variable loader complete" << endl;
}
*/
//Add these parameters later : double **pop, double **vel, double **dens
ModelBuilder::ModelBuilder (char *input_filename, Lattice *lattice_host, Lattice *lattice_device, DomainConstant *domain_constants_host, DomainConstant *domain_constants_device, Domain *domain_host, Domain *domain_device, OutputController *output_controller_host, OutputController *output_controller_device, Timing *time, ProjectStrings *project, ptrdiff_t nx, ptrdiff_t ny, ptrdiff_t nz, double omega)
{
//Get the grid's parameters
this->nx = nx;
this->ny = ny;
this->nz = nz;
this->palabos_omega = omega;
lattice_h= lattice_host;
lattice_d= lattice_device;
domain_constants_h= domain_constants_host;
domain_constants_d= domain_constants_device;
domain_h= domain_host;
domain_d= domain_device;
output_controller_h= output_controller_host;
output_controller_d = output_controller_device;
time_t = time;
project_t = project;
fname_config = input_filename;
constant_size_allocator();
constant_loader();
palabos_variable_size_allocator();
variable_assembler();
cout << "variable assembler complete" << endl;
variable_loader();
cout << "variable loader complete" << endl;
}
ModelBuilder::ModelBuilder (){}
#endif
|
73214d1e5b79eb0172b58deceab227ff77815fa4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
int main() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
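// Worked example with hypothetical numbers: memoryClockRate = 3004000 kHz on
// a 384-bit bus gives 2.0 * 3004000 * (384/8) / 1e6, roughly 288 GB/s; the
// factor 2 accounts for DDR.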
}
return 0;
}
| 73214d1e5b79eb0172b58deceab227ff77815fa4.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
int main() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
return 0;
}
|
c73df7a19b67b7e3e02b3d67a8db442c2c32efcb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
static char help[] = "Solves -Laplacian u - exp(u) = 0, 0 < x < 1 using GPU\n\n";
/*
Same as ex47.c except it also uses the GPU to evaluate the function
*/
// TODO: CAUTION sort header needs to be first or getting compile errors??
// TODO: CAUTION sort header needs to be first or getting compile errors??
// TODO: CAUTION sort header needs to be first or getting compile errors??
#include <thrust/sort.h>
// petsc includes
#include <petscdmda.h>
#include <petscsnes.h>
#include <petsccusp.h>
// cusp includes
#include "cusp/detail/device/utils.h"
extern PetscErrorCode ComputeFunction(SNES,Vec,Vec,void*);
PetscBool useCUSP = PETSC_FALSE;
PetscBool jacobianComputed = PETSC_FALSE;
PetscLogEvent LogFunction = 0;
__device__ PetscInt *cudaTest;
struct LinearHexMesh
{
LinearHexMesh(PetscInt numelements) :
m_NumElements (numelements)
{
m_NodesPerElement = 8 ;
element_residuals->resize(numelements*m_NodesPerElement);
}
// number of elements
PetscInt m_NumElements;
// number of nodes per element
PetscInt m_NodesPerElement ;
// node coordinates
CUSPARRAY *m_NodeXCoord, *m_NodeYCoord, *m_NodeZCoord;
// solution and residual
CUSPARRAY *uarray,*farray;
// temporary vector to hold element wise residual
// 8 residual entries per element (one for each node)
CUSPARRAY *element_residuals;
// connectivity information is stored per node for structure of array access
CUSPINTARRAYGPU *m_Connectivity0,
*m_Connectivity1,
*m_Connectivity2,
*m_Connectivity3,
*m_Connectivity4,
*m_Connectivity5,
*m_Connectivity6,
*m_Connectivity7;
CUSPINTARRAYGPU *m_GlobalLocalMap0,
*m_GlobalLocalMap1,
*m_GlobalLocalMap2,
*m_GlobalLocalMap3,
*m_GlobalLocalMap4,
*m_GlobalLocalMap5,
*m_GlobalLocalMap6,
*m_GlobalLocalMap7;
CUSPINTARRAYGPU *m_LocalElementMap;
typedef CUSPARRAY::iterator PetscScalarIter;
typedef CUSPINTARRAYGPU::iterator PetscIntIter;
typedef thrust::permutation_iterator<PetscScalarIter,PetscIntIter> PetscMapIter;
typedef thrust::zip_iterator< thrust::tuple<
PetscMapIter, PetscMapIter, PetscMapIter, PetscMapIter,
PetscMapIter, PetscMapIter, PetscMapIter, PetscMapIter
> > hex_iterator ;
// iterators for looping of nodes within elements
typedef thrust::zip_iterator<
thrust::tuple< hex_iterator, hex_iterator, hex_iterator > > hex_node_iterator;
hex_node_iterator ElementBegin()
{
return thrust::make_zip_iterator(thrust::make_tuple(
thrust::make_zip_iterator(thrust::make_tuple( // x - coordinates
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity0->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity1->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity2->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity3->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity4->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity5->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity6->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity7->begin())
)),
thrust::make_zip_iterator(thrust::make_tuple( // y - coordinates
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity0->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity1->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity2->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity3->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity4->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity5->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity6->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity7->begin())
)),
thrust::make_zip_iterator(thrust::make_tuple( // z - coordinates
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity0->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity1->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity2->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity3->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity4->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity5->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity6->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity7->begin())
))
));
}
hex_node_iterator ElementEnd()
{
return thrust::make_zip_iterator(thrust::make_tuple(
thrust::make_zip_iterator(thrust::make_tuple( // x - coordinates
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity0->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity1->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity2->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity3->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity4->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity5->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity6->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity7->end())
)),
thrust::make_zip_iterator(thrust::make_tuple( // y - coordinates
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity0->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity1->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity2->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity3->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity4->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity5->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity6->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity7->end())
)),
thrust::make_zip_iterator(thrust::make_tuple( // z - coordinates
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity0->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity1->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity2->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity3->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity4->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity5->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity6->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity7->end())
))
));
}
// iterators for looping over element wise residual for each element
typedef thrust::zip_iterator< thrust::tuple<
PetscScalarIter, PetscScalarIter, PetscScalarIter, PetscScalarIter,
PetscScalarIter, PetscScalarIter, PetscScalarIter, PetscScalarIter
> > residual_iterator ;
residual_iterator ResidualBegin()
{
return thrust::make_zip_iterator(thrust::make_tuple(
element_residuals->begin()+0,
element_residuals->begin()+1,
element_residuals->begin()+2,
element_residuals->begin()+3,
element_residuals->begin()+4,
element_residuals->begin()+5,
element_residuals->begin()+6,
element_residuals->begin()+7
));
}
residual_iterator ResidualEnd()
{
return thrust::make_zip_iterator(thrust::make_tuple(
element_residuals->end()+0,
element_residuals->end()+1,
element_residuals->end()+2,
element_residuals->end()+3,
element_residuals->end()+4,
element_residuals->end()+5,
element_residuals->end()+6,
element_residuals->end()+7
));
}
// iterators for looping over element solution vector for each element
hex_iterator SolutionBegin()
{
return thrust::make_zip_iterator(thrust::make_tuple(
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap0->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap1->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap2->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap3->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap4->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap5->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap6->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap7->begin())
));
}
hex_iterator SolutionEnd()
{
return thrust::make_zip_iterator(thrust::make_tuple(
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap0->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap1->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap2->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap3->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap4->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap5->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap6->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap7->end())
));
}
typedef thrust::tuple<
PetscScalar,PetscScalar,PetscScalar,PetscScalar,
PetscScalar,PetscScalar,PetscScalar,PetscScalar
> hex_tuple;
// helper to get array/vector from hex tuple
__host__ __device__
void get_hex_vector(hex_tuple const &tuple, PetscScalar elemvector[8])
{
// decode the tuple
elemvector[0]= thrust::get<0>(tuple) ;
elemvector[1]= thrust::get<1>(tuple) ;
elemvector[2]= thrust::get<2>(tuple) ;
elemvector[3]= thrust::get<3>(tuple) ;
elemvector[4]= thrust::get<4>(tuple) ;
elemvector[5]= thrust::get<5>(tuple) ;
elemvector[6]= thrust::get<6>(tuple) ;
elemvector[7]= thrust::get<7>(tuple) ;
return;
}
};
// https://groups.google.com/forum/?fromgroups=#!topic/thrust-users/mqYDi2X7xmA
//
// An object's data members exist wherever the compiler decides to place
// them, given some constraints. For functors used with Thrust, data
// members get copied around to different memory spaces. A functor (and
// its data) begin on the host, probably implemented by the compiler in
// CPU registers. A Thrust algorithm will receive a copy of the user's
// functor and eventually package it up in something passed as a
// __global__ function argument. Depending on various particulars of the
// compiler, GPU, and size, __global__ function arguments may be
// implemented in either __shared__ memory, __constant__ memory, or
// global device memory. When a __global__ function executes, its
// parameters (including any copies of user functors) typically get
// copied into GPU registers. Does that make sense?
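// In this file that means the plain scalar members of the WFSModel functor
// below (m_rank, m_density, m_deltat, ...) travel inside the functor copy that
// thrust passes to its __global__ function, while the bulk mesh data is only
// reached through the iterators constructed on the host.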
struct WFSModel : public LinearHexMesh
{
PetscInt m_rank,m_deviceNum; //device info
PetscScalar m_x0,m_y0,m_z0;
PetscScalar m_density ;
PetscScalar m_specificheat ;
PetscScalar m_deltat ;
PetscScalar m_bloodspecificheat ;
PetscScalar m_bodytemp ;
CUSPARRAY *m_conduction ;
CUSPARRAY *m_perfusion ;
CUSPARRAY *m_absorption ;
CUSPARRAY *m_scattering ;
WFSModel(PetscInt rank, PetscInt deviceNum,PetscInt numelements ) :
LinearHexMesh(numelements) ,
m_rank(rank),m_deviceNum(deviceNum)
{
m_density = 1.e3;
m_specificheat = 3.8e3;
m_deltat = 1.00;
m_bloodspecificheat = 3.4e3;
m_bodytemp = 37.0;
m_x0 = 0.005;
m_y0 = 0.005;
m_z0 = 0.005;
}
// iterators for looping over element-wise constitutive data (perfusion, conduction, scattering, absorption)
typedef thrust::zip_iterator< thrust::tuple<
PetscScalarIter, PetscScalarIter, PetscScalarIter, PetscScalarIter
> > constitutive_iterator ;
constitutive_iterator ConstitutiveBegin()
{
return thrust::make_zip_iterator(thrust::make_tuple(
m_perfusion ->begin(),//0 perfusion
m_conduction->begin(),//1 conduction
m_scattering->begin(),//2 scattering
m_absorption->begin() //3 absorption
));
}
constitutive_iterator ConstitutiveEnd()
{
return thrust::make_zip_iterator(thrust::make_tuple(
m_perfusion ->end(),//0 perfusion
m_conduction->end(),//1 conduction
m_scattering->end(),//2 scattering
m_absorption->end() //3 absorption
));
}
// point_in_bbox from other post
template <typename Tuple>
__host__ __device__
void operator()(Tuple tuple)
{
// decode the hex node coordinates
PetscScalar NodeXcoord[8], NodeYcoord[8], NodeZcoord[8] ;
this->get_hex_vector(thrust::get<0>(thrust::get<0>(tuple)), NodeXcoord);
this->get_hex_vector(thrust::get<1>(thrust::get<0>(tuple)), NodeYcoord);
this->get_hex_vector(thrust::get<2>(thrust::get<0>(tuple)), NodeZcoord);
// decode local residual and solution
PetscScalar ElementResidual[8], ElementSolution[8];
this->get_hex_vector(thrust::get<1>(tuple), ElementResidual);
this->get_hex_vector(thrust::get<2>(tuple), ElementSolution);
// decode constitutive data
PetscScalar Perfusion = thrust::get<0>(thrust::get<3>(tuple));
PetscScalar Conduction = thrust::get<1>(thrust::get<3>(tuple));
PetscScalar Scattering = thrust::get<2>(thrust::get<3>(tuple)); // slot 2 is scattering (see ConstitutiveBegin)
PetscScalar Absorption = thrust::get<3>(thrust::get<3>(tuple)); // slot 3 is absorption
printf("rank=%d device=%d blockDim=(%d,%d,%d) gridDim=(%d,%d,%d) warpSize=%d blockIdx=(%d,%d,%d) threadIdx=(%d,%d,%d) node0=(%f,%f,%f) residual0=%f solution0=%f absorption=%f conduction=%f\n",m_rank,m_deviceNum,blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z, warpSize,blockIdx.x,blockIdx.y,blockIdx.z,threadIdx.x,threadIdx.y,threadIdx.z,
NodeXcoord[0],NodeYcoord[0],NodeZcoord[0],
ElementResidual[0],ElementSolution[0],
Absorption ,Conduction );
// //... do stuff with parameters ...
// thrust::get<0>(t) = sc * ( source
// + m_density*m_specificheat/m_deltat* u_val
// + m_bloodspecificheat*m_perfusion*(m_bodytemp - 0.5*u_val) )
// for (unsigned int qp=0; qp != n_qpoints; qp++)
// {
// // Compute the solution & its gradient at the old Newton iterate
// Number u_theta = c.interior_value( this->u_var,qp);
// Gradient grad_u = c.interior_gradient(this->u_var,qp);
// // get damage values
// Number damage = c.interior_value( this->a_var,qp);
// Number DdamageDu= c.interior_value( this->b_var,qp);
// Gradient DiffusionDirection = this->m_MathModel.DiffusionDirection(subdomain_id) ;
// Gradient TempDiffusionDirection(
// grad_u(0)*DiffusionDirection(0) ,
// grad_u(1)*DiffusionDirection(1) ,
// grad_u(2)*DiffusionDirection(2)
// );
// // First, an i-loop over the velocity degrees of freedom.
// // We know that n_u_dofs == n_v_dofs so we can compute contributions
// // for both at the same time.
// for (unsigned int i=0; i != n_u_dofs; i++)
// {
// ElementResidual(i) += JxW[qp] * (
// phi[i][qp] *
// ( // perfusion term (check the SIGN)
// this->m_MathModel.PennesReactionTerm(field_id,u_theta,damage)
// - // source term
// this->m_MathModel.PennesSource(field_id,u_theta,
// damage,z_value,
// qpoint[qp],
// this->m_PowerID)
// )
// + // diffusion term
// this->m_MathModel.ThermalConductivity(field_id,u_theta,damage) *
// ( TempDiffusionDirection * dphi[i][qp] )
// ) ;
// // convection term
// Fu(i) += JxW[qp] * phi[i][qp] *
// ( this->m_MathModel.BulkFluidFlow(subdomain_id) * grad_u ) ;
// }
// }
}
// template <typename Tuple>
// __host__ __device__
// void operator()(Tuple t)
// {
// /* f = (2*u_i - u_(i+1) - u_(i-1))/h - h*exp(u_i) */
// thrust::get<0>(t) = 1;
// PetscInt Iz = thrust::get<1>(t)/m_ym/m_xm;
// PetscInt Iy = (thrust::get<1>(t)-Iz*m_ym*m_xm)/m_xm;
// PetscInt Ix = (thrust::get<1>(t)-Iz*m_ym*m_xm- Iy*m_xm);
// PetscScalar sc = m_hx*m_hz*m_hy;
// PetscScalar hxhzdhy = m_hx*m_hz/m_hy;
// PetscScalar hyhzdhx = m_hy*m_hz/m_hx;
// PetscScalar hxhydhz = m_hx*m_hy/m_hz;
// PetscScalar two = 2.0;
// // print launch parameters and dbg info
// // printf("rank=%d device=%d blockDim=(%d,%d,%d) gridDim=(%d,%d,%d) warpSize=%d blockIdx=(%d,%d,%d) threadIdx=(%d,%d,%d) size=(%d,%d,%d) globalID=%d index=(%d,%d,%d)\n",m_rank,m_deviceNum,blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z, warpSize,blockIdx.x,blockIdx.y,blockIdx.z,threadIdx.x,threadIdx.y,threadIdx.z,m_xm,m_ym,m_zm,thrust::get<8>(t),Ix,Iy,Iz);
// PetscScalar u_val = thrust::get<0>(thrust::get<2>(t)) ;//1 u(i ,j ,k )
// PetscScalar perfusion = thrust::get<0>(thrust::get<3>(t)) ;//perfusion
// if (
// Ix > 0 && Ix < m_xm-1
// &&
// Iy > 0 && Iy < m_ym-1
// &&
// Iz > 0 && Iz < m_zm-1
// ) {
// // decode the tuple
// PetscScalar u_east = thrust::get<1>(thrust::get<2>(t));//2 u(i+1,j ,k )
// PetscScalar u_west = thrust::get<2>(thrust::get<2>(t));//3 u(i-1,j ,k )
// PetscScalar u_north = thrust::get<3>(thrust::get<2>(t));//4 u(i ,j+1,k )
// PetscScalar u_south = thrust::get<4>(thrust::get<2>(t));//5 u(i ,j-1,k )
// PetscScalar u_up = thrust::get<5>(thrust::get<2>(t));//6 u(i ,j ,k+1)
// PetscScalar u_down = thrust::get<6>(thrust::get<2>(t));//7 u(i ,j ,k-1)
// PetscScalar u_xx = (-u_east + two*u_val - u_west )*hyhzdhx;
// PetscScalar u_yy = (-u_north + two*u_val - u_south)*hxhzdhy;
// PetscScalar u_zz = (-u_up + two*u_val - u_down )*hxhydhz;
// PetscScalar sqdist = (m_hx * Ix - m_x0)*(m_hx * Ix - m_x0)
// + (m_hy * Iy - m_y0)*(m_hy * Iy - m_y0)
// + (m_hz * Iz - m_z0)*(m_hz * Iz - m_z0);
// PetscScalar source = 1.e4 * exp(5.0/(sqdist +1.0));
// thrust::get<0>(t) = sc * ( source
// + m_density*m_specificheat/m_deltat* u_val
// + m_bloodspecificheat*m_perfusion*(m_bodytemp - 0.5*u_val) )
// + m_conduction/2.0* (u_xx + u_yy + u_zz) ;
// } else { // dirichlet bc everywhere else
// thrust::get<0>(t) = u_val;
// }
//
// }
};
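// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical driver, assuming the mesh arrays have been
// allocated and filled): the fused iterators above appear intended to be
// consumed by a single thrust::for_each whose tuple layout matches
// WFSModel::operator(), i.e. coordinates, element residuals, element solutions
// and constitutive data zipped together.
//
//   WFSModel model(rank, deviceNum, numElements);
//   thrust::for_each(
//     thrust::make_zip_iterator(thrust::make_tuple(
//       model.ElementBegin(), model.ResidualBegin(),
//       model.SolutionBegin(), model.ConstitutiveBegin())),
//     thrust::make_zip_iterator(thrust::make_tuple(
//       model.ElementEnd(), model.ResidualEnd(),
//       model.SolutionEnd(), model.ConstitutiveEnd())),
//     model);
// ---------------------------------------------------------------------------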
int main(int argc,char **argv)
{
SNES snes;
Vec x,f;
Mat J;
PetscErrorCode ierr;
hipError_t ierrCuda;
char *tmp,typeName[256];
int myrank;
PetscBool flg;
PetscInitialize(&argc,&argv,(char *)0,help);
MPI_Comm_rank(PETSC_COMM_WORLD, &myrank);
int deviceNum=myrank;
{
int deviceCount;
CUDA_SAFE_CALL(hipGetDeviceCount(&deviceCount));
ierr = PetscPrintf(PETSC_COMM_SELF, "!!!!!found %d devices !!!!!\n",deviceCount);CHKERRQ(ierr);
if (deviceCount == 0) {
ierr = PetscPrintf(PETSC_COMM_SELF, "!!!!!No devices found!!!!!\n");CHKERRQ(ierr);
return -1000;
}
if (deviceNum >= deviceCount || deviceNum < 0) {
ierr = PetscPrintf(PETSC_COMM_SELF, "\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", deviceNum, 0);CHKERRQ(ierr);
deviceNum = 0;
}
}
ierrCuda = hipSetDevice(deviceNum);
if (ierrCuda != hipSuccess) {
ierr = PetscPrintf(PETSC_COMM_SELF, " cuda Error: %s , exiting\n",hipGetErrorString( ierrCuda));CHKERRQ(ierr);
return -1;
}
ierr = PetscPrintf(PETSC_COMM_SELF, " reseting GPU: \n");CHKERRQ(ierr);
CUDA_SAFE_CALL(hipDeviceReset());
ierr = PetscPrintf(PETSC_COMM_SELF, "Running on...\n\n");CHKERRQ(ierr);
hipDeviceProp_t deviceProp;
if (hipGetDeviceProperties(&deviceProp, deviceNum) == hipSuccess) {
ierr = PetscPrintf(PETSC_COMM_SELF, " Device %d: %s %d.%d\n", deviceNum, deviceProp.name,deviceProp.major,deviceProp.minor);CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_SELF," Global memory available on device in bytes %d\n" , deviceProp.totalGlobalMem );
ierr = PetscPrintf(PETSC_COMM_SELF," Shared memory available per block in bytes %d\n" , deviceProp.sharedMemPerBlock );
ierr = PetscPrintf(PETSC_COMM_SELF," 32-bit registers available per block %d\n" , deviceProp.regsPerBlock );
ierr = PetscPrintf(PETSC_COMM_SELF," Warp size in threads %d\n" , deviceProp.warpSize );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum pitch in bytes allowed by memory copies %d\n" , deviceProp.memPitch );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum number of threads per block %d\n" , deviceProp.maxThreadsPerBlock );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum size of each dimension of a block %d\n" , deviceProp.maxThreadsDim[0] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum size of each dimension of a block %d\n" , deviceProp.maxThreadsDim[1] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum size of each dimension of a block %d\n" , deviceProp.maxThreadsDim[2] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum size of each dimension of a grid %d\n" , deviceProp.maxGridSize[0] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum size of each dimension of a grid %d\n" , deviceProp.maxGridSize[1] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum size of each dimension of a grid %d\n" , deviceProp.maxGridSize[2] );
ierr = PetscPrintf(PETSC_COMM_SELF," Clock frequency in kilohertz %d\n" , deviceProp.clockRate );
ierr = PetscPrintf(PETSC_COMM_SELF," Constant memory available on device in bytes %d\n" , deviceProp.totalConstMem );
ierr = PetscPrintf(PETSC_COMM_SELF," Alignment requirement for textures %d\n" , deviceProp.textureAlignment );
ierr = PetscPrintf(PETSC_COMM_SELF," Number of multiprocessors on device %d\n" , deviceProp.multiProcessorCount );
ierr = PetscPrintf(PETSC_COMM_SELF," Specified whether there is a run time limit on kernels %d\n" , deviceProp.kernelExecTimeoutEnabled );
ierr = PetscPrintf(PETSC_COMM_SELF," Device is integrated as opposed to discrete %d\n" , deviceProp.integrated );
ierr = PetscPrintf(PETSC_COMM_SELF," Device can map host memory with hipHostMalloc/hipHostGetDevicePointer %d\n", deviceProp.canMapHostMemory );
ierr = PetscPrintf(PETSC_COMM_SELF," Compute mode (See ::hipComputeMode) %d\n" , deviceProp.computeMode );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 1D texture size %d\n" , deviceProp.maxTexture1D );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 2D texture dimensions %d\n" , deviceProp.maxTexture2D[0] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 2D texture dimensions %d\n" , deviceProp.maxTexture2D[1] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 3D texture dimensions %d\n" , deviceProp.maxTexture3D[0] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 3D texture dimensions %d\n" , deviceProp.maxTexture3D[1] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 3D texture dimensions %d\n" , deviceProp.maxTexture3D[2] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 1D layered texture dimensions %d\n" , deviceProp.maxTexture1DLayered[0] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 1D layered texture dimensions %d\n" , deviceProp.maxTexture1DLayered[1] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 2D layered texture dimensions %d\n" , deviceProp.maxTexture2DLayered[0] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 2D layered texture dimensions %d\n" , deviceProp.maxTexture2DLayered[1] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 2D layered texture dimensions %d\n" , deviceProp.maxTexture2DLayered[2] );
ierr = PetscPrintf(PETSC_COMM_SELF," Alignment requirements for surfaces %d\n" , deviceProp.surfaceAlignment );
ierr = PetscPrintf(PETSC_COMM_SELF," Device can possibly execute multiple kernels concurrently %d\n" , deviceProp.concurrentKernels );
ierr = PetscPrintf(PETSC_COMM_SELF," Device has ECC support enabled %d\n" , deviceProp.ECCEnabled );
ierr = PetscPrintf(PETSC_COMM_SELF," PCI bus ID of the device %d\n" , deviceProp.pciBusID );
ierr = PetscPrintf(PETSC_COMM_SELF," PCI device ID of the device %d\n" , deviceProp.pciDeviceID );
ierr = PetscPrintf(PETSC_COMM_SELF," PCI domain ID of the device %d\n" , deviceProp.pciDomainID );
ierr = PetscPrintf(PETSC_COMM_SELF," 1 if device is a Tesla device using TCC driver, 0 otherwise %d\n" , deviceProp.tccDriver );
ierr = PetscPrintf(PETSC_COMM_SELF," Number of asynchronous engines %d\n" , deviceProp.asyncEngineCount );
ierr = PetscPrintf(PETSC_COMM_SELF," Device shares a unified address space with the host %d\n" , deviceProp.unifiedAddressing );
ierr = PetscPrintf(PETSC_COMM_SELF," Peak memory clock frequency in kilohertz %d\n" , deviceProp.memoryClockRate );
ierr = PetscPrintf(PETSC_COMM_SELF," Global memory bus width in bits %d\n" , deviceProp.memoryBusWidth );
ierr = PetscPrintf(PETSC_COMM_SELF," Size of L2 cache in bytes %d\n" , deviceProp.l2CacheSize );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum resident threads per multiprocessor %d\n" , deviceProp.maxThreadsPerMultiProcessor );
} else {
ierr = PetscPrintf(PETSC_COMM_SELF, " Unable to determine device %d properties, exiting\n",deviceNum);CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_SELF, " cuda Error: %s , exiting\n",hipGetErrorString( ierrCuda));CHKERRQ(ierr);
return -1;
}
PetscLogEventRegister("ComputeFunction",0,&LogFunction);
ierr = PetscOptionsGetString(PETSC_NULL,"-da_vec_type",typeName,256,&flg);CHKERRQ(ierr);
if (flg) {
ierr = PetscStrstr(typeName,"cusp",&tmp);CHKERRQ(ierr);
if (tmp) useCUSP = PETSC_TRUE;
}
size_t sizeIndex = 3 * sizeof(PetscInt);
CUDA_SAFE_CALL(hipMalloc((void **) &cudaTest, sizeIndex)); // Allocate array on device
//ierr = DMDACreate1d(PETSC_COMM_WORLD,DMDA_BOUNDARY_NONE,-8,1,1,PETSC_NULL,&da);CHKERRQ(ierr);
PetscInt globalSize = 125;
globalSize = 99;
DM da;
ierr = DMDACreate3d(PETSC_COMM_WORLD,DMDA_BOUNDARY_NONE,DMDA_BOUNDARY_NONE,DMDA_BOUNDARY_NONE,DMDA_STENCIL_STAR,-globalSize,-globalSize,-globalSize,PETSC_DECIDE,PETSC_DECIDE,PETSC_DECIDE,1,1,PETSC_NULL,PETSC_NULL,PETSC_NULL,&da);CHKERRQ(ierr);
ierr = DMCreateGlobalVector(da,&x); VecDuplicate(x,&f);CHKERRQ(ierr);
if (useCUSP)
{
ierr = DMCreateMatrix(da,MATAIJCUSP,&J);CHKERRQ(ierr);
}
else
{
ierr = DMCreateMatrix(da,MATAIJ,&J);CHKERRQ(ierr);
}
PetscInt GlobalDAMx,GlobalDAMy,GlobalDAMz;
ierr = DMDAGetInfo(da,PETSC_IGNORE,&GlobalDAMx,&GlobalDAMy,&GlobalDAMz,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
// ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr);
WFSModel FemModel(myrank,deviceNum,100);// fem mesh data
// StarStencil stencil_op(0,0,xs,ys,zs,xm,ym,zm,hx,hy,hz);// transformation operator
// ierr = DMSetApplicationContext(da,&stencil_op);CHKERRQ(ierr);
ierr = SNESCreate(PETSC_COMM_WORLD,&snes);CHKERRQ(ierr);
ierr = SNESSetFunction(snes,f,ComputeFunction,da);CHKERRQ(ierr);
ierr = SNESSetFromOptions(snes);CHKERRQ(ierr);
for (PetscInt iii = 0 ; iii < 1 ; iii++)
{
ierr = PetscPrintf(PETSC_COMM_WORLD, "gpu check %d \n",iii);CHKERRQ(ierr);
ierr = ComputeFunction(snes,x,f,(void *)da);
}
ierr = SNESSolve(snes,PETSC_NULL,x);CHKERRQ(ierr);
ierr = MatDestroy(&J);CHKERRQ(ierr);
ierr = VecDestroy(&x);CHKERRQ(ierr);
ierr = VecDestroy(&f);CHKERRQ(ierr);
ierr = SNESDestroy(&snes);CHKERRQ(ierr);
ierr = DMDestroy(&da);CHKERRQ(ierr);
// call device reset to flush buffer
CUDA_SAFE_CALL(hipDeviceReset());
PetscFinalize();
return 0;
}
// PetscErrorCode ComputeFunction(SNES snes,Vec u,Vec f,void *ctx)
// {
// PetscInt i,j,k;
// PetscInt ustartshift,uendshift,xoffset,yoffset,zoffset,fstart;
// PetscScalar ***uu,***ff,hxhzdhy,hyhzdhx,hxhydhz;
// PetscScalar u_val,u_east,u_west,u_north,u_south,u_up, u_down, u_xx, u_yy,u_zz,sc ,two =2.0;
// DM da = (DM) ctx;
// Vec ulocal;
// PetscErrorCode ierr;
// PetscMPIInt rank,size;
// MPI_Comm comm;
// CUSPARRAY *uarray,*farray;
// PetscLogEventBegin(LogFunction,0,0,0,0); // init libMesh
//
// ierr = DMGetLocalVector(da,&ulocal);CHKERRQ(ierr);
// ierr = DMGlobalToLocalBegin(da,u,INSERT_VALUES,ulocal);CHKERRQ(ierr);
// ierr = DMGlobalToLocalEnd(da,u,INSERT_VALUES,ulocal);CHKERRQ(ierr);
// StarStencil *stencil_op;
// ierr = DMGetApplicationContext(da,(void *)&stencil_op);CHKERRQ(ierr);
// hxhzdhy = stencil_op->m_hx*stencil_op->m_hz/stencil_op->m_hy;
// hyhzdhx = stencil_op->m_hy*stencil_op->m_hz/stencil_op->m_hx;
// hxhydhz = stencil_op->m_hx*stencil_op->m_hy/stencil_op->m_hz;
// sc = stencil_op->m_hx*stencil_op->m_hy*stencil_op->m_hz*3.0;
//
// if (useCUSP) {
// ierr = VecCUSPGetArrayRead(ulocal,&uarray);CHKERRQ(ierr);
// ierr = VecCUSPGetArrayWrite(f,&farray);CHKERRQ(ierr);
// ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
// ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
// ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
// if (rank) ustartshift = 1; else ustartshift = 0;
// if (rank != size-1) uendshift = 1; else uendshift = 0;
// xoffset = 1;
// yoffset = stencil_op->m_xm;
// zoffset = stencil_op->m_xm*stencil_op->m_ym;
// ierr = VecGetOwnershipRange(f,&fstart,PETSC_NULL);CHKERRQ(ierr);
// try {
//
// // typedef these iterators for shorthand
// thrust::for_each(
// thrust::make_zip_iterator(
// thrust::make_tuple(
// farray->begin(), //0
// thrust::counting_iterator<int>(fstart) , //1
// thrust::make_zip_iterator(
// thrust::make_tuple(
// uarray->begin()+ustartshift, //1 u(i ,j ,k )
// uarray->begin()+ustartshift + xoffset, //2 u(i+1,j ,k )
// uarray->begin()+ustartshift - xoffset, //3 u(i-1,j ,k )
// uarray->begin()+ustartshift + yoffset, //4 u(i ,j+1,k )
// uarray->begin()+ustartshift - yoffset, //5 u(i ,j-1,k )
// uarray->begin()+ustartshift + zoffset, //6 u(i ,j ,k+1)
// uarray->begin()+ustartshift - zoffset //7 u(i ,j ,k-1)
// )),
// thrust::make_zip_iterator(
// thrust::make_tuple(
// thrust::constant_iterator<PetscScalar>(6.0 ),//0 perfusion
// thrust::constant_iterator<PetscScalar>(0.57 ),//1 conduction
// thrust::constant_iterator<PetscScalar>(5.e2 ),//2 scattering
// thrust::constant_iterator<PetscScalar>(14.e3) //3 absorption
// ))
// )),
// thrust::make_zip_iterator(
// thrust::make_tuple(
// farray->end(), //0
// thrust::counting_iterator<int>(fstart) + u->map->n , //1
// thrust::make_zip_iterator(
// thrust::make_tuple(
// uarray->end()+uendshift, //2_0 u(i ,j ,k )
// uarray->end()+uendshift + xoffset, //2_1 u(i+1,j ,k )
// uarray->end()+uendshift - xoffset, //2_2 u(i-1,j ,k )
// uarray->end()+uendshift + yoffset, //2_3 u(i ,j+1,k )
// uarray->end()+uendshift - yoffset, //2_4 u(i ,j-1,k )
// uarray->end()+uendshift + zoffset, //2_5 u(i ,j ,k+1)
// uarray->end()+uendshift - zoffset //2_6 u(i ,j ,k-1)
// )),
// thrust::make_zip_iterator(
// thrust::make_tuple(
// thrust::constant_iterator<PetscScalar>(6.0 ),//3_0 perfusion
// thrust::constant_iterator<PetscScalar>(0.57 ),//3_1 conduction
// thrust::constant_iterator<PetscScalar>(5.e2 ),//3_2 scattering
// thrust::constant_iterator<PetscScalar>(14.e3) //3_3 absorption
// ))
// )),
// *stencil_op);
//
// PetscInt hostTest[3]={-1,-1,-1};
// //CUDA_SAFE_CALL(hipMemcpy(hostTest, cudaTest,3*sizeof(PetscInt),hipMemcpyDeviceToHost));
// ierr = PetscPrintf(PETSC_COMM_WORLD, "%d %d %d \n",hostTest[0],hostTest[1],hostTest[2]);CHKERRQ(ierr);
// }
// catch(char* all){
// ierr = PetscPrintf(PETSC_COMM_WORLD, "Thrust is not working\n");CHKERRQ(ierr);
// }
// ierr = VecCUSPRestoreArrayRead(ulocal,&uarray);CHKERRQ(ierr);
// ierr = VecCUSPRestoreArrayWrite(f,&farray);CHKERRQ(ierr);
// } else {
// ierr = DMDAVecGetArray(da,ulocal,&uu);CHKERRQ(ierr);
// ierr = DMDAVecGetArray(da,f,&ff);CHKERRQ(ierr);
//
// PetscInt GlobalDAMx,GlobalDAMy,GlobalDAMz;
// ierr = DMDAGetInfo(da,PETSC_IGNORE,&GlobalDAMx,&GlobalDAMy,&GlobalDAMz,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
// /* Compute function over the locally owned part of the grid */
// for (k=stencil_op->m_zs; k<stencil_op->m_zs+stencil_op->m_zm; k++) {
// for (j=stencil_op->m_ys; j<stencil_op->m_ys+stencil_op->m_ym; j++) {
// for (i=stencil_op->m_xs; i<stencil_op->m_xs+stencil_op->m_xm; i++) {
// if (i == 0 || j == 0 || k == 0 || i == GlobalDAMx-1 || j == GlobalDAMy-1 || k == GlobalDAMz-1) {
// ff[k][j][i] = uu[k][j][i];
// } else {
// u_val = uu[k][j][i];
// u_east = uu[k][j][i+1];
// u_west = uu[k][j][i-1];
// u_north = uu[k][j+1][i];
// u_south = uu[k][j-1][i];
// u_up = uu[k+1][j][i];
// u_down = uu[k-1][j][i];
// u_xx = (-u_east + two*u_val - u_west )*hyhzdhx;
// u_yy = (-u_north + two*u_val - u_south)*hxhzdhy;
// u_zz = (-u_up + two*u_val - u_down )*hxhydhz;
// ff[k][j][i] = u_xx + u_yy + u_zz - sc*PetscExpScalar(u_val);
// }
// }
// }
// }
// ierr = DMDAVecRestoreArray(da,ulocal,&uu);CHKERRQ(ierr);
// ierr = DMDAVecRestoreArray(da,f,&ff);CHKERRQ(ierr);
// }
// ierr = DMRestoreLocalVector(da,&ulocal);CHKERRQ(ierr);
// PetscLogEventEnd(LogFunction,0,0,0,0); // init libMesh
// //VecView(u,0);printf("f\n");
// //VecView(f,0);
// return 0;
//
// }
// PetscErrorCode ComputeJacobian(SNES snes,Vec x,Mat *J,Mat *B,MatStructure *flag,void *ctx)
// {
// DM da = (DM) ctx;
// Vec xlocal;
// PetscErrorCode ierr;
// if(jacobianComputed) return 0;
// jacobianComputed = PETSC_TRUE;
//
// ierr = DMGetLocalVector(da,&xlocal);DMGlobalToLocalBegin(da,x,INSERT_VALUES,xlocal);CHKERRQ(ierr);
// ierr = DMGlobalToLocalEnd(da,x,INSERT_VALUES,xlocal);CHKERRQ(ierr);
//
// PetscInt GlobalDAMx,GlobalDAMy,GlobalDAMz,xs,xm,ys,ym,zs,zm;
// PetscScalar hx,hy,hz;
// ierr = DMDAGetInfo(da,PETSC_IGNORE,&GlobalDAMx,&GlobalDAMy,&GlobalDAMz,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
// hx = 1.0/(PetscReal)(GlobalDAMx-1);
// hy = 1.0/(PetscReal)(GlobalDAMy-1);
// hz = 1.0/(PetscReal)(GlobalDAMz-1);
// ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr);
//
// PetscScalar hxhzdhy,hyhzdhx,hxhydhz,sc;
// hxhzdhy = hx*hz/hy;
// hyhzdhx = hy*hz/hx;
// hxhydhz = hx*hy/hz;
// sc = hx*hy*hz*3.0;
//
// ierr = MatZeroEntries(*J);CHKERRQ(ierr);
// ierr = MatShift(*J,1.0);CHKERRQ(ierr);
//
// StarStencil *stencil_op;
// ierr = DMGetApplicationContext(da,(void *)&stencil_op);CHKERRQ(ierr);
//
// /* Compute function over the locally owned part of the grid */
// PetscScalar v[7],two = 2.0;
// MatStencil col[7],row;
// PetscInt i,j,k;
// for (k=zs; k<zs+zm; k++) {
// for (j=ys; j<ys+ym; j++) {
// for (i=xs; i<xs+xm; i++) {
// row.k = k; row.j = j; row.i = i;
// if (i > 0 && j > 0 && k > 0 && i < GlobalDAMx-1 && j < GlobalDAMy-1 && k < GlobalDAMz-1) {
// v[0] = -0.5 * stencil_op->m_conduction * hxhydhz; col[0].k=k-1;col[0].j=j; col[0].i = i;
// v[1] = -0.5 * stencil_op->m_conduction * hxhzdhy; col[1].k=k; col[1].j=j-1;col[1].i = i;
// v[2] = -0.5 * stencil_op->m_conduction * hyhzdhx; col[2].k=k; col[2].j=j; col[2].i = i-1;
// v[3] = sc*( stencil_op->m_density*stencil_op->m_specificheat/stencil_op->m_deltat
// + 0.5 * stencil_op->m_perfusion * stencil_op->m_bloodspecificheat)
// + 1.0 * stencil_op->m_bloodspecificheat * (hyhzdhx+hxhzdhy+hxhydhz);
// col[3].k=row.k;col[3].j=row.j;col[3].i = row.i;
// v[4] = -0.5 * stencil_op->m_conduction * hyhzdhx; col[4].k=k; col[4].j=j; col[4].i = i+1;
// v[5] = -0.5 * stencil_op->m_conduction * hxhzdhy; col[5].k=k; col[5].j=j+1;col[5].i = i;
// v[6] = -0.5 * stencil_op->m_conduction * hxhydhz; col[6].k=k+1;col[6].j=j; col[6].i = i;
// ierr = MatSetValuesStencil(*J,1,&row,7,col,v,INSERT_VALUES);CHKERRQ(ierr);
// }
// }
// }
// }
//
// ierr = MatAssemblyBegin(*J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
// ierr = MatAssemblyEnd(*J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
// *flag = SAME_NONZERO_PATTERN;
// ierr = DMRestoreLocalVector(da,&xlocal);CHKERRQ(ierr);
// return 0;
// }
// gNek is really lightweight - and the input requirements are clearly defined.
// Feed us mesh vertex coordinates, the element-vertex connectivity, the
// element boundary conditions and the material parameters and we can
// feed back a solution. This can even be done through cubit or gmsh files [ I think ].
//
// I have to say that GPU compute is pretty much all or nothing - we also try to
// avoid too much traffic between host and device. However, we do the
// preprocessing on the host as this is usually a sub-dominant cost.
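// Residual assembly outline for the routine below: gather each element's
// nodal data through the permutation iterators, let every element write its
// own 8-entry local residual (DG-style, so threads never write to the same
// slot), then sort the expanded residual by global node id and reduce_by_key
// the sorted values into the global residual vector f.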
PetscErrorCode ComputeFunction(SNES snes,Vec u,Vec f,void *ctx)
{
WFSModel *FemModel= (WFSModel*) ctx;
PetscErrorCode ierr;
// get solution array for reading
// FIXME: will not work for mpi distributed array
// TODO: fix for MPI
ierr = VecCUSPGetArrayRead( u,&FemModel->uarray);CHKERRQ(ierr);
// get residual array for writing
ierr = VecCUSPGetArrayWrite(f,&FemModel->farray);CHKERRQ(ierr);
// loop over elements
// ensure thread safety by each thread writing to its own local residual
// ie similar to DG methods
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(
FemModel->ElementBegin(),
FemModel->ResidualBegin(),
FemModel->SolutionBegin(),
FemModel->ConstitutiveBegin()
)),
thrust::make_zip_iterator(thrust::make_tuple(
FemModel->ElementEnd(),
FemModel->ResidualEnd(),
FemModel->SolutionEnd(),
FemModel->ConstitutiveEnd()
)),
*FemModel // call the overloaded operator() from this class
);
// Reduce the expanded residual to the usual
// continuous additive contributions
// first need to sort
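// (Illustration with made-up values: if m_LocalElementMap = [2,0,1,0] and
// element_residuals = [r0,r1,r2,r3], sort_by_key groups equal keys together,
// e.g. keys [0,0,1,2] with values [r1,r3,r2,r0]; reduce_by_key then emits one
// summed entry per distinct key, [r1+r3, r2, r0], which is written to farray.)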
thrust::sort_by_key(
FemModel->m_LocalElementMap->begin(),
FemModel->m_LocalElementMap->end(),
FemModel->element_residuals->begin()
);
// reduce the sorted array
thrust::reduce_by_key(
FemModel->m_LocalElementMap->begin(),
FemModel->m_LocalElementMap->end(),
FemModel->element_residuals->begin(),
thrust::make_discard_iterator(),
FemModel->farray->begin()
);
// restore arrays
ierr = VecCUSPRestoreArrayRead( u,&FemModel->uarray);CHKERRQ(ierr);
ierr = VecCUSPRestoreArrayWrite(f,&FemModel->farray);CHKERRQ(ierr);
return 0;
}
| c73df7a19b67b7e3e02b3d67a8db442c2c32efcb.cu | static char help[] = "Solves -Laplacian u - exp(u) = 0, 0 < x < 1 using GPU\n\n";
/*
Same as ex47.c except it also uses the GPU to evaluate the function
*/
// TODO: CAUTION sort header needs to be first or getting compile errors??
// TODO: CAUTION sort header needs to be first or getting compile errors??
// TODO: CAUTION sort header needs to be first or getting compile errors??
#include <thrust/sort.h>
// petsc includes
#include <petscdmda.h>
#include <petscsnes.h>
#include <petsccusp.h>
// cusp includes
#include "cusp/detail/device/utils.h"
extern PetscErrorCode ComputeFunction(SNES,Vec,Vec,void*);
PetscBool useCUSP = PETSC_FALSE;
PetscBool jacobianComputed = PETSC_FALSE;
PetscLogEvent LogFunction = 0;
__device__ PetscInt *cudaTest;
struct LinearHexMesh
{
LinearHexMesh(PetscInt numelements) :
m_NumElements (numelements)
{
m_NodesPerElement = 8 ;
element_residuals->resize(numelements*m_NodesPerElement);
}
// number of elements
PetscInt m_NumElements;
// number of nodes per element
PetscInt m_NodesPerElement ;
// node coordinates
CUSPARRAY *m_NodeXCoord, *m_NodeYCoord, *m_NodeZCoord;
// solution and residual
CUSPARRAY *uarray,*farray;
// temporary vector to hold element wise residual
// 8 residual entries per element (one for each node)
CUSPARRAY *element_residuals;
// connectivity information is stored per node for structure of array access
CUSPINTARRAYGPU *m_Connectivity0,
*m_Connectivity1,
*m_Connectivity2,
*m_Connectivity3,
*m_Connectivity4,
*m_Connectivity5,
*m_Connectivity6,
*m_Connectivity7;
CUSPINTARRAYGPU *m_GlobalLocalMap0,
*m_GlobalLocalMap1,
*m_GlobalLocalMap2,
*m_GlobalLocalMap3,
*m_GlobalLocalMap4,
*m_GlobalLocalMap5,
*m_GlobalLocalMap6,
*m_GlobalLocalMap7;
CUSPINTARRAYGPU *m_LocalElementMap;
typedef CUSPARRAY::iterator PetscScalarIter;
typedef CUSPINTARRAYGPU::iterator PetscIntIter;
typedef thrust::permutation_iterator<PetscScalarIter,PetscIntIter> PetscMapIter;
typedef thrust::zip_iterator< thrust::tuple<
PetscMapIter, PetscMapIter, PetscMapIter, PetscMapIter,
PetscMapIter, PetscMapIter, PetscMapIter, PetscMapIter
> > hex_iterator ;
// iterators for looping of nodes within elements
typedef thrust::zip_iterator<
thrust::tuple< hex_iterator, hex_iterator, hex_iterator > > hex_node_iterator;
hex_node_iterator ElementBegin()
{
return thrust::make_zip_iterator(thrust::make_tuple(
thrust::make_zip_iterator(thrust::make_tuple( // x - coordinates
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity0->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity1->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity2->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity3->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity4->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity5->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity6->begin()),
thrust::make_permutation_iterator(m_NodeXCoord->begin(),m_Connectivity7->begin())
)),
thrust::make_zip_iterator(thrust::make_tuple( // y - coordinates
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity0->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity1->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity2->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity3->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity4->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity5->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity6->begin()),
thrust::make_permutation_iterator(m_NodeYCoord->begin(),m_Connectivity7->begin())
)),
thrust::make_zip_iterator(thrust::make_tuple( // z - coordinates
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity0->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity1->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity2->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity3->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity4->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity5->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity6->begin()),
thrust::make_permutation_iterator(m_NodeZCoord->begin(),m_Connectivity7->begin())
))
));
}
hex_node_iterator ElementEnd()
{
return thrust::make_zip_iterator(thrust::make_tuple(
thrust::make_zip_iterator(thrust::make_tuple( // x - coordinates
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity0->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity1->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity2->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity3->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity4->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity5->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity6->end()),
thrust::make_permutation_iterator(m_NodeXCoord->end(),m_Connectivity7->end())
)),
thrust::make_zip_iterator(thrust::make_tuple( // y - coordinates
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity0->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity1->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity2->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity3->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity4->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity5->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity6->end()),
thrust::make_permutation_iterator(m_NodeYCoord->end(),m_Connectivity7->end())
)),
thrust::make_zip_iterator(thrust::make_tuple( // z - coordinates
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity0->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity1->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity2->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity3->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity4->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity5->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity6->end()),
thrust::make_permutation_iterator(m_NodeZCoord->end(),m_Connectivity7->end())
))
));
}
// iterators for looping over element wise residual for each element
typedef thrust::zip_iterator< thrust::tuple<
PetscScalarIter, PetscScalarIter, PetscScalarIter, PetscScalarIter,
PetscScalarIter, PetscScalarIter, PetscScalarIter, PetscScalarIter
> > residual_iterator ;
residual_iterator ResidualBegin()
{
return thrust::make_zip_iterator(thrust::make_tuple(
element_residuals->begin()+0,
element_residuals->begin()+1,
element_residuals->begin()+2,
element_residuals->begin()+3,
element_residuals->begin()+4,
element_residuals->begin()+5,
element_residuals->begin()+6,
element_residuals->begin()+7
));
}
residual_iterator ResidualEnd()
{
return thrust::make_zip_iterator(thrust::make_tuple(
element_residuals->end()+0,
element_residuals->end()+1,
element_residuals->end()+2,
element_residuals->end()+3,
element_residuals->end()+4,
element_residuals->end()+5,
element_residuals->end()+6,
element_residuals->end()+7
));
}
// iterators for looping over element solution vector for each element
hex_iterator SolutionBegin()
{
return thrust::make_zip_iterator(thrust::make_tuple(
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap0->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap1->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap2->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap3->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap4->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap5->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap6->begin()),
thrust::make_permutation_iterator(uarray->begin(),m_GlobalLocalMap7->begin())
));
}
hex_iterator SolutionEnd()
{
return thrust::make_zip_iterator(thrust::make_tuple(
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap0->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap1->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap2->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap3->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap4->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap5->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap6->end()),
thrust::make_permutation_iterator(uarray->end(),m_GlobalLocalMap7->end())
));
}
typedef thrust::tuple<
PetscScalar,PetscScalar,PetscScalar,PetscScalar,
PetscScalar,PetscScalar,PetscScalar,PetscScalar
> hex_tuple;
// helper to get array/vector from hex tuple
__host__ __device__
void get_hex_vector(hex_tuple const &tuple, PetscScalar elemvector[8])
{
// decode the tuple
elemvector[0]= thrust::get<0>(tuple) ;
elemvector[1]= thrust::get<1>(tuple) ;
elemvector[2]= thrust::get<2>(tuple) ;
elemvector[3]= thrust::get<3>(tuple) ;
elemvector[4]= thrust::get<4>(tuple) ;
elemvector[5]= thrust::get<5>(tuple) ;
elemvector[6]= thrust::get<6>(tuple) ;
elemvector[7]= thrust::get<7>(tuple) ;
return;
}
};
// https://groups.google.com/forum/?fromgroups=#!topic/thrust-users/mqYDi2X7xmA
//
// An object's data members exist wherever the compiler decides to place
// them, given some constraints. For functors used with Thrust, data
// members get copied around to different memory spaces. A functor (and
// its data) begin on the host, probably implemented by the compiler in
// CPU registers. A Thrust algorithm will receive a copy of the user's
// functor and eventually package it up in something passed as a
// __global__ function argument. Depending on various particulars of the
// compiler, GPU, and size, __global__ function arguments may be
// implemented in either __shared__ memory, __constant__ memory, or
// global device memory. When a __global__ function executes, its
// parameters (including any copies of user functors) typically get
// copied into GPU registers. Does that make sense?
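// A minimal sketch of that behaviour (hypothetical names, not part of this
// model): the functor below carries a scalar member, and thrust copies the
// whole functor object to the device when the algorithm launches, so the
// member is usable inside operator() without any explicit cudaMemcpy.
// struct ScaleBy
// {
// PetscScalar m_factor;
// ScaleBy(PetscScalar factor) : m_factor(factor) {}
// __host__ __device__
// PetscScalar operator()(PetscScalar x) const { return m_factor * x; }
// };
// // usage (needs <thrust/transform.h>):
// // thrust::transform(v.begin(), v.end(), v.begin(), ScaleBy(2.0));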
struct WFSModel : public LinearHexMesh
{
PetscInt m_rank,m_deviceNum; //device info
PetscScalar m_x0,m_y0,m_z0;
PetscScalar m_density ;
PetscScalar m_specificheat ;
PetscScalar m_deltat ;
PetscScalar m_bloodspecificheat ;
PetscScalar m_bodytemp ;
CUSPARRAY *m_conduction ;
CUSPARRAY *m_perfusion ;
CUSPARRAY *m_absorption ;
CUSPARRAY *m_scattering ;
WFSModel(PetscInt rank, PetscInt deviceNum,PetscInt numelements ) :
LinearHexMesh(numelements) ,
m_rank(rank),m_deviceNum(deviceNum)
{
m_density = 1.e3;
m_specificheat = 3.8e3;
m_deltat = 1.00;
m_bloodspecificheat = 3.4e3;
m_bodytemp = 37.0;
m_x0 = 0.005;
m_y0 = 0.005;
m_z0 = 0.005;
}
// iterators for looping over element solution vector for each element
typedef thrust::zip_iterator< thrust::tuple<
PetscScalarIter, PetscScalarIter, PetscScalarIter, PetscScalarIter
> > constitutive_iterator ;
constitutive_iterator ConstitutiveBegin()
{
return thrust::make_zip_iterator(thrust::make_tuple(
m_perfusion ->begin(),//0 perfusion
m_conduction->begin(),//1 conduction
m_scattering->begin(),//2 scattering
m_absorption->begin() //3 absorption
));
}
constitutive_iterator ConstitutiveEnd()
{
return thrust::make_zip_iterator(thrust::make_tuple(
m_perfusion ->end(),//0 perfusion
m_conduction->end(),//1 conduction
m_scattering->end(),//2 scattering
m_absorption->end() //3 absorption
));
}
// point_in_bbox from other post
template <typename Tuple>
__host__ __device__
void operator()(Tuple tuple)
{
// decode the hex node coordinates
PetscScalar NodeXcoord[8], NodeYcoord[8], NodeZcoord[8] ;
this->get_hex_vector(thrust::get<0>(thrust::get<0>(tuple)), NodeXcoord);
this->get_hex_vector(thrust::get<1>(thrust::get<0>(tuple)), NodeYcoord);
this->get_hex_vector(thrust::get<2>(thrust::get<0>(tuple)), NodeZcoord);
// decode local residual and solution
PetscScalar ElementResidual[8], ElementSolution[8];
this->get_hex_vector(thrust::get<1>(tuple), ElementResidual);
this->get_hex_vector(thrust::get<2>(tuple), ElementSolution);
// decode constitutive data
PetscScalar Perfusion = thrust::get<0>(thrust::get<3>(tuple));
PetscScalar Conduction = thrust::get<1>(thrust::get<3>(tuple));
PetscScalar Absorption = thrust::get<2>(thrust::get<3>(tuple));
PetscScalar Scattering = thrust::get<3>(thrust::get<3>(tuple));
printf("rank=%d device=%d blockDim=(%d,%d,%d) gridDim=(%d,%d,%d) warpSize=%d blockIdx=(%d,%d,%d) threadIdx=(%d,%d,%d) node0=(%f,%f,%f) residual0=%f solution0=%f absorption=%f conduction=%f\n",m_rank,m_deviceNum,blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z, warpSize,blockIdx.x,blockIdx.y,blockIdx.z,threadIdx.x,threadIdx.y,threadIdx.z,
NodeXcoord[0],NodeYcoord[0],NodeZcoord[0],
ElementResidual[0],ElementSolution[0],
Absorption ,Conduction );
// //... do stuff with paramaters ...
// thrust::get<0>(t) = sc * ( source
// + m_density*m_specificheat/m_deltat* u_val
// + m_bloodspecificheat*m_perfusion*(m_bodytemp - 0.5*u_val) )
// for (unsigned int qp=0; qp != n_qpoints; qp++)
// {
// // Compute the solution & its gradient at the old Newton iterate
// Number u_theta = c.interior_value( this->u_var,qp);
// Gradient grad_u = c.interior_gradient(this->u_var,qp);
// // get damage values
// Number damage = c.interior_value( this->a_var,qp);
// Number DdamageDu= c.interior_value( this->b_var,qp);
// Gradient DiffusionDirection = this->m_MathModel.DiffusionDirection(subdomain_id) ;
// Gradient TempDiffusionDirection(
// grad_u(0)*DiffusionDirection(0) ,
// grad_u(1)*DiffusionDirection(1) ,
// grad_u(2)*DiffusionDirection(2)
// );
// // First, an i-loop over the velocity degrees of freedom.
// // We know that n_u_dofs == n_v_dofs so we can compute contributions
// // for both at the same time.
// for (unsigned int i=0; i != n_u_dofs; i++)
// {
// ElementResidual(i) += JxW[qp] * (
// phi[i][qp] *
// ( // perfusion term (check the SIGN)
// this->m_MathModel.PennesReactionTerm(field_id,u_theta,damage)
// - // source term
// this->m_MathModel.PennesSource(field_id,u_theta,
// damage,z_value,
// qpoint[qp],
// this->m_PowerID)
// )
// + // diffusion term
// this->m_MathModel.ThermalConductivity(field_id,u_theta,damage) *
// ( TempDiffusionDirection * dphi[i][qp] )
// ) ;
// // convection term
// Fu(i) += JxW[qp] * phi[i][qp] *
// ( this->m_MathModel.BulkFluidFlow(subdomain_id) * grad_u ) ;
// }
// }
}
// template <typename Tuple>
// __host__ __device__
// void operator()(Tuple t)
// {
// /* f = (2*u_i - u_(i+1) - u_(i-1))/h - h*exp(u_i) */
// thrust::get<0>(t) = 1;
// PetscInt Iz = thrust::get<1>(t)/m_ym/m_xm;
// PetscInt Iy = (thrust::get<1>(t)-Iz*m_ym*m_xm)/m_xm;
// PetscInt Ix = (thrust::get<1>(t)-Iz*m_ym*m_xm- Iy*m_xm);
// PetscScalar sc = m_hx*m_hz*m_hy;
// PetscScalar hxhzdhy = m_hx*m_hz/m_hy;
// PetscScalar hyhzdhx = m_hy*m_hz/m_hx;
// PetscScalar hxhydhz = m_hx*m_hy/m_hz;
// PetscScalar two = 2.0;
// // print launch parameters and dbg info
// // printf("rank=%d device=%d blockDim=(%d,%d,%d) gridDim=(%d,%d,%d) warpSize=%d blockIdx=(%d,%d,%d) threadIdx=(%d,%d,%d) size=(%d,%d,%d) globalID=%d index=(%d,%d,%d)\n",m_rank,m_deviceNum,blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z, warpSize,blockIdx.x,blockIdx.y,blockIdx.z,threadIdx.x,threadIdx.y,threadIdx.z,m_xm,m_ym,m_zm,thrust::get<8>(t),Ix,Iy,Iz);
// PetscScalar u_val = thrust::get<0>(thrust::get<2>(t)) ;//1 u(i ,j ,k )
// PetscScalar perfusion = thrust::get<0>(thrust::get<3>(t)) ;//perfusion
// if (
// Ix > 0 && Ix < m_xm-1
// &&
// Iy > 0 && Iy < m_ym-1
// &&
// Iz > 0 && Iz < m_zm-1
// ) {
// // decode the tuple
// PetscScalar u_east = thrust::get<1>(thrust::get<2>(t));//2 u(i+1,j ,k )
// PetscScalar u_west = thrust::get<2>(thrust::get<2>(t));//3 u(i-1,j ,k )
// PetscScalar u_north = thrust::get<3>(thrust::get<2>(t));//4 u(i ,j+1,k )
// PetscScalar u_south = thrust::get<4>(thrust::get<2>(t));//5 u(i ,j-1,k )
// PetscScalar u_up = thrust::get<5>(thrust::get<2>(t));//6 u(i ,j ,k+1)
// PetscScalar u_down = thrust::get<6>(thrust::get<2>(t));//7 u(i ,j ,k-1)
// PetscScalar u_xx = (-u_east + two*u_val - u_west )*hyhzdhx;
// PetscScalar u_yy = (-u_north + two*u_val - u_south)*hxhzdhy;
// PetscScalar u_zz = (-u_up + two*u_val - u_down )*hxhydhz;
// PetscScalar sqdist = (m_hx * Ix - m_x0)*(m_hx * Ix - m_x0)
// + (m_hy * Iy - m_y0)*(m_hy * Iy - m_y0)
// + (m_hz * Iz - m_z0)*(m_hz * Iz - m_z0);
// PetscScalar source = 1.e4 * exp(5.0/(sqdist +1.0));
// thrust::get<0>(t) = sc * ( source
// + m_density*m_specificheat/m_deltat* u_val
// + m_bloodspecificheat*m_perfusion*(m_bodytemp - 0.5*u_val) )
// + m_conduction/2.0* (u_xx + u_yy + u_zz) ;
// } else { // dirichlet bc everywhere else
// thrust::get<0>(t) = u_val;
// }
//
// }
};
int main(int argc,char **argv)
{
SNES snes;
Vec x,f;
Mat J;
PetscErrorCode ierr;
cudaError ierrCuda;
char *tmp,typeName[256];
int myrank;
PetscBool flg;
PetscInitialize(&argc,&argv,(char *)0,help);
MPI_Comm_rank(PETSC_COMM_WORLD, &myrank);
int deviceNum=myrank;
{
int deviceCount;
CUDA_SAFE_CALL(cudaGetDeviceCount(&deviceCount));
ierr = PetscPrintf(PETSC_COMM_SELF, "!!!!!found %d devices !!!!!\n",deviceCount);CHKERRQ(ierr);
if (deviceCount == 0) {
ierr = PetscPrintf(PETSC_COMM_SELF, "!!!!!No devices found!!!!!\n");CHKERRQ(ierr);
return -1000;
}
if (deviceNum >= deviceCount || deviceNum < 0) {
ierr = PetscPrintf(PETSC_COMM_SELF, "\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", deviceNum, 0);CHKERRQ(ierr);
deviceNum = 0;
}
}
ierrCuda = cudaSetDevice(deviceNum);
if (ierrCuda != cudaSuccess) {
ierr = PetscPrintf(PETSC_COMM_SELF, " cuda Error: %s , exiting\n",cudaGetErrorString( ierrCuda));CHKERRQ(ierr);
return -1;
}
ierr = PetscPrintf(PETSC_COMM_SELF, " reseting GPU: \n");CHKERRQ(ierr);
CUDA_SAFE_CALL(cudaDeviceReset());
ierr = PetscPrintf(PETSC_COMM_SELF, "Running on...\n\n");CHKERRQ(ierr);
cudaDeviceProp deviceProp;
if (cudaGetDeviceProperties(&deviceProp, deviceNum) == cudaSuccess) {
ierr = PetscPrintf(PETSC_COMM_SELF, " Device %d: %s %d.%d\n", deviceNum, deviceProp.name,deviceProp.major,deviceProp.minor);CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_SELF," Global memory available on device in bytes %d\n" , deviceProp.totalGlobalMem );
ierr = PetscPrintf(PETSC_COMM_SELF," Shared memory available per block in bytes %d\n" , deviceProp.sharedMemPerBlock );
ierr = PetscPrintf(PETSC_COMM_SELF," 32-bit registers available per block %d\n" , deviceProp.regsPerBlock );
ierr = PetscPrintf(PETSC_COMM_SELF," Warp size in threads %d\n" , deviceProp.warpSize );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum pitch in bytes allowed by memory copies %d\n" , deviceProp.memPitch );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum number of threads per block %d\n" , deviceProp.maxThreadsPerBlock );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum size of each dimension of a block %d\n" , deviceProp.maxThreadsDim[0] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum size of each dimension of a block %d\n" , deviceProp.maxThreadsDim[1] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum size of each dimension of a block %d\n" , deviceProp.maxThreadsDim[2] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum size of each dimension of a grid %d\n" , deviceProp.maxGridSize[0] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum size of each dimension of a grid %d\n" , deviceProp.maxGridSize[1] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum size of each dimension of a grid %d\n" , deviceProp.maxGridSize[2] );
ierr = PetscPrintf(PETSC_COMM_SELF," Clock frequency in kilohertz %d\n" , deviceProp.clockRate );
ierr = PetscPrintf(PETSC_COMM_SELF," Constant memory available on device in bytes %d\n" , deviceProp.totalConstMem );
ierr = PetscPrintf(PETSC_COMM_SELF," Alignment requirement for textures %d\n" , deviceProp.textureAlignment );
ierr = PetscPrintf(PETSC_COMM_SELF," Number of multiprocessors on device %d\n" , deviceProp.multiProcessorCount );
ierr = PetscPrintf(PETSC_COMM_SELF," Specified whether there is a run time limit on kernels %d\n" , deviceProp.kernelExecTimeoutEnabled );
ierr = PetscPrintf(PETSC_COMM_SELF," Device is integrated as opposed to discrete %d\n" , deviceProp.integrated );
ierr = PetscPrintf(PETSC_COMM_SELF," Device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer %d\n", deviceProp.canMapHostMemory );
ierr = PetscPrintf(PETSC_COMM_SELF," Compute mode (See ::cudaComputeMode) %d\n" , deviceProp.computeMode );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 1D texture size %d\n" , deviceProp.maxTexture1D );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 2D texture dimensions %d\n" , deviceProp.maxTexture2D[0] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 2D texture dimensions %d\n" , deviceProp.maxTexture2D[1] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 3D texture dimensions %d\n" , deviceProp.maxTexture3D[0] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 3D texture dimensions %d\n" , deviceProp.maxTexture3D[1] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 3D texture dimensions %d\n" , deviceProp.maxTexture3D[2] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 1D layered texture dimensions %d\n" , deviceProp.maxTexture1DLayered[0] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 1D layered texture dimensions %d\n" , deviceProp.maxTexture1DLayered[1] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 2D layered texture dimensions %d\n" , deviceProp.maxTexture2DLayered[0] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 2D layered texture dimensions %d\n" , deviceProp.maxTexture2DLayered[1] );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum 2D layered texture dimensions %d\n" , deviceProp.maxTexture2DLayered[2] );
ierr = PetscPrintf(PETSC_COMM_SELF," Alignment requirements for surfaces %d\n" , deviceProp.surfaceAlignment );
ierr = PetscPrintf(PETSC_COMM_SELF," Device can possibly execute multiple kernels concurrently %d\n" , deviceProp.concurrentKernels );
ierr = PetscPrintf(PETSC_COMM_SELF," Device has ECC support enabled %d\n" , deviceProp.ECCEnabled );
ierr = PetscPrintf(PETSC_COMM_SELF," PCI bus ID of the device %d\n" , deviceProp.pciBusID );
ierr = PetscPrintf(PETSC_COMM_SELF," PCI device ID of the device %d\n" , deviceProp.pciDeviceID );
ierr = PetscPrintf(PETSC_COMM_SELF," PCI domain ID of the device %d\n" , deviceProp.pciDomainID );
ierr = PetscPrintf(PETSC_COMM_SELF," 1 if device is a Tesla device using TCC driver, 0 otherwise %d\n" , deviceProp.tccDriver );
ierr = PetscPrintf(PETSC_COMM_SELF," Number of asynchronous engines %d\n" , deviceProp.asyncEngineCount );
ierr = PetscPrintf(PETSC_COMM_SELF," Device shares a unified address space with the host %d\n" , deviceProp.unifiedAddressing );
ierr = PetscPrintf(PETSC_COMM_SELF," Peak memory clock frequency in kilohertz %d\n" , deviceProp.memoryClockRate );
ierr = PetscPrintf(PETSC_COMM_SELF," Global memory bus width in bits %d\n" , deviceProp.memoryBusWidth );
ierr = PetscPrintf(PETSC_COMM_SELF," Size of L2 cache in bytes %d\n" , deviceProp.l2CacheSize );
ierr = PetscPrintf(PETSC_COMM_SELF," Maximum resident threads per multiprocessor %d\n" , deviceProp.maxThreadsPerMultiProcessor );
} else {
ierr = PetscPrintf(PETSC_COMM_SELF, " Unable to determine device %d properties, exiting\n",deviceNum);CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_SELF, " cuda Error: %s , exiting\n",cudaGetErrorString( ierrCuda));CHKERRQ(ierr);
return -1;
}
PetscLogEventRegister("ComputeFunction",0,&LogFunction);
ierr = PetscOptionsGetString(PETSC_NULL,"-da_vec_type",typeName,256,&flg);CHKERRQ(ierr);
if (flg) {
ierr = PetscStrstr(typeName,"cusp",&tmp);CHKERRQ(ierr);
if (tmp) useCUSP = PETSC_TRUE;
}
size_t sizeIndex = 3 * sizeof(PetscInt);
CUDA_SAFE_CALL(cudaMalloc((void **) &cudaTest, sizeIndex)); // Allocate array on device
//ierr = DMDACreate1d(PETSC_COMM_WORLD,DMDA_BOUNDARY_NONE,-8,1,1,PETSC_NULL,&da);CHKERRQ(ierr);
PetscInt globalSize = 125;
globalSize = 99;
DM da;
ierr = DMDACreate3d(PETSC_COMM_WORLD,DMDA_BOUNDARY_NONE,DMDA_BOUNDARY_NONE,DMDA_BOUNDARY_NONE,DMDA_STENCIL_STAR,-globalSize,-globalSize,-globalSize,PETSC_DECIDE,PETSC_DECIDE,PETSC_DECIDE,1,1,PETSC_NULL,PETSC_NULL,PETSC_NULL,&da);CHKERRQ(ierr);
ierr = DMCreateGlobalVector(da,&x); VecDuplicate(x,&f);CHKERRQ(ierr);
if (useCUSP)
{
ierr = DMCreateMatrix(da,MATAIJCUSP,&J);CHKERRQ(ierr);
}
else
{
ierr = DMCreateMatrix(da,MATAIJ,&J);CHKERRQ(ierr);
}
PetscInt GlobalDAMx,GlobalDAMy,GlobalDAMz;
ierr = DMDAGetInfo(da,PETSC_IGNORE,&GlobalDAMx,&GlobalDAMy,&GlobalDAMz,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
// ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr);
WFSModel FemModel(myrank,deviceNum,100);// fem mesh data
// StarStencil stencil_op(0,0,xs,ys,zs,xm,ym,zm,hx,hy,hz);// transformation operator
// ierr = DMSetApplicationContext(da,&stencil_op);CHKERRQ(ierr);
ierr = SNESCreate(PETSC_COMM_WORLD,&snes);CHKERRQ(ierr);
ierr = SNESSetFunction(snes,f,ComputeFunction,da);CHKERRQ(ierr);
ierr = SNESSetFromOptions(snes);CHKERRQ(ierr);
for (PetscInt iii = 0 ; iii < 1 ; iii++)
{
ierr = PetscPrintf(PETSC_COMM_WORLD, "gpu check %d \n",iii);CHKERRQ(ierr);
ierr = ComputeFunction(snes,x,f,(void *)da);
}
ierr = SNESSolve(snes,PETSC_NULL,x);CHKERRQ(ierr);
ierr = MatDestroy(&J);CHKERRQ(ierr);
ierr = VecDestroy(&x);CHKERRQ(ierr);
ierr = VecDestroy(&f);CHKERRQ(ierr);
ierr = SNESDestroy(&snes);CHKERRQ(ierr);
ierr = DMDestroy(&da);CHKERRQ(ierr);
// call device reset to flush buffer
CUDA_SAFE_CALL(cudaDeviceReset());
PetscFinalize();
return 0;
}
// PetscErrorCode ComputeFunction(SNES snes,Vec u,Vec f,void *ctx)
// {
// PetscInt i,j,k;
// PetscInt ustartshift,uendshift,xoffset,yoffset,zoffset,fstart;
// PetscScalar ***uu,***ff,hxhzdhy,hyhzdhx,hxhydhz;
// PetscScalar u_val,u_east,u_west,u_north,u_south,u_up, u_down, u_xx, u_yy,u_zz,sc ,two =2.0;
// DM da = (DM) ctx;
// Vec ulocal;
// PetscErrorCode ierr;
// PetscMPIInt rank,size;
// MPI_Comm comm;
// CUSPARRAY *uarray,*farray;
// PetscLogEventBegin(LogFunction,0,0,0,0); // init libMesh
//
// ierr = DMGetLocalVector(da,&ulocal);CHKERRQ(ierr);
// ierr = DMGlobalToLocalBegin(da,u,INSERT_VALUES,ulocal);CHKERRQ(ierr);
// ierr = DMGlobalToLocalEnd(da,u,INSERT_VALUES,ulocal);CHKERRQ(ierr);
// StarStencil *stencil_op;
// ierr = DMGetApplicationContext(da,(void *)&stencil_op);CHKERRQ(ierr);
// hxhzdhy = stencil_op->m_hx*stencil_op->m_hz/stencil_op->m_hy;
// hyhzdhx = stencil_op->m_hy*stencil_op->m_hz/stencil_op->m_hx;
// hxhydhz = stencil_op->m_hx*stencil_op->m_hy/stencil_op->m_hz;
// sc = stencil_op->m_hx*stencil_op->m_hy*stencil_op->m_hz*3.0;
//
// if (useCUSP) {
// ierr = VecCUSPGetArrayRead(ulocal,&uarray);CHKERRQ(ierr);
// ierr = VecCUSPGetArrayWrite(f,&farray);CHKERRQ(ierr);
// ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
// ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
// ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
// if (rank) ustartshift = 1; else ustartshift = 0;
// if (rank != size-1) uendshift = 1; else uendshift = 0;
// xoffset = 1;
// yoffset = stencil_op->m_xm;
// zoffset = stencil_op->m_xm*stencil_op->m_ym;
// ierr = VecGetOwnershipRange(f,&fstart,PETSC_NULL);CHKERRQ(ierr);
// try {
//
// // typedef these iterators for shorthand
// thrust::for_each(
// thrust::make_zip_iterator(
// thrust::make_tuple(
// farray->begin(), //0
// thrust::counting_iterator<int>(fstart) , //1
// thrust::make_zip_iterator(
// thrust::make_tuple(
// uarray->begin()+ustartshift, //1 u(i ,j ,k )
// uarray->begin()+ustartshift + xoffset, //2 u(i+1,j ,k )
// uarray->begin()+ustartshift - xoffset, //3 u(i-1,j ,k )
// uarray->begin()+ustartshift + yoffset, //4 u(i ,j+1,k )
// uarray->begin()+ustartshift - yoffset, //5 u(i ,j-1,k )
// uarray->begin()+ustartshift + zoffset, //6 u(i ,j ,k+1)
// uarray->begin()+ustartshift - zoffset //7 u(i ,j ,k-1)
// )),
// thrust::make_zip_iterator(
// thrust::make_tuple(
// thrust::constant_iterator<PetscScalar>(6.0 ),//0 perfusion
// thrust::constant_iterator<PetscScalar>(0.57 ),//1 conduction
// thrust::constant_iterator<PetscScalar>(5.e2 ),//2 scattering
// thrust::constant_iterator<PetscScalar>(14.e3) //3 absorption
// ))
// )),
// thrust::make_zip_iterator(
// thrust::make_tuple(
// farray->end(), //0
// thrust::counting_iterator<int>(fstart) + u->map->n , //1
// thrust::make_zip_iterator(
// thrust::make_tuple(
// uarray->end()+uendshift, //2_0 u(i ,j ,k )
// uarray->end()+uendshift + xoffset, //2_1 u(i+1,j ,k )
// uarray->end()+uendshift - xoffset, //2_2 u(i-1,j ,k )
// uarray->end()+uendshift + yoffset, //2_3 u(i ,j+1,k )
// uarray->end()+uendshift - yoffset, //2_4 u(i ,j-1,k )
// uarray->end()+uendshift + zoffset, //2_5 u(i ,j ,k+1)
// uarray->end()+uendshift - zoffset //2_6 u(i ,j ,k-1)
// )),
// thrust::make_zip_iterator(
// thrust::make_tuple(
// thrust::constant_iterator<PetscScalar>(6.0 ),//3_0 perfusion
// thrust::constant_iterator<PetscScalar>(0.57 ),//3_1 conduction
// thrust::constant_iterator<PetscScalar>(5.e2 ),//3_2 scattering
// thrust::constant_iterator<PetscScalar>(14.e3) //3_3 absorption
// ))
// )),
// *stencil_op);
//
// PetscInt hostTest[3]={-1,-1,-1};
// //CUDA_SAFE_CALL(cudaMemcpy(hostTest, cudaTest,3*sizeof(PetscInt),cudaMemcpyDeviceToHost));
// ierr = PetscPrintf(PETSC_COMM_WORLD, "%d %d %d \n",hostTest[0],hostTest[1],hostTest[2]);CHKERRQ(ierr);
// }
// catch(char* all){
// ierr = PetscPrintf(PETSC_COMM_WORLD, "Thrust is not working\n");CHKERRQ(ierr);
// }
// ierr = VecCUSPRestoreArrayRead(ulocal,&uarray);CHKERRQ(ierr);
// ierr = VecCUSPRestoreArrayWrite(f,&farray);CHKERRQ(ierr);
// } else {
// ierr = DMDAVecGetArray(da,ulocal,&uu);CHKERRQ(ierr);
// ierr = DMDAVecGetArray(da,f,&ff);CHKERRQ(ierr);
//
// PetscInt GlobalDAMx,GlobalDAMy,GlobalDAMz;
// ierr = DMDAGetInfo(da,PETSC_IGNORE,&GlobalDAMx,&GlobalDAMy,&GlobalDAMz,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
// /* Compute function over the locally owned part of the grid */
// for (k=stencil_op->m_zs; k<stencil_op->m_zs+stencil_op->m_zm; k++) {
// for (j=stencil_op->m_ys; j<stencil_op->m_ys+stencil_op->m_ym; j++) {
// for (i=stencil_op->m_xs; i<stencil_op->m_xs+stencil_op->m_xm; i++) {
// if (i == 0 || j == 0 || k == 0 || i == GlobalDAMx-1 || j == GlobalDAMy-1 || k == GlobalDAMz-1) {
// ff[k][j][i] = uu[k][j][i];
// } else {
// u_val = uu[k][j][i];
// u_east = uu[k][j][i+1];
// u_west = uu[k][j][i-1];
// u_north = uu[k][j+1][i];
// u_south = uu[k][j-1][i];
// u_up = uu[k+1][j][i];
// u_down = uu[k-1][j][i];
// u_xx = (-u_east + two*u_val - u_west )*hyhzdhx;
// u_yy = (-u_north + two*u_val - u_south)*hxhzdhy;
// u_zz = (-u_up + two*u_val - u_down )*hxhydhz;
// ff[k][j][i] = u_xx + u_yy + u_zz - sc*PetscExpScalar(u_val);
// }
// }
// }
// }
// ierr = DMDAVecRestoreArray(da,ulocal,&uu);CHKERRQ(ierr);
// ierr = DMDAVecRestoreArray(da,f,&ff);CHKERRQ(ierr);
// }
// ierr = DMRestoreLocalVector(da,&ulocal);CHKERRQ(ierr);
// PetscLogEventEnd(LogFunction,0,0,0,0); // init libMesh
// //VecView(u,0);printf("f\n");
// //VecView(f,0);
// return 0;
//
// }
// PetscErrorCode ComputeJacobian(SNES snes,Vec x,Mat *J,Mat *B,MatStructure *flag,void *ctx)
// {
// DM da = (DM) ctx;
// Vec xlocal;
// PetscErrorCode ierr;
// if(jacobianComputed) return 0;
// jacobianComputed = PETSC_TRUE;
//
// ierr = DMGetLocalVector(da,&xlocal);DMGlobalToLocalBegin(da,x,INSERT_VALUES,xlocal);CHKERRQ(ierr);
// ierr = DMGlobalToLocalEnd(da,x,INSERT_VALUES,xlocal);CHKERRQ(ierr);
//
// PetscInt GlobalDAMx,GlobalDAMy,GlobalDAMz,xs,xm,ys,ym,zs,zm;
// PetscScalar hx,hy,hz;
// ierr = DMDAGetInfo(da,PETSC_IGNORE,&GlobalDAMx,&GlobalDAMy,&GlobalDAMz,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
// hx = 1.0/(PetscReal)(GlobalDAMx-1);
// hy = 1.0/(PetscReal)(GlobalDAMy-1);
// hz = 1.0/(PetscReal)(GlobalDAMz-1);
// ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr);
//
// PetscScalar hxhzdhy,hyhzdhx,hxhydhz,sc;
// hxhzdhy = hx*hz/hy;
// hyhzdhx = hy*hz/hx;
// hxhydhz = hx*hy/hz;
// sc = hx*hy*hz*3.0;
//
// ierr = MatZeroEntries(*J);CHKERRQ(ierr);
// ierr = MatShift(*J,1.0);CHKERRQ(ierr);
//
// StarStencil *stencil_op;
// ierr = DMGetApplicationContext(da,(void *)&stencil_op);CHKERRQ(ierr);
//
// /* Compute function over the locally owned part of the grid */
// PetscScalar v[7],two = 2.0;
// MatStencil col[7],row;
// PetscInt i,j,k;
// for (k=zs; k<zs+zm; k++) {
// for (j=ys; j<ys+ym; j++) {
// for (i=xs; i<xs+xm; i++) {
// row.k = k; row.j = j; row.i = i;
// if (i > 0 && j > 0 && k > 0 && i < GlobalDAMx-1 && j < GlobalDAMy-1 && k < GlobalDAMz-1) {
// v[0] = -0.5 * stencil_op->m_conduction * hxhydhz; col[0].k=k-1;col[0].j=j; col[0].i = i;
// v[1] = -0.5 * stencil_op->m_conduction * hxhzdhy; col[1].k=k; col[1].j=j-1;col[1].i = i;
// v[2] = -0.5 * stencil_op->m_conduction * hyhzdhx; col[2].k=k; col[2].j=j; col[2].i = i-1;
// v[3] = sc*( stencil_op->m_density*stencil_op->m_specificheat/stencil_op->m_deltat
// + 0.5 * stencil_op->m_perfusion * stencil_op->m_bloodspecificheat)
// + 1.0 * stencil_op->m_bloodspecificheat * (hyhzdhx+hxhzdhy+hxhydhz);
// col[3].k=row.k;col[3].j=row.j;col[3].i = row.i;
// v[4] = -0.5 * stencil_op->m_conduction * hyhzdhx; col[4].k=k; col[4].j=j; col[4].i = i+1;
// v[5] = -0.5 * stencil_op->m_conduction * hxhzdhy; col[5].k=k; col[5].j=j+1;col[5].i = i;
// v[6] = -0.5 * stencil_op->m_conduction * hxhydhz; col[6].k=k+1;col[6].j=j; col[6].i = i;
// ierr = MatSetValuesStencil(*J,1,&row,7,col,v,INSERT_VALUES);CHKERRQ(ierr);
// }
// }
// }
// }
//
// ierr = MatAssemblyBegin(*J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
// ierr = MatAssemblyEnd(*J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
// *flag = SAME_NONZERO_PATTERN;
// ierr = DMRestoreLocalVector(da,&xlocal);CHKERRQ(ierr);
// return 0;
// }
// gNek is really lightweight - and the input requirements are clearly defined.
// Feed us mesh vertex coordinates, the element-vertex connectivity, the
// element boundary conditions and the material parameters and we can
// feed back a solution. This can even be done through cubit or gmsh files [ I think ].
//
// I have to say that GPU compute is pretty much all or nothing - we also try to
// avoid too much traffic between host and device. However, we do the
// preprocessing on the host as this is usually a sub-dominant cost.
PetscErrorCode ComputeFunction(SNES snes,Vec u,Vec f,void *ctx)
{
WFSModel *FemModel= (WFSModel*) ctx;
PetscErrorCode ierr;
// get solution array for reading
// FIXME: will not work for mpi distributed array
// TODO: fix for MPI
ierr = VecCUSPGetArrayRead( u,&FemModel->uarray);CHKERRQ(ierr);
// get residual array for writing
ierr = VecCUSPGetArrayWrite(f,&FemModel->farray);CHKERRQ(ierr);
// loop over elements
// ensure thread safety by each thread writing to its own local residual
// ie similar to DG methods
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(
FemModel->ElementBegin(),
FemModel->ResidualBegin(),
FemModel->SolutionBegin(),
FemModel->ConstitutiveBegin()
)),
thrust::make_zip_iterator(thrust::make_tuple(
FemModel->ElementEnd(),
FemModel->ResidualEnd(),
FemModel->SolutionEnd(),
FemModel->ConstitutiveEnd()
)),
*FemModel // call the overloaded operator() from this class
);
// Reduce the expanded residual to the usual
// continuous additive contributions
// first need to sort
thrust::sort_by_key(
FemModel->m_LocalElementMap->begin(),
FemModel->m_LocalElementMap->end(),
FemModel->element_residuals->begin()
);
// reduce the sorted array
thrust::reduce_by_key(
FemModel->m_LocalElementMap->begin(),
FemModel->m_LocalElementMap->end(),
FemModel->element_residuals->begin(),
thrust::make_discard_iterator(),
FemModel->farray->begin()
);
// restore arrays
ierr = VecCUSPRestoreArrayRead( u,&FemModel->uarray);CHKERRQ(ierr);
ierr = VecCUSPRestoreArrayWrite(f,&FemModel->farray);CHKERRQ(ierr);
return 0;
}
|
d889834f9b22b0367c500d531e2aa10df6c1c0ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "phx_kernels.h"
#include "cuda/math.h"
#define ERROR_LEN 256
#define CUDA_ASSERT( x ) \
if( !(x) ) { if( error[0] != '\0' ) return; unsigned i = 0; while( #x[i] != '\0' ){error[i] = #x[i]; ++i;} error[i] = '\0'; return; }
__device__ const float dt = 2e-2f;
__device__ const float G = 1.f;
__device__ char error[ERROR_LEN] = "";
__global__ void getErr( char *err )
{
for( unsigned i = 0; i < ERROR_LEN; ++i )
{
err[i] = error[i];
}
error[0] = '\0';
}
std::string getErr()
{
char *d_err;
hipMalloc( &d_err, sizeof(char) * ERROR_LEN );
hipLaunchKernelGGL(( getErr), dim3(1), dim3(1), 0, 0, d_err );
char h_err[ERROR_LEN];
hipMemcpy( h_err, d_err, sizeof(char) * ERROR_LEN, hipMemcpyDeviceToHost );
hipFree( d_err );
return h_err;
}
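// Velocity increment on a body at myPos due to a body of mass theirMass at
// theirPos over one step: dv = G * m * dt * r_hat / r^2, with r^2 clamped to
// a minimum of 1 to soften close encounters.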
__device__ float3 get_dV( float3 myPos, float3 theirPos, float theirMass )
{
float3 dir = theirPos - myPos;
float r2 = dir.x * dir.x + dir.y * dir.y + dir.z * dir.z;
if( r2 < 1 ) r2 = 1; //return dir / sqrtf( r2 ) * dt;
return theirMass * dir / sqrtf( r2 ) * ( G * dt / r2 );
}
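// Brute-force O(N^2) pass: every body accumulates the velocity change caused
// by every other body. The shared-memory tiling is currently commented out,
// so all position/mass reads go straight to global memory.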
#ifndef PHX_DEBUG
__global__ void PHX::basic_interaction( float3 *positions, float *masses, float3 *velocities, unsigned *cnt )
#else
__global__ void PHX::basic_interaction( float3 *positions, float *masses, float3 *velocities, unsigned *cnt, float3 *dvs, unsigned who )
#endif
{
// shared mem temporarily turned off
//__shared__ float3 s_positions[512];
//__shared__ float s_masses[512];
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned count = *cnt;
float3 new_vel;
float3 old_pos;
if( index < count )
{
old_pos = positions[ index ];
new_vel = make_float3(0,0,0);
}
for( unsigned i = 0; i < gridDim.x; ++i )
{
// copy blockDim.x data from global to shared mem
/*
if( i + threadIdx.x < count )
{
s_positions[ threadIdx.x ] = positions[ i + threadIdx.x ];
s_masses[ threadIdx.x ] = masses[ i + threadIdx.x ];
}
__syncthreads();
*/
// use shared memory to calculate partial dV (interaction with planets [i..i+blockDim.x] )
if( index < count )
{
for( unsigned j = 0; j < blockDim.x; ++j )
{
unsigned other_index = i * blockDim.x + j;
if( other_index >= count ) break;
// don't interact with yourself
if( other_index != index )
{
//new_vel += get_dV( old_pos, s_positions[j], s_masses[j] );
#ifndef PHX_DEBUG
new_vel += get_dV( old_pos, positions[other_index], masses[other_index] );
#else
float3 dv = get_dV( old_pos, positions[other_index], masses[other_index] );
if( index == who )
{
dvs[ other_index ] = dv;
}
new_vel += dv;
#endif
}
}
}
}
__syncthreads();
if( index >= count )
{
return;
}
velocities[ index ] += new_vel;
}
#ifndef PHX_DEBUG
__global__ void PHX::inside_cluster_interaction( float3 *positions, float *masses, float3 *velocities, unsigned *shuffle, unsigned *counts, unsigned cluster )
#else
__global__ void PHX::inside_cluster_interaction( float3 *positions, float *masses, float3 *velocities, unsigned *shuffle, unsigned *counts, unsigned cluster, float3 *dvs, unsigned who, unsigned *whois )
#endif
{
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
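// counts[] holds cumulative planet counts per cluster, so cluster c owns
// slots [counts[c-1], counts[c]); shuffle[] maps a cluster-ordered slot
// to the actual planet index in the global arrays.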
unsigned offset = cluster ? counts[ cluster-1 ] : 0;
index += offset;
unsigned mapped_index = shuffle[ index ];
unsigned count = counts[ cluster ];
float3 vel_diff;
float3 old_pos;
if( index >= count )
{
return;
}
old_pos = positions[ mapped_index ];
vel_diff = make_float3(0,0,0);
for( unsigned i = offset; i < count; i += blockDim.x )
{
for( unsigned j = 0; j < blockDim.x && i + j < count; ++j )
{
unsigned other_index = shuffle[ i + j ];
if( other_index != mapped_index )
{
#ifndef PHX_DEBUG
vel_diff += get_dV( old_pos, positions[other_index], masses[other_index] );
#else
float3 dv = get_dV( old_pos, positions[other_index], masses[other_index] );
if( index == who )
{
dvs[ i + j ] = dv;
}
vel_diff += dv;
#endif
}
}
}
velocities[ mapped_index ] += vel_diff;
#ifdef PHX_DEBUG
CUDA_ASSERT( whois[ mapped_index ] == 0 );
whois[ mapped_index ] = index;
#endif
}
__global__ void PHX::outside_cluster_interaction( float3 *centers, float *masses, unsigned count, float3 *velocities_impact )
{
unsigned tid = threadIdx.x;
float3 pos = centers[tid];
float3 new_vel = make_float3(0,0,0);
for( unsigned i = 0; i < count; ++i )
{
if( i != tid )
{
new_vel += get_dV( pos, centers[i], masses[i] );
}
}
velocities_impact[tid] = new_vel;
}
__global__ void PHX::propagate_velocities( float3 *velocities_impact, float3 *positions, float3 *velocities, unsigned *shuffle, unsigned *count, unsigned last_cluster )
{
unsigned index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned cluster = 0;
if( index >= count[last_cluster] )
{
return;
}
// find the cluster this index belongs to
while( count[cluster] <= index ) ++cluster;
// and add this cluster's velocity impact to our own velocity
velocities[ shuffle[ index ] ] += velocities_impact[ cluster ];
}
__global__ void PHX::update_positions_kernel( float3 *positions, float3 *velocities, unsigned *count )
{
unsigned index = threadIdx.x + blockDim.x * blockIdx.x;
if( index < *count )
{
positions[ index ] += velocities[ index ] * dt;
}
}
__device__ bool collision_detected( float3 pos1, float r1, float3 pos2, float r2 )
{
if( r1 == 0 || r2 == 0 ) return false;
float3 dp = pos1 - pos2;
float d2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; // squared distance between the centres
return d2 < (r1+r2)*(r1+r2); // TODO coverage?
}
__global__ void PHX::detect_collisions( float3 *positions, float *radiuses, unsigned *count, unsigned *shuffle, unsigned last_cluster, unsigned *merges, unsigned *merge_needed )
{
unsigned index = threadIdx.x + blockDim.x * blockIdx.x;
if( index >= count[last_cluster] )
{
return;
}
unsigned mapped_index = shuffle[ index ];
float3 my_pos = positions[ mapped_index ];
float my_radius = radiuses[ mapped_index ];
unsigned cluster = 0;
// find the cluster this index belongs to
while( count[cluster] <= index ) ++cluster;
unsigned limit = count[cluster];
for( unsigned i = index + 1; i < limit; ++i )
{
unsigned other_index = shuffle[i];
if( collision_detected( my_pos, my_radius, positions[other_index], radiuses[other_index] ) )
{
merges[ mapped_index ] = other_index;
*merge_needed = 1;
return;
}
}
merges[ mapped_index ] = mapped_index; // no collision
}
__global__ void PHX::detect_collisions_no_clusters( float3 *positions, float *radiuses, unsigned count, unsigned *merges, unsigned *merge_needed )
{
unsigned index = threadIdx.x + blockDim.x * blockIdx.x;
if( index >= count )
{
return;
}
float3 my_pos = positions[ index ];
float my_radius = radiuses[ index ];
for( unsigned i = index + 1; i < count; ++i )
{
if( collision_detected( my_pos, my_radius, positions[i], radiuses[i] ) )
{
merges[ index ] = i;
*merge_needed = 1;
return;
}
}
merges[ index ] = index; // no collision
}
__device__ void internal_merge( float3 *positions, float3 *velocities, float *masses, float *radiuses, unsigned idx1, unsigned idx2 )
{
// the merged body ends up in idx1, so the indices may need to be swapped
if( radiuses[idx1] < radiuses[idx2] )
{
unsigned tmp = idx1;
idx1 = idx2;
idx2 = tmp;
}
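// Combine the two bodies: the position is a mass*radius-weighted average,
// the velocity is the momentum-conserving average (weights m/(m1+m2)),
// and the new radius conserves volume (cube root of the summed cubes below).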
float a1 = ( radiuses[idx1] * masses[idx1] ) / ( (radiuses[idx1] * masses[idx1]) + (radiuses[idx2] * masses[idx2]) );
float b1 = masses[idx1] / ( masses[idx1] + masses[idx2] );
positions[idx1] = positions[idx1] * a1 + positions[idx2] * (1-a1);
velocities[idx1] = velocities[idx1] * b1 + velocities[idx2] * (1-b1);
masses[idx1] += masses[idx2];
radiuses[idx1] = powf( powf(radiuses[idx1], 3.) + powf(radiuses[idx2], 3.), 1.f/3.f );
// mark idx2 as deleted
masses[idx2] = 0;
radiuses[idx2] = 0;
}
__global__ void PHX::merge_collisions( unsigned *in_merges, unsigned *out_merges, float3 *positions, float3 *velocities, float *masses, float *radiuses, unsigned *count, unsigned *done )
{
unsigned index = threadIdx.x + blockDim.x * blockIdx.x;
if( index >= *count )
{
return;
}
unsigned to_merge = in_merges[ index ];
if( index == to_merge )
{
out_merges[ index ] = in_merges[ index ];
return;
}
if( in_merges[ to_merge ] != to_merge )
{
// oops, our merge candidate has found another partner - wait for a later pass
out_merges[ index ] = in_merges[ index ];
*done = 0;
return;
}
// if we got this far, the merge can proceed
internal_merge( positions, velocities, masses, radiuses, index, to_merge );
out_merges[ index ] = index;
}
| d889834f9b22b0367c500d531e2aa10df6c1c0ff.cu | #include "phx_kernels.h"
#include "cuda/math.h"
#define ERROR_LEN 256
#define CUDA_ASSERT( x ) \
if( !(x) ) { if( error[0] != '\0' ) return; unsigned i = 0; while( #x[i] != '\0' ){error[i] = #x[i]; ++i;} error[i] = '\0'; return; }
__device__ const float dt = 2e-2f;
__device__ const float G = 1.f;
__device__ char error[ERROR_LEN] = "";
__global__ void getErr( char *err )
{
for( unsigned i = 0; i < ERROR_LEN; ++i )
{
err[i] = error[i];
}
error[0] = '\0';
}
std::string getErr()
{
char *d_err;
cudaMalloc( &d_err, sizeof(char) * ERROR_LEN );
getErr<<<1, 1>>>( d_err );
char h_err[256];
cudaMemcpy( h_err, d_err, sizeof(char) * 256, cudaMemcpyDeviceToHost );
cudaFree( d_err );
return h_err;
}
__device__ float3 get_dV( float3 myPos, float3 theirPos, float theirMass )
{
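// Velocity increment from a single attractor over one time step:
// dV = G * m_other * dt * r_hat / r^2, with r^2 clamped to >= 1 as a
// softening term so very close encounters do not blow up.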
float3 dir = theirPos - myPos;
float r2 = dir.x * dir.x + dir.y * dir.y + dir.z * dir.z;
if( r2 < 1 ) r2 = 1; //return dir / sqrtf( r2 ) * dt;
return theirMass * dir / sqrtf( r2 ) * ( G * dt / r2 );
}
#ifndef PHX_DEBUG
__global__ void PHX::basic_interaction( float3 *positions, float *masses, float3 *velocities, unsigned *cnt )
#else
__global__ void PHX::basic_interaction( float3 *positions, float *masses, float3 *velocities, unsigned *cnt, float3 *dvs, unsigned who )
#endif
{
// shared mem temporarily turned off
//__shared__ float3 s_positions[512];
//__shared__ float s_masses[512];
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned count = *cnt;
float3 new_vel;
float3 old_pos;
if( index < count )
{
old_pos = positions[ index ];
new_vel = make_float3(0,0,0);
}
for( unsigned i = 0; i < gridDim.x; ++i )
{
// copy blockDim.x data from global to shared mem
/*
if( i + threadIdx.x < count )
{
s_positions[ threadIdx.x ] = positions[ i + threadIdx.x ];
s_masses[ threadIdx.x ] = masses[ i + threadIdx.x ];
}
__syncthreads();
*/
// use shared memory to calculate partial dV (interaction with planets [i..i+blockDim.x] )
if( index < count )
{
for( unsigned j = 0; j < blockDim.x; ++j )
{
unsigned other_index = i * blockDim.x + j;
if( other_index >= count ) break;
// don't interact with yourself
if( other_index != index )
{
//new_vel += get_dV( old_pos, s_positions[j], s_masses[j] );
#ifndef PHX_DEBUG
new_vel += get_dV( old_pos, positions[other_index], masses[other_index] );
#else
float3 dv = get_dV( old_pos, positions[other_index], masses[other_index] );
if( index == who )
{
dvs[ other_index ] = dv;
}
new_vel += dv;
#endif
}
}
}
}
__syncthreads();
if( index >= count )
{
return;
}
velocities[ index ] += new_vel;
}
#ifndef PHX_DEBUG
__global__ void PHX::inside_cluster_interaction( float3 *positions, float *masses, float3 *velocities, unsigned *shuffle, unsigned *counts, unsigned cluster )
#else
__global__ void PHX::inside_cluster_interaction( float3 *positions, float *masses, float3 *velocities, unsigned *shuffle, unsigned *counts, unsigned cluster, float3 *dvs, unsigned who, unsigned *whois )
#endif
{
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
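// counts[] holds cumulative planet counts per cluster, so cluster c owns
// slots [counts[c-1], counts[c]); shuffle[] maps a cluster-ordered slot
// to the actual planet index in the global arrays.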
unsigned offset = cluster ? counts[ cluster-1 ] : 0;
index += offset;
unsigned mapped_index = shuffle[ index ];
unsigned count = counts[ cluster ];
float3 vel_diff;
float3 old_pos;
if( index >= count )
{
return;
}
old_pos = positions[ mapped_index ];
vel_diff = make_float3(0,0,0);
for( unsigned i = offset; i < count; i += blockDim.x )
{
for( unsigned j = 0; j < blockDim.x && i + j < count; ++j )
{
unsigned other_index = shuffle[ i + j ];
if( other_index != mapped_index )
{
#ifndef PHX_DEBUG
vel_diff += get_dV( old_pos, positions[other_index], masses[other_index] );
#else
float3 dv = get_dV( old_pos, positions[other_index], masses[other_index] );
if( index == who )
{
dvs[ i + j ] = dv;
}
vel_diff += dv;
#endif
}
}
}
velocities[ mapped_index ] += vel_diff;
#ifdef PHX_DEBUG
CUDA_ASSERT( whois[ mapped_index ] == 0 );
whois[ mapped_index ] = index;
#endif
}
__global__ void PHX::outside_cluster_interaction( float3 *centers, float *masses, unsigned count, float3 *velocities_impact )
{
unsigned tid = threadIdx.x;
float3 pos = centers[tid];
float3 new_vel = make_float3(0,0,0);
for( unsigned i = 0; i < count; ++i )
{
if( i != tid )
{
new_vel += get_dV( pos, centers[i], masses[i] );
}
}
velocities_impact[tid] = new_vel;
}
__global__ void PHX::propagate_velocities( float3 *velocities_impact, float3 *positions, float3 *velocities, unsigned *shuffle, unsigned *count, unsigned last_cluster )
{
unsigned index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned cluster = 0;
if( index >= count[last_cluster] )
{
return;
}
// find the cluster this index belongs to
while( count[cluster] <= index ) ++cluster;
// and add this cluster's velocity impact to our own velocity
velocities[ shuffle[ index ] ] += velocities_impact[ cluster ];
}
__global__ void PHX::update_positions_kernel( float3 *positions, float3 *velocities, unsigned *count )
{
unsigned index = threadIdx.x + blockDim.x * blockIdx.x;
if( index < *count )
{
positions[ index ] += velocities[ index ] * dt;
}
}
__device__ bool collision_detected( float3 pos1, float r1, float3 pos2, float r2 )
{
if( r1 == 0 || r2 == 0 ) return false;
float3 dp = pos1 - pos2;
float d2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; // squared distance between the centres
return d2 < (r1+r2)*(r1+r2); // TODO coverage?
}
__global__ void PHX::detect_collisions( float3 *positions, float *radiuses, unsigned *count, unsigned *shuffle, unsigned last_cluster, unsigned *merges, unsigned *merge_needed )
{
unsigned index = threadIdx.x + blockDim.x * blockIdx.x;
if( index >= count[last_cluster] )
{
return;
}
unsigned mapped_index = shuffle[ index ];
float3 my_pos = positions[ mapped_index ];
float my_radius = radiuses[ mapped_index ];
unsigned cluster = 0;
// find the cluster this index belongs to
while( count[cluster] <= index ) ++cluster;
unsigned limit = count[cluster];
for( unsigned i = index + 1; i < limit; ++i )
{
unsigned other_index = shuffle[i];
if( collision_detected( my_pos, my_radius, positions[other_index], radiuses[other_index] ) )
{
merges[ mapped_index ] = other_index;
*merge_needed = 1;
return;
}
}
merges[ mapped_index ] = mapped_index; // no collision
}
__global__ void PHX::detect_collisions_no_clusters( float3 *positions, float *radiuses, unsigned count, unsigned *merges, unsigned *merge_needed )
{
unsigned index = threadIdx.x + blockDim.x * blockIdx.x;
if( index >= count )
{
return;
}
float3 my_pos = positions[ index ];
float my_radius = radiuses[ index ];
for( unsigned i = index + 1; i < count; ++i )
{
if( collision_detected( my_pos, my_radius, positions[i], radiuses[i] ) )
{
merges[ index ] = i;
*merge_needed = 1;
return;
}
}
merges[ index ] = index; // no collision
}
__device__ void internal_merge( float3 *positions, float3 *velocities, float *masses, float *radiuses, unsigned idx1, unsigned idx2 )
{
// the merged body ends up in idx1, so the indices may need to be swapped
if( radiuses[idx1] < radiuses[idx2] )
{
unsigned tmp = idx1;
idx1 = idx2;
idx2 = tmp;
}
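// Combine the two bodies: the position is a mass*radius-weighted average,
// the velocity is the momentum-conserving average (weights m/(m1+m2)),
// and the new radius conserves volume (cube root of the summed cubes below).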
float a1 = ( radiuses[idx1] * masses[idx1] ) / ( (radiuses[idx1] * masses[idx1]) + (radiuses[idx2] * masses[idx2]) );
float b1 = masses[idx1] / ( masses[idx1] + masses[idx2] );
positions[idx1] = positions[idx1] * a1 + positions[idx2] * (1-a1);
velocities[idx1] = velocities[idx1] * b1 + velocities[idx2] * (1-b1);
masses[idx1] += masses[idx2];
radiuses[idx1] = powf( powf(radiuses[idx1], 3.) + powf(radiuses[idx2], 3.), 1.f/3.f );
// mark idx2 as deleted
masses[idx2] = 0;
radiuses[idx2] = 0;
}
__global__ void PHX::merge_collisions( unsigned *in_merges, unsigned *out_merges, float3 *positions, float3 *velocities, float *masses, float *radiuses, unsigned *count, unsigned *done )
{
unsigned index = threadIdx.x + blockDim.x * blockIdx.x;
if( index >= *count )
{
return;
}
unsigned to_merge = in_merges[ index ];
if( index == to_merge )
{
out_merges[ index ] = in_merges[ index ];
return;
}
if( in_merges[ to_merge ] != to_merge )
{
// oops, our merge candidate has found another partner - wait for a later pass
out_merges[ index ] = in_merges[ index ];
*done = 0;
return;
}
// if we got this far, the merge can proceed
internal_merge( positions, velocities, masses, radiuses, index, to_merge );
out_merges[ index ] = index;
}
|
5e837d213c75f9b76ca909e4b1d4b86c5c692a8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void neq_strided_float(int n, int xOffset,int yOffset,float *dx, float *dy,int incx,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0)
result[i] = dy[i] != dx[i];
}
} | 5e837d213c75f9b76ca909e4b1d4b86c5c692a8a.cu | #include "includes.h"
__global__ void neq_strided_float(int n, int xOffset,int yOffset,float *dx, float *dy,int incx,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0)
result[i] = dy[i] != dx[i];
}
} |
fef635ba0c89e6578e7c71b178944217994f8bb4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
int *a, *b; // host data
int *c, *c2; // results
//Cuda error checking - non mandatory
void cudaCheckError() {
hipError_t e=hipGetLastError();
if(e!=hipSuccess) {
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e));
exit(0);
}
}
//GPU kernel Matrix product
__global__
void vecMul(int *A,int *B,int *C,int N){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if (row < N && col < N) {
int tmpSum = 0.0f;
for (int i = 0; i < N; i++) {
tmpSum += A[row * N + i] * B[i * N + col];
}
C[row * N + col] = tmpSum;
}
}
int main(int argc,char **argv)
{
printf("Begin \n");
int n=4;
int size = n*n;
//Size of each matrix in bytes
int nBytes = size*sizeof(int);
//Block size and number
int block_size, block_no;
//memory allocation
a = (int *) malloc(nBytes);
b = (int *) malloc(nBytes);
c = (int *) malloc(nBytes);
c2 = (int *) malloc(nBytes);
int *a_d,*b_d,*c_d;
block_size = 16; //threads per block in each dimension
block_no = (n + block_size - 1)/block_size; //blocks per grid in each dimension
//Work definition: 2D launch so every (row, col) pair gets its own thread
dim3 dimBlock(block_size, block_size, 1);
dim3 dimGrid(block_no, block_no, 1);
// Data filling
for (int i=0; i<n; i++){
for (int j=0; j<n; j++){
a[i*n+j] = i;
b[i*n+j] = j;
}
}
printf("Allocating device memory on host..\n");
//GPU memory allocation
hipMalloc((void **) &a_d, size*sizeof(int));
hipMalloc((void **) &b_d, size*sizeof(int));
hipMalloc((void **) &c_d, size*sizeof(int));
printf("Copying to device..\n");
hipMemcpy(a_d, a, size*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(b_d, b, size*sizeof(int), hipMemcpyHostToDevice);
clock_t start_d=clock();
printf("Doing GPU matrix multiply\n");
hipLaunchKernelGGL(( vecMul), dimGrid, dimBlock, 0, 0, a_d, b_d, c_d, n);
cudaCheckError();
//Wait for kernel call to finish
hipDeviceSynchronize();
clock_t end_d = clock();
//Time computing
double time_d = (double)(end_d-start_d)/CLOCKS_PER_SEC;
printf("n = %d \t GPU time = %fs \n", size, time_d);
//Free GPU memory
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
return 0;
} | fef635ba0c89e6578e7c71b178944217994f8bb4.cu | #include <stdio.h>
#include <cuda.h>
int *a, *b; // host data
int *c, *c2; // results
//Cuda error checking - non mandatory
void cudaCheckError() {
cudaError_t e=cudaGetLastError();
if(e!=cudaSuccess) {
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e));
exit(0);
}
}
//GPU kernel Matrix product
__global__
void vecMul(int *A,int *B,int *C,int N){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if (row < N && col < N) {
int tmpSum = 0.0f;
for (int i = 0; i < N; i++) {
tmpSum += A[row * N + i] * B[i * N + col];
}
C[row * N + col] = tmpSum;
}
}
int main(int argc,char **argv)
{
printf("Begin \n");
int n=4;
int size = n*n;
//Size of each matrix in bytes
int nBytes = size*sizeof(int);
//Block size and number
int block_size, block_no;
//memory allocation
a = (int *) malloc(nBytes);
b = (int *) malloc(nBytes);
c = (int *) malloc(nBytes);
c2 = (int *) malloc(nBytes);
int *a_d,*b_d,*c_d;
block_size = 16; //threads per block in each dimension
block_no = (n + block_size - 1)/block_size; //blocks per grid in each dimension
//Work definition: 2D launch so every (row, col) pair gets its own thread
dim3 dimBlock(block_size, block_size, 1);
dim3 dimGrid(block_no, block_no, 1);
// Data filling
for (int i=0; i<n; i++){
for (int j=0; j<n; j++){
a[i*n+j] = i;
b[i*n+j] = j;
}
}
printf("Allocating device memory on host..\n");
//GPU memory allocation
cudaMalloc((void **) &a_d, size*sizeof(int));
cudaMalloc((void **) &b_d, size*sizeof(int));
cudaMalloc((void **) &c_d, size*sizeof(int));
printf("Copying to device..\n");
cudaMemcpy(a_d, a, size*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, size*sizeof(int), cudaMemcpyHostToDevice);
clock_t start_d=clock();
printf("Doing GPU matrix multiply\n");
vecMul<<<dimGrid,dimBlock>>>(a_d, b_d, c_d, n);
cudaCheckError();
//Wait for kernel call to finish
cudaThreadSynchronize();
clock_t end_d = clock();
//Time computing
double time_d = (double)(end_d-start_d)/CLOCKS_PER_SEC;
printf("n = %d \t GPU time = %fs \n", size, time_d);
//Free GPU memory
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
return 0;
} |
98f8a8998408657dec80c5cc509520fc998a7a6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file generateBC2.cu
* \brief Implementation of the kernels to generate elements of the right hand-side
* of the Poisson solver.
*/
#include "generateBC2.h"
/**
* \namespace kernels
* \brief Contains all custom-written CUDA kernels.
*/
namespace kernels
{
/**
* \brief Computes inhomogeneous terms of the discrete divergence operator
* from the bottom and top boundaries at the v-velocity locations.
*
* \param bc2 array that contains boundary conditions
* \param yminus bottom-boundary velocities
* \param yplus top-boundary velocities
* \param dx cell-widths in the x-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
*/
__global__
void fillBC2_v(real *bc2, real *yminus, real *yplus, real *dx, int nx, int ny)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i>=nx)
return;
bc2[i] -= yminus[i+nx-1]*dx[i];
bc2[(ny-1)*nx + i] += yplus[i+nx-1]*dx[i];
} // fillBC2_v
/**
* \brief Computes inhomogeneous terms of the discrete divergence operator
* from the left and right boundaries at the u-velocity locations.
*
* \param bc2 array that contains boundary conditions
* \param xminus left-boundary velocities
* \param xplus right-boundary velocities
 * \param dy cell-widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
*/
__global__
void fillBC2_u(real *bc2, real *xminus, real *xplus, real *dy, int nx, int ny)
{
int j = threadIdx.x + blockIdx.x*blockDim.x;
if(j>=ny)
return;
bc2[j*nx] -= xminus[j]*dy[j];
bc2[j*nx+nx-1] += xplus[j]*dy[j];
} // fillBC2_u
/**
* \brief Computes inhomogeneous terms of the discrete divergence operator
* from the no-slip constraint at the body-point locations.
*
* \param bc2 array that contains boundary conditions
* \param uB x-component of the body-velocity
 * \param vB y-component of the body-velocity
* \param totalPoints number of body-points (all bodies included)
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
*/
__global__
void fillBC2_uvB(real *bc2, real *uB, real *vB, int totalPoints, int nx, int ny)
{
int k = threadIdx.x + blockIdx.x*blockDim.x;
if(k>=totalPoints)
return;
bc2[nx*ny + k] = uB[k];
bc2[nx*ny + k + totalPoints] = vB[k];
} // fillBC2_uvB
} // End of namespace kernels
| 98f8a8998408657dec80c5cc509520fc998a7a6a.cu | /**
* \file generateBC2.cu
* \brief Implementation of the kernels to generate elements of the right hand-side
* of the Poisson solver.
*/
#include "generateBC2.h"
/**
* \namespace kernels
* \brief Contains all custom-written CUDA kernels.
*/
namespace kernels
{
/**
* \brief Computes inhomogeneous terms of the discrete divergence operator
* from the bottom and top boundaries at the v-velocity locations.
*
* \param bc2 array that contains boundary conditions
* \param yminus bottom-boundary velocities
* \param yplus top-boundary velocities
* \param dx cell-widths in the x-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
*/
__global__
void fillBC2_v(real *bc2, real *yminus, real *yplus, real *dx, int nx, int ny)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i>=nx)
return;
bc2[i] -= yminus[i+nx-1]*dx[i];
bc2[(ny-1)*nx + i] += yplus[i+nx-1]*dx[i];
} // fillBC2_v
/**
* \brief Computes inhomogeneous terms of the discrete divergence operator
* from the left and right boundaries at the u-velocity locations.
*
* \param bc2 array that contains boundary conditions
* \param xminus left-boundary velocities
* \param xplus right-boundary velocities
 * \param dy cell-widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
*/
__global__
void fillBC2_u(real *bc2, real *xminus, real *xplus, real *dy, int nx, int ny)
{
int j = threadIdx.x + blockIdx.x*blockDim.x;
if(j>=ny)
return;
bc2[j*nx] -= xminus[j]*dy[j];
bc2[j*nx+nx-1] += xplus[j]*dy[j];
} // fillBC2_u
/**
* \brief Computes inhomogeneous terms of the discrete divergence operator
* from the no-slip constraint at the body-point locations.
*
* \param bc2 array that contains boundary conditions
* \param uB x-component of the body-velocity
 * \param vB y-component of the body-velocity
* \param totalPoints number of body-points (all bodies included)
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
*/
__global__
void fillBC2_uvB(real *bc2, real *uB, real *vB, int totalPoints, int nx, int ny)
{
int k = threadIdx.x + blockIdx.x*blockDim.x;
if(k>=totalPoints)
return;
bc2[nx*ny + k] = uB[k];
bc2[nx*ny + k + totalPoints] = vB[k];
} // fillBC2_uvB
} // End of namespace kernels
|
ec22a9f4096b531f7ea152292c7fd296b85b15cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "warper.h"
__device__ float distance(float2 x1, float2 x2){
return sqrt(pow(x1.x - x2.x,2) + pow(x1.y - x2.y,2));
}
__global__ void distance_kernel(float2 *data_in, float *data_out, int n){
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
float2 ref;
ref.x = 0.0;
ref.y = 0.0;
data_out[i] = distance(data_in[i], ref);
}
}
void run_kernel(float* h_in, float* h_out, int n){
float2 *d_in = NULL;
float *d_out = NULL;
size_t in_size = n*2*sizeof(float);
size_t out_size = n*sizeof(float);
// allocate device memory
hipMalloc((void**)&d_in, in_size);
hipMalloc((void**)&d_out, out_size);
// copy host data to the device
hipMemcpy(d_in, h_in, in_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( distance_kernel), dim3((N + TPB -1)/TPB),dim3(TPB), 0, 0, d_in, d_out, n);
// copy the device-side result back to the host
hipMemcpy(h_out, d_out, out_size, hipMemcpyDeviceToHost);
hipFree(d_in);
hipFree(d_out);
}
| ec22a9f4096b531f7ea152292c7fd296b85b15cf.cu | #include "warper.h"
__device__ float distance(float2 x1, float2 x2){
return sqrt(pow(x1.x - x2.x,2) + pow(x1.y - x2.y,2));
}
__global__ void distance_kernel(float2 *data_in, float *data_out, int n){
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
float2 ref;
ref.x = 0.0;
ref.y = 0.0;
data_out[i] = distance(data_in[i], ref);
}
}
void run_kernel(float* h_in, float* h_out, int n){
float2 *d_in = NULL;
float *d_out = NULL;
size_t in_size = n*2*sizeof(float);
size_t out_size = n*sizeof(float);
// allocate device memory
cudaMalloc((void**)&d_in, in_size);
cudaMalloc((void**)&d_out, out_size);
// copy host data to the device
cudaMemcpy(d_in, h_in, in_size, cudaMemcpyHostToDevice);
distance_kernel<<<(N + TPB -1)/TPB,TPB>>>(d_in, d_out, n);
// copy the device-side result back to the host
cudaMemcpy(h_out, d_out, out_size, cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
}
|
8f417ef487d059115ec306f07af8d69a128f559c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Date: 01-03-2017
Author: Omer Anjum
Description:
Copying internal halos from GPU to host
Comments:
Date: March 10, 2017
Omer Anjum
Very first version of code written.
*/
#include <stdio.h>
/****************************************************************************************/
__global__ void copy_internal_rows(float* d_halo, float* d_grid, int nx, int ny, int nz, int halo_depth, dim3 blocksPerGrid)
{
//int halo_size = (halo_depth*nx*2 + halo_depth*(ny-halo_depth*2)*2)*(nz-halo_depth*2) + nx*ny*(halo_depth*2);
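// Halo buffer layout per z-slice: the bottom halo_depth rows of the interior
// region, then the left/right column strips, then the top halo_depth rows;
// this kernel fills the bottom band (first store) and the top band (second store).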
const int halo_idx_x = threadIdx.x + blockIdx.x*blockDim.x;
const int halo_idx_y = threadIdx.y + blockIdx.y*blockDim.y;
const int halo_idx_z = threadIdx.z + blockIdx.z*blockDim.z;
int halo_idx = (halo_idx_x) + (halo_idx_y)*(nx-2*halo_depth) + (halo_idx_z)*((nx-2*halo_depth)*(halo_depth*2)+(ny-(halo_depth*4))*(halo_depth*2));//last term 128*6+128*6
int d_grid_idx = (halo_idx_x+halo_depth) + (halo_idx_y+halo_depth)*nx + (halo_idx_z+halo_depth)*nx*ny;
if(halo_idx_x < nx-2*halo_depth){
d_halo[halo_idx] = d_grid[d_grid_idx];
d_halo[halo_idx+((nx-2*halo_depth)*halo_depth+(ny-(halo_depth*4))*(halo_depth*2))] = d_grid[d_grid_idx+(ny-3*halo_depth)*nx];
}
}
/****************************************************************************************/
__global__ void copy_internal_cols(float* d_halo, float* d_grid, int nx, int ny, int nz, int halo_depth, dim3 blocksPerGrid)
{
//int halo_size = (halo_depth*nx*2 + halo_depth*(ny-halo_depth*2)*2)*(nz-halo_depth*2) + nx*ny*(halo_depth*2);
const int halo_idx_x = threadIdx.x + blockIdx.x*blockDim.x;
const int halo_idx_y = threadIdx.y + blockIdx.y*blockDim.y;
const int halo_idx_z = threadIdx.z + blockIdx.z*blockDim.z;
int halo_idx = halo_depth*(nx-2*halo_depth) + (halo_idx_x) + (halo_idx_y)*2*halo_depth + (halo_idx_z)*((nx-2*halo_depth)*(halo_depth*2)+(ny-(halo_depth*4))*(halo_depth*2));//last term 134*6+128*6, first term taking threads to where columns data starts
int d_grid_idx = (halo_idx_x+halo_depth) + (halo_idx_y+2*halo_depth)*nx + (halo_idx_z+halo_depth)*nx*ny;
if(halo_idx_y < ny-4*halo_depth){
d_halo[halo_idx] = d_grid[d_grid_idx];
d_halo[halo_idx+halo_depth] = d_grid[d_grid_idx+(nx-3*halo_depth)];//---|idx|------|nx|---|nx+idx|
}
}
/****************************************************************************************/
__global__ void copy_internal_frtbk(float* d_halo, float* d_grid, int nx, int ny, int nz, int halo_depth, dim3 blocksPerGrid)
{
//int halo_size = (halo_depth*nx*2 + halo_depth*(ny-halo_depth*2)*2)*(nz-halo_depth*2) + nx*ny*(halo_depth*2);
const int halo_idx_x = threadIdx.x + blockIdx.x*blockDim.x;
const int halo_idx_y = threadIdx.y + blockIdx.y*blockDim.y;
const int halo_idx_z = threadIdx.z + blockIdx.z*blockDim.z;
int halo_idx = (halo_depth*(nx-2*halo_depth)*2 +(ny-(halo_depth*4))*(halo_depth*2))*(nz-2*halo_depth) + (halo_idx_x) + (halo_idx_y)*(nx-2*halo_depth) + (halo_idx_z)*(nx-2*halo_depth)*(ny-2*halo_depth);//last term 134*6+128*6, first term taking threads to where columns data starts
int d_grid_idx = (halo_idx_x+halo_depth) + (halo_idx_y+halo_depth)*nx + (halo_idx_z)*nx*ny;
if(halo_idx_x < nx - 2*halo_depth && halo_idx_y < ny - 2*halo_depth && halo_idx_z < nz){
d_halo[halo_idx] = d_grid[d_grid_idx];
d_halo[halo_idx+(nx-2*halo_depth)*(ny-2*halo_depth)*halo_depth] = d_grid[d_grid_idx+nx*ny*(nz-halo_depth)];
}
/*__syncthreads();
if(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) {
printf("Writing thread (%d,%d,%d) at block (%d,%d,%d) \n",threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x,blockIdx.y,blockIdx.z );
printf("\n printing halo\n");
for (int k=0; k < halo_size; k++) {
printf("%d, ",d_halo[k]);
}
}*/
}
/****************************************************************************************/
void fillhalosinhost(float* d_halo, float* d_grid, int nx, int ny, int nz, int halo_depth)
{
//int ELEMS_PER_THREAD_in_z = nz-(2*halo_depth);
//TODO: Adapt for shearing-periodic case
static dim3 blocksPerGrid, threadsPerBlock;
//Create streams for executing the boundary copy
//kernels concurrently.
static hipStream_t per_row_stream = NULL;
if (per_row_stream == NULL)
hipStreamCreate(&per_row_stream);
static hipStream_t per_col_stream = NULL;
if (per_col_stream == NULL)
hipStreamCreate(&per_col_stream);
static hipStream_t per_frtbk_stream = NULL;
if (per_frtbk_stream == NULL)
hipStreamCreate(&per_frtbk_stream);
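// Note: each launch below is followed by a full device synchronize, so the
// three streams do not actually overlap; they are kept for possible
// asynchronous use later.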
//Copy the top and bottom halos around the compute grid
threadsPerBlock.x = 6;// increase to 32
threadsPerBlock.y = halo_depth; // do not change
threadsPerBlock.z = 1; // do not change
blocksPerGrid.x = (int)ceil((double)(nx-2*halo_depth) / (double)threadsPerBlock.x);
printf("\n %d, %d,",blocksPerGrid.x, threadsPerBlock.y);
blocksPerGrid.y = 1;
blocksPerGrid.z = nz-(2*halo_depth);
//printf(" %d block in z= %d",threadsPerBlock.z, blocksPerGrid.z);
//printf("\n----------------------\ngoing inside the kernel to copy rows\n-----------------------------\n");
hipLaunchKernelGGL(( copy_internal_rows), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, per_row_stream, d_halo, d_grid, nx, ny, nz, halo_depth, blocksPerGrid);
hipDeviceSynchronize();
//Copy the top and bottom halos around the compute grid
threadsPerBlock.x = halo_depth; // do not change
threadsPerBlock.y = 2; // increase to 32
threadsPerBlock.z = 1; // do not change
//printf("\n %d \n",threadsPerBlock.y);
blocksPerGrid.x = 1;
blocksPerGrid.y = (int)ceil((double)(ny-2*halo_depth) / (double)threadsPerBlock.y);
//printf("%d blocksPerGrid.y \n", blocksPerGrid.y);
blocksPerGrid.z = nz-(2*halo_depth);
//printf(" %d block in z= %d",threadsPerBlock.z, blocksPerGrid.z);
//printf("\n----------------------\ngoing inside the kernel to copy cols\n-----------------------------\n");
hipLaunchKernelGGL(( copy_internal_cols), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, per_col_stream, d_halo, d_grid, nx, ny, nz, halo_depth, blocksPerGrid);
hipDeviceSynchronize();
//Copy the front and back halos around the compute grid
threadsPerBlock.x = 4;// increase to 32
threadsPerBlock.y = 6;// increase to 32
threadsPerBlock.z = 1; // do not change
//printf("\n %d \n",threadsPerBlock.y);
blocksPerGrid.x = (int)ceil((double)(nx-2*halo_depth) / (double)threadsPerBlock.x);
blocksPerGrid.y = (int)ceil((double)(ny-2*halo_depth) / (double)threadsPerBlock.y);
//printf("%d blocksPerGrid.y \n", blocksPerGrid.y);
blocksPerGrid.z = halo_depth;
//printf(" %d block in z= %d",threadsPerBlock.z, blocksPerGrid.z);
//printf("\n----------------------\ngoing inside the kernel to copy frtbk\n-----------------------------\n");
hipLaunchKernelGGL(( copy_internal_frtbk), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, per_frtbk_stream, d_halo, d_grid, nx, ny, nz, halo_depth, blocksPerGrid);
hipDeviceSynchronize();
//printf("\n came back \n");
return;
}
/****************************************************************************************/
| 8f417ef487d059115ec306f07af8d69a128f559c.cu | /* Date: 01-03-2017
Author: Omer Anjum
Description:
Copying internal halos from GPU to host
Comments:
Date: March 10, 2017
Omer Anjum
Very first version of code written.
*/
#include <stdio.h>
/****************************************************************************************/
__global__ void copy_internal_rows(float* d_halo, float* d_grid, int nx, int ny, int nz, int halo_depth, dim3 blocksPerGrid)
{
//int halo_size = (halo_depth*nx*2 + halo_depth*(ny-halo_depth*2)*2)*(nz-halo_depth*2) + nx*ny*(halo_depth*2);
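// Halo buffer layout per z-slice: the bottom halo_depth rows of the interior
// region, then the left/right column strips, then the top halo_depth rows;
// this kernel fills the bottom band (first store) and the top band (second store).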
const int halo_idx_x = threadIdx.x + blockIdx.x*blockDim.x;
const int halo_idx_y = threadIdx.y + blockIdx.y*blockDim.y;
const int halo_idx_z = threadIdx.z + blockIdx.z*blockDim.z;
int halo_idx = (halo_idx_x) + (halo_idx_y)*(nx-2*halo_depth) + (halo_idx_z)*((nx-2*halo_depth)*(halo_depth*2)+(ny-(halo_depth*4))*(halo_depth*2));//last term 128*6+128*6
int d_grid_idx = (halo_idx_x+halo_depth) + (halo_idx_y+halo_depth)*nx + (halo_idx_z+halo_depth)*nx*ny;
if(halo_idx_x < nx-2*halo_depth){
d_halo[halo_idx] = d_grid[d_grid_idx];
d_halo[halo_idx+((nx-2*halo_depth)*halo_depth+(ny-(halo_depth*4))*(halo_depth*2))] = d_grid[d_grid_idx+(ny-3*halo_depth)*nx];
}
}
/****************************************************************************************/
__global__ void copy_internal_cols(float* d_halo, float* d_grid, int nx, int ny, int nz, int halo_depth, dim3 blocksPerGrid)
{
//int halo_size = (halo_depth*nx*2 + halo_depth*(ny-halo_depth*2)*2)*(nz-halo_depth*2) + nx*ny*(halo_depth*2);
const int halo_idx_x = threadIdx.x + blockIdx.x*blockDim.x;
const int halo_idx_y = threadIdx.y + blockIdx.y*blockDim.y;
const int halo_idx_z = threadIdx.z + blockIdx.z*blockDim.z;
int halo_idx = halo_depth*(nx-2*halo_depth) + (halo_idx_x) + (halo_idx_y)*2*halo_depth + (halo_idx_z)*((nx-2*halo_depth)*(halo_depth*2)+(ny-(halo_depth*4))*(halo_depth*2));//last term 134*6+128*6, first term taking threads to where columns data starts
int d_grid_idx = (halo_idx_x+halo_depth) + (halo_idx_y+2*halo_depth)*nx + (halo_idx_z+halo_depth)*nx*ny;
if(halo_idx_y < ny-4*halo_depth){
d_halo[halo_idx] = d_grid[d_grid_idx];
d_halo[halo_idx+halo_depth] = d_grid[d_grid_idx+(nx-3*halo_depth)];//---|idx|------|nx|---|nx+idx|
}
}
/****************************************************************************************/
__global__ void copy_internal_frtbk(float* d_halo, float* d_grid, int nx, int ny, int nz, int halo_depth, dim3 blocksPerGrid)
{
//int halo_size = (halo_depth*nx*2 + halo_depth*(ny-halo_depth*2)*2)*(nz-halo_depth*2) + nx*ny*(halo_depth*2);
const int halo_idx_x = threadIdx.x + blockIdx.x*blockDim.x;
const int halo_idx_y = threadIdx.y + blockIdx.y*blockDim.y;
const int halo_idx_z = threadIdx.z + blockIdx.z*blockDim.z;
int halo_idx = (halo_depth*(nx-2*halo_depth)*2 +(ny-(halo_depth*4))*(halo_depth*2))*(nz-2*halo_depth) + (halo_idx_x) + (halo_idx_y)*(nx-2*halo_depth) + (halo_idx_z)*(nx-2*halo_depth)*(ny-2*halo_depth);//last term 134*6+128*6, first term taking threads to where columns data starts
int d_grid_idx = (halo_idx_x+halo_depth) + (halo_idx_y+halo_depth)*nx + (halo_idx_z)*nx*ny;
if(halo_idx_x < nx - 2*halo_depth && halo_idx_y < ny - 2*halo_depth && halo_idx_z < nz){
d_halo[halo_idx] = d_grid[d_grid_idx];
d_halo[halo_idx+(nx-2*halo_depth)*(ny-2*halo_depth)*halo_depth] = d_grid[d_grid_idx+nx*ny*(nz-halo_depth)];
}
/*__syncthreads();
if(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) {
printf("Writing thread (%d,%d,%d) at block (%d,%d,%d) \n",threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x,blockIdx.y,blockIdx.z );
printf("\n printing halo\n");
for (int k=0; k < halo_size; k++) {
printf("%d, ",d_halo[k]);
}
}*/
}
/****************************************************************************************/
void fillhalosinhost(float* d_halo, float* d_grid, int nx, int ny, int nz, int halo_depth)
{
//int ELEMS_PER_THREAD_in_z = nz-(2*halo_depth);
//TODO: Adapt for shearing-periodic case
static dim3 blocksPerGrid, threadsPerBlock;
//Create streams for executing the boundary copy
//kernels concurrently.
static cudaStream_t per_row_stream = NULL;
if (per_row_stream == NULL)
cudaStreamCreate(&per_row_stream);
static cudaStream_t per_col_stream = NULL;
if (per_col_stream == NULL)
cudaStreamCreate(&per_col_stream);
static cudaStream_t per_frtbk_stream = NULL;
if (per_frtbk_stream == NULL)
cudaStreamCreate(&per_frtbk_stream);
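// Note: each launch below is followed by a full device synchronize, so the
// three streams do not actually overlap; they are kept for possible
// asynchronous use later.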
//Copy the top and bottom halos around the compute grid
threadsPerBlock.x = 6;// increase to 32
threadsPerBlock.y = halo_depth; // do not change
threadsPerBlock.z = 1; // do not change
blocksPerGrid.x = (int)ceil((double)(nx-2*halo_depth) / (double)threadsPerBlock.x);
printf("\n %d, %d,",blocksPerGrid.x, threadsPerBlock.y);
blocksPerGrid.y = 1;
blocksPerGrid.z = nz-(2*halo_depth);
//printf(" %d block in z= %d",threadsPerBlock.z, blocksPerGrid.z);
//printf("\n----------------------\ngoing inside the kernel to copy rows\n-----------------------------\n");
copy_internal_rows<<<blocksPerGrid, threadsPerBlock, 0, per_row_stream>>>(d_halo, d_grid, nx, ny, nz, halo_depth, blocksPerGrid);
cudaThreadSynchronize();
//Copy the top and bottom halos around the compute grid
threadsPerBlock.x = halo_depth; // do not change
threadsPerBlock.y = 2; // increase to 32
threadsPerBlock.z = 1; // do not change
//printf("\n %d \n",threadsPerBlock.y);
blocksPerGrid.x = 1;
blocksPerGrid.y = (int)ceil((double)(ny-2*halo_depth) / (double)threadsPerBlock.y);
//printf("%d blocksPerGrid.y \n", blocksPerGrid.y);
blocksPerGrid.z = nz-(2*halo_depth);
//printf(" %d block in z= %d",threadsPerBlock.z, blocksPerGrid.z);
//printf("\n----------------------\ngoing inside the kernel to copy cols\n-----------------------------\n");
copy_internal_cols<<<blocksPerGrid, threadsPerBlock, 0, per_col_stream>>>(d_halo, d_grid, nx, ny, nz, halo_depth, blocksPerGrid);
cudaThreadSynchronize();
//Copy the front and back halos around the compute grid
threadsPerBlock.x = 4;// increase to 32
threadsPerBlock.y = 6;// increase to 32
threadsPerBlock.z = 1; // do not change
//printf("\n %d \n",threadsPerBlock.y);
blocksPerGrid.x = (int)ceil((double)(nx-2*halo_depth) / (double)threadsPerBlock.x);
blocksPerGrid.y = (int)ceil((double)(ny-2*halo_depth) / (double)threadsPerBlock.y);
//printf("%d blocksPerGrid.y \n", blocksPerGrid.y);
blocksPerGrid.z = halo_depth;
//printf(" %d block in z= %d",threadsPerBlock.z, blocksPerGrid.z);
//printf("\n----------------------\ngoing inside the kernel to copy frtbk\n-----------------------------\n");
copy_internal_frtbk<<<blocksPerGrid, threadsPerBlock, 0, per_frtbk_stream>>>(d_halo, d_grid, nx, ny, nz, halo_depth, blocksPerGrid);
cudaThreadSynchronize();
//printf("\n came back \n");
return;
}
/****************************************************************************************/
|
b26074e2af3c684184de77f00f760b324ff93bd1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <time.h>
#include <float.h>
#include <hiprand/hiprand_kernel.h>
#include "vec3.h"
#include "ray.h"
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) {
if (result) {
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
hipDeviceReset();
exit(99);
}
}
#define RANDVEC3 vec3(hiprand_uniform(local_rand_state),hiprand_uniform(local_rand_state),hiprand_uniform(local_rand_state))
__device__ vec3 random_in_unit_sphere(hiprandState_t *local_rand_state) {
vec3 p;
do {
p = 2.0f*RANDVEC3 - vec3(1,1,1);
} while (p.squared_length() >= 1.0f);
return p;
}
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
__device__ vec3 color(const ray& r, hitable **world, hiprandState_t *local_rand_state) {
ray cur_ray = r;
float cur_attenuation = 1.0f;
for(int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
vec3 target = rec.p + rec.normal + random_in_unit_sphere(local_rand_state);
cur_attenuation *= 0.5f;
cur_ray = ray(rec.p, target-rec.p);
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f*(unit_direction.y() + 1.0f);
vec3 c = (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0,0.0,0.0); // exceeded recursion
}
__global__ void render_init(int max_x, int max_y, hiprandState_t *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
//Each thread gets same seed, a different sequence number, no offset
hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
}
__global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, hiprandState_t *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
hiprandState_t local_rand_state = rand_state[pixel_index];
vec3 col(0,0,0);
for(int s=0; s < ns; s++) {
float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u,v);
col += color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
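// gamma-correct the averaged colour (gamma = 2, i.e. component-wise sqrt)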
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
__global__ void create_world(hitable **d_list, hitable **d_world, camera **d_camera) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
*(d_list) = new sphere(vec3(0,0,-1), 0.5);
*(d_list+1) = new sphere(vec3(0,-100.5,-1), 100);
*d_world = new hitable_list(d_list,2);
*d_camera = new camera();
}
}
__global__ void free_world(hitable **d_list, hitable **d_world, camera **d_camera) {
delete *(d_list);
delete *(d_list+1);
delete *d_world;
delete *d_camera;
}
int main() {
int nx = 640;
int ny = 320;
int ns = 1000;
int tx = 8;
int ty = 8;
std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = nx*ny;
size_t fb_size = num_pixels*sizeof(vec3);
// allocate FB
vec3 *fb;
checkCudaErrors(hipMallocManaged((void **)&fb, fb_size));
// allocate random state
hiprandState_t *d_rand_state;
checkCudaErrors(hipMalloc((void **)&d_rand_state, num_pixels*sizeof(hiprandState_t)));
// make our world of hitables & the camera
hitable **d_list;
checkCudaErrors(hipMalloc((void **)&d_list, 2*sizeof(hitable *)));
hitable **d_world;
checkCudaErrors(hipMalloc((void **)&d_world, sizeof(hitable *)));
camera **d_camera;
checkCudaErrors(hipMalloc((void **)&d_camera, sizeof(camera *)));
hipLaunchKernelGGL(( create_world), dim3(1),dim3(1), 0, 0, d_list,d_world,d_camera);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(nx/tx+1,ny/ty+1);
dim3 threads(tx,ty);
hipLaunchKernelGGL(( render_init), dim3(blocks), dim3(threads), 0, 0, nx, ny, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, fb, nx, ny, ns, d_camera, d_world, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
// Output FB as Image
std::ofstream myfile("test.ppm");
if (myfile.is_open()) {
myfile << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j * nx + i;
int ir = int(255.99*fb[pixel_index].r());
int ig = int(255.99*fb[pixel_index].g());
int ib = int(255.99*fb[pixel_index].b());
myfile << ir << " " << ig << " " << ib << "\n";
}
}
myfile.close();
}
else std::cout << "Unable to open file";
// clean up
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( free_world), dim3(1),dim3(1), 0, 0, d_list,d_world,d_camera);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_camera));
checkCudaErrors(hipFree(d_world));
checkCudaErrors(hipFree(d_list));
checkCudaErrors(hipFree(d_rand_state));
checkCudaErrors(hipFree(fb));
hipDeviceReset();
} | b26074e2af3c684184de77f00f760b324ff93bd1.cu | #include <iostream>
#include <fstream>
#include <time.h>
#include <float.h>
#include <curand_kernel.h>
#include "vec3.h"
#include "ray.h"
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
if (result) {
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
cudaDeviceReset();
exit(99);
}
}
#define RANDVEC3 vec3(curand_uniform(local_rand_state),curand_uniform(local_rand_state),curand_uniform(local_rand_state))
__device__ vec3 random_in_unit_sphere(curandState *local_rand_state) {
vec3 p;
do {
p = 2.0f*RANDVEC3 - vec3(1,1,1);
} while (p.squared_length() >= 1.0f);
return p;
}
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
__device__ vec3 color(const ray& r, hitable **world, curandState *local_rand_state) {
ray cur_ray = r;
float cur_attenuation = 1.0f;
for(int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
vec3 target = rec.p + rec.normal + random_in_unit_sphere(local_rand_state);
cur_attenuation *= 0.5f;
cur_ray = ray(rec.p, target-rec.p);
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f*(unit_direction.y() + 1.0f);
vec3 c = (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0,0.0,0.0); // exceeded recursion
}
__global__ void render_init(int max_x, int max_y, curandState *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
//Each thread gets same seed, a different sequence number, no offset
curand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
}
__global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, curandState *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
curandState local_rand_state = rand_state[pixel_index];
vec3 col(0,0,0);
for(int s=0; s < ns; s++) {
float u = float(i + curand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + curand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u,v);
col += color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
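// gamma-correct the averaged colour (gamma = 2, i.e. component-wise sqrt)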
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
__global__ void create_world(hitable **d_list, hitable **d_world, camera **d_camera) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
*(d_list) = new sphere(vec3(0,0,-1), 0.5);
*(d_list+1) = new sphere(vec3(0,-100.5,-1), 100);
*d_world = new hitable_list(d_list,2);
*d_camera = new camera();
}
}
__global__ void free_world(hitable **d_list, hitable **d_world, camera **d_camera) {
delete *(d_list);
delete *(d_list+1);
delete *d_world;
delete *d_camera;
}
int main() {
int nx = 640;
int ny = 320;
int ns = 1000;
int tx = 8;
int ty = 8;
std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = nx*ny;
size_t fb_size = num_pixels*sizeof(vec3);
// allocate FB
vec3 *fb;
checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size));
// allocate random state
curandState *d_rand_state;
checkCudaErrors(cudaMalloc((void **)&d_rand_state, num_pixels*sizeof(curandState)));
// make our world of hitables & the camera
hitable **d_list;
checkCudaErrors(cudaMalloc((void **)&d_list, 2*sizeof(hitable *)));
hitable **d_world;
checkCudaErrors(cudaMalloc((void **)&d_world, sizeof(hitable *)));
camera **d_camera;
checkCudaErrors(cudaMalloc((void **)&d_camera, sizeof(camera *)));
create_world<<<1,1>>>(d_list,d_world,d_camera);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(nx/tx+1,ny/ty+1);
dim3 threads(tx,ty);
render_init<<<blocks, threads>>>(nx, ny, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
render<<<blocks, threads>>>(fb, nx, ny, ns, d_camera, d_world, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
// Output FB as Image
std::ofstream myfile("test.ppm");
if (myfile.is_open()) {
myfile << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j * nx + i;
int ir = int(255.99*fb[pixel_index].r());
int ig = int(255.99*fb[pixel_index].g());
int ib = int(255.99*fb[pixel_index].b());
myfile << ir << " " << ig << " " << ib << "\n";
}
}
myfile.close();
}
else std::cout << "Unable to open file";
// clean up
checkCudaErrors(cudaDeviceSynchronize());
free_world<<<1,1>>>(d_list,d_world,d_camera);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_camera));
checkCudaErrors(cudaFree(d_world));
checkCudaErrors(cudaFree(d_list));
checkCudaErrors(cudaFree(d_rand_state));
checkCudaErrors(cudaFree(fb));
cudaDeviceReset();
} |
conv1d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <common/utils.h>
#include "filters.h"
void conv1d(double * in, const int size_in, double * filter, const int size_filter,
double *out)
{
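// Sliding-window filter (cross-correlation) with a window of width size_filter
// centred on i; taps that fall outside the signal are skipped (implicit zero
// padding). Assumes an odd size_filter, i.e. radius taps on each side of the centre.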
int i,j;
int radius = size_filter / 2;
for(i=0;i<size_in;i++)
{
double sum = 0.0;
for(j=0;j<=radius;j++)
{
if( (i-j) >= 0) // left
{
sum += filter[radius - j]*in[i-j];
}
if( (i+j) < size_in && (j != 0)) // right
{
sum += filter[radius + j]*in[i+j];
}
}
out[i] = sum;
}
}
__global__ void gpu_conv1d(double *in, const int size_in, double * filter, const int size_filter,
double *out)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < size_in)
{
double sum = 0.0;
int radius = size_filter / 2;
int j;
for(j=0;j<=radius;j++)
{
if( (i-j) >= 0) // left
{
sum += filter[radius - j]*in[i-j];
}
if( (i+j) < size_in && (j != 0)) // right
{
sum += filter[radius + j]*in[i+j];
}
}
out[i] = sum;
}
}
int test_conv1d(int N, double& cpu_time, double& gpu_time)
{
int pass = 0;
double * signal = (double*)malloc(N*sizeof(double));
double * result = (double*)malloc(N*sizeof(double));
double * gpu_result = (double*)malloc(N*sizeof(double));
clock_t begin, end;
double *d_signal,*d_result,*d_filter;
CUDA_CALL(hipMalloc((void**)&d_signal, N*sizeof(double)));
CUDA_CALL(hipMalloc((void**)&d_result, N*sizeof(double)));
fill_rand(signal,N);
int fw = 5;
double * filter = ones(fw);
CUDA_CALL(hipMalloc((void**)&d_filter, fw*sizeof(double)));
CUDA_CALL(hipMemcpy(d_signal, signal, N*sizeof(double), hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_filter, filter, fw*sizeof(double), hipMemcpyHostToDevice));
begin = clock();
conv1d(signal, N, filter, fw, result);
end = clock();
printf("CPU elapsed: %lfs\n", elapsed(begin,end));
cpu_time = elapsed(begin,end);
int threadsPerBlock = 512;
int blocksPerGrid =(N + threadsPerBlock - 1) / threadsPerBlock;
begin = clock();
hipLaunchKernelGGL(( gpu_conv1d), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_signal, N, d_filter, fw, d_result);
CUDA_CALL(hipDeviceSynchronize());
end = clock();
printf("GPU elapsed: %lfs\n", elapsed(begin,end));
gpu_time = elapsed(begin,end);
CUDA_CALL(hipMemcpy(gpu_result, d_result, N*sizeof(double), hipMemcpyDeviceToHost));
CUDA_CALL(hipFree(d_signal));
CUDA_CALL(hipFree(d_result));
CUDA_CALL(hipFree(d_filter));
if(!test_arrays_equal(gpu_result,result,N))
{
printf("Test failed!\n");
pass = 0;
} else
{
printf("Test passed!\n");
pass = 1;
}
free(signal);
free(result);
free(gpu_result);
return pass;
}
void timing()
{
int N = 10000;
double cpu,gpu;
FILE *csv;
csv = fopen("results/timing.csv", "w+");
if(!csv)
{
fprintf(stderr, "(host) unable to create timing results file!\n");
exit(EXIT_FAILURE);
}
fprintf(csv, "%s,%s,%s\n", "N", "CPU_Time", "GPU_Time");
for(N=1e4;N<=5e7;N *= 2)
{
if(!test_conv1d(N,cpu,gpu)) exit(EXIT_FAILURE);
fprintf(csv, "%d,%lf,%lf\n", N, cpu, gpu);
}
fclose(csv);
}
int main(int argc, char** argv)
{
// srand(time(0));
timing();
return 0;
} | conv1d.cu | #include <cuda_runtime.h>
#include <common/utils.h>
#include "filters.h"
void conv1d(double * in, const int size_in, double * filter, const int size_filter,
double *out)
{
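// Sliding-window filter (cross-correlation) with a window of width size_filter
// centred on i; taps that fall outside the signal are skipped (implicit zero
// padding). Assumes an odd size_filter, i.e. radius taps on each side of the centre.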
int i,j;
int radius = size_filter / 2;
for(i=0;i<size_in;i++)
{
double sum = 0.0;
for(j=0;j<=radius;j++)
{
if( (i-j) >= 0) // left
{
sum += filter[radius - j]*in[i-j];
}
if( (i+j) < size_in && (j != 0)) // right
{
sum += filter[radius + j]*in[i+j];
}
}
out[i] = sum;
}
}
__global__ void gpu_conv1d(double *in, const int size_in, double * filter, const int size_filter,
double *out)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < size_in)
{
double sum = 0.0;
int radius = size_filter / 2;
int j;
for(j=0;j<=radius;j++)
{
if( (i-j) >= 0) // left
{
sum += filter[radius - j]*in[i-j];
}
if( (i+j) < size_in && (j != 0)) // right
{
sum += filter[radius + j]*in[i+j];
}
}
out[i] = sum;
}
}
int test_conv1d(int N, double& cpu_time, double& gpu_time)
{
int pass = 0;
double * signal = (double*)malloc(N*sizeof(double));
double * result = (double*)malloc(N*sizeof(double));
double * gpu_result = (double*)malloc(N*sizeof(double));
clock_t begin, end;
double *d_signal,*d_result,*d_filter;
CUDA_CALL(cudaMalloc((void**)&d_signal, N*sizeof(double)));
CUDA_CALL(cudaMalloc((void**)&d_result, N*sizeof(double)));
fill_rand(signal,N);
int fw = 5;
double * filter = ones(fw);
CUDA_CALL(cudaMalloc((void**)&d_filter, fw*sizeof(double)));
CUDA_CALL(cudaMemcpy(d_signal, signal, N*sizeof(double), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_filter, filter, fw*sizeof(double), cudaMemcpyHostToDevice));
begin = clock();
conv1d(signal, N, filter, fw, result);
end = clock();
printf("CPU elapsed: %lfs\n", elapsed(begin,end));
cpu_time = elapsed(begin,end);
int threadsPerBlock = 512;
int blocksPerGrid =(N + threadsPerBlock - 1) / threadsPerBlock;
begin = clock();
gpu_conv1d<<<blocksPerGrid,threadsPerBlock>>>(d_signal, N, d_filter, fw, d_result);
CUDA_CALL(cudaDeviceSynchronize());
end = clock();
printf("GPU elapsed: %lfs\n", elapsed(begin,end));
gpu_time = elapsed(begin,end);
CUDA_CALL(cudaMemcpy(gpu_result, d_result, N*sizeof(double), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaFree(d_signal));
CUDA_CALL(cudaFree(d_result));
CUDA_CALL(cudaFree(d_filter));
if(!test_arrays_equal(gpu_result,result,N))
{
printf("Test failed!\n");
pass = 0;
} else
{
printf("Test passed!\n");
pass = 1;
}
free(signal);
free(result);
free(gpu_result);
return pass;
}
void timing()
{
int N = 10000;
double cpu,gpu;
FILE *csv;
csv = fopen("results/timing.csv", "w+");
if(!csv)
{
fprintf(stderr, "(host) unable to create timing results file!\n");
exit(EXIT_FAILURE);
}
fprintf(csv, "%s,%s,%s\n", "N", "CPU_Time", "GPU_Time");
for(N=1e4;N<=5e7;N *= 2)
{
if(!test_conv1d(N,cpu,gpu)) exit(EXIT_FAILURE);
fprintf(csv, "%d,%lf,%lf\n", N, cpu, gpu);
}
fclose(csv);
}
int main(int argc, char** argv)
{
// srand(time(0));
timing();
return 0;
} |
5b8f308cd4339a416a33df2268034153999c6d59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "unroll_detail.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace reduce
{
struct Sum
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__host__ __device__ __forceinline__ Sum() {}
__host__ __device__ __forceinline__ Sum(const Sum&) {}
};
struct Avg
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
template <typename T>
__device__ __forceinline__ typename TypeVec<double, VecTraits<T>::cn>::vec_type result(T r, double sz) const
{
return r / sz;
}
__host__ __device__ __forceinline__ Avg() {}
__host__ __device__ __forceinline__ Avg(const Avg&) {}
};
struct Min
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
minimum<T> minOp;
return minOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__host__ __device__ __forceinline__ Min() {}
__host__ __device__ __forceinline__ Min(const Min&) {}
};
struct Max
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(-numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
maximum<T> maxOp;
return maxOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__host__ __device__ __forceinline__ Max() {}
__host__ __device__ __forceinline__ Max(const Max&) {}
};
///////////////////////////////////////////////////////////
template <typename T, typename S, typename D, class Op>
__global__ void rowsKernel(const PtrStepSz<T> src, D* dst, const Op op)
{
__shared__ S smem[16 * 16];
const int x = blockIdx.x * 16 + threadIdx.x;
S myVal = op.template startValue<S>();
if (x < src.cols)
{
for (int y = threadIdx.y; y < src.rows; y += 16)
{
S srcVal = src(y, x);
myVal = op(myVal, srcVal);
}
}
smem[threadIdx.x * 16 + threadIdx.y] = myVal;
__syncthreads();
volatile S* srow = smem + threadIdx.y * 16;
myVal = srow[threadIdx.x];
cudev::reduce<16>(srow, myVal, threadIdx.x, op);
if (threadIdx.x == 0)
srow[0] = myVal;
__syncthreads();
if (threadIdx.y == 0 && x < src.cols)
dst[x] = (D) op.result(smem[threadIdx.x * 16], src.rows);
}
template <typename T, typename S, typename D, class Op>
void rowsCaller(PtrStepSz<T> src, D* dst, hipStream_t stream)
{
const dim3 block(16, 16);
const dim3 grid(divUp(src.cols, block.x));
Op op;
hipLaunchKernelGGL(( rowsKernel<T, S, D, Op>), dim3(grid), dim3(block), 0, stream, src, dst, op);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T, typename S, typename D>
void rows(PtrStepSzb src, void* dst, int op, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSz<T> src, D* dst, hipStream_t stream);
static const func_t funcs[] =
{
rowsCaller<T, S, D, Sum>,
rowsCaller<T, S, D, Avg>,
rowsCaller<T, S, D, Max>,
rowsCaller<T, S, D, Min>
};
funcs[op]((PtrStepSz<T>) src, (D*) dst, stream);
}
template void rows<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned char, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned char, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned char, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, int, short>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<int, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<int, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<int, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<float, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<float, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<double, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
///////////////////////////////////////////////////////////
template <int BLOCK_SIZE, typename T, typename S, typename D, int cn, class Op>
__global__ void colsKernel(const PtrStepSz<typename TypeVec<T, cn>::vec_type> src, typename TypeVec<D, cn>::vec_type* dst, const Op op)
{
typedef typename TypeVec<T, cn>::vec_type src_type;
typedef typename TypeVec<S, cn>::vec_type work_type;
typedef typename TypeVec<D, cn>::vec_type dst_type;
__shared__ S smem[BLOCK_SIZE * cn];
const int y = blockIdx.x;
const src_type* srcRow = src.ptr(y);
work_type myVal = op.template startValue<work_type>();
for (int x = threadIdx.x; x < src.cols; x += BLOCK_SIZE)
myVal = op(myVal, saturate_cast<work_type>(srcRow[x]));
cudev::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(myVal), threadIdx.x, detail::Unroll<cn>::op(op));
if (threadIdx.x == 0)
dst[y] = saturate_cast<dst_type>(op.result(myVal, src.cols));
}
template <typename T, typename S, typename D, int cn, class Op> void colsCaller(PtrStepSzb src, void* dst, hipStream_t stream)
{
const int BLOCK_SIZE = 256;
const dim3 block(BLOCK_SIZE);
const dim3 grid(src.rows);
Op op;
hipLaunchKernelGGL(( colsKernel<BLOCK_SIZE, T, S, D, cn, Op>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<typename TypeVec<T, cn>::vec_type>) src, (typename TypeVec<D, cn>::vec_type*) dst, op);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T, typename S, typename D> void cols(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, void* dst, hipStream_t stream);
static const func_t funcs[5][4] =
{
{0,0,0,0},
{colsCaller<T, S, D, 1, Sum>, colsCaller<T, S, D, 1, Avg>, colsCaller<T, S, D, 1, Max>, colsCaller<T, S, D, 1, Min>},
{colsCaller<T, S, D, 2, Sum>, colsCaller<T, S, D, 2, Avg>, colsCaller<T, S, D, 2, Max>, colsCaller<T, S, D, 2, Min>},
{colsCaller<T, S, D, 3, Sum>, colsCaller<T, S, D, 3, Avg>, colsCaller<T, S, D, 3, Max>, colsCaller<T, S, D, 3, Min>},
{colsCaller<T, S, D, 4, Sum>, colsCaller<T, S, D, 4, Avg>, colsCaller<T, S, D, 4, Max>, colsCaller<T, S, D, 4, Min>},
};
funcs[cn][op](src, dst, stream);
}
template void cols<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned char, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned char, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned char, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, int, short>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<int, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<int, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<int, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<float, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<float, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<double, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| 5b8f308cd4339a416a33df2268034153999c6d59.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "unroll_detail.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace reduce
{
struct Sum
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__host__ __device__ __forceinline__ Sum() {}
__host__ __device__ __forceinline__ Sum(const Sum&) {}
};
struct Avg
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
template <typename T>
__device__ __forceinline__ typename TypeVec<double, VecTraits<T>::cn>::vec_type result(T r, double sz) const
{
return r / sz;
}
__host__ __device__ __forceinline__ Avg() {}
__host__ __device__ __forceinline__ Avg(const Avg&) {}
};
struct Min
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
minimum<T> minOp;
return minOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__host__ __device__ __forceinline__ Min() {}
__host__ __device__ __forceinline__ Min(const Min&) {}
};
struct Max
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(-numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
maximum<T> maxOp;
return maxOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__host__ __device__ __forceinline__ Max() {}
__host__ __device__ __forceinline__ Max(const Max&) {}
};
///////////////////////////////////////////////////////////
template <typename T, typename S, typename D, class Op>
__global__ void rowsKernel(const PtrStepSz<T> src, D* dst, const Op op)
{
__shared__ S smem[16 * 16];
const int x = blockIdx.x * 16 + threadIdx.x;
S myVal = op.template startValue<S>();
if (x < src.cols)
{
for (int y = threadIdx.y; y < src.rows; y += 16)
{
S srcVal = src(y, x);
myVal = op(myVal, srcVal);
}
}
smem[threadIdx.x * 16 + threadIdx.y] = myVal;
__syncthreads();
volatile S* srow = smem + threadIdx.y * 16;
myVal = srow[threadIdx.x];
cudev::reduce<16>(srow, myVal, threadIdx.x, op);
if (threadIdx.x == 0)
srow[0] = myVal;
__syncthreads();
if (threadIdx.y == 0 && x < src.cols)
dst[x] = (D) op.result(smem[threadIdx.x * 16], src.rows);
}
template <typename T, typename S, typename D, class Op>
void rowsCaller(PtrStepSz<T> src, D* dst, cudaStream_t stream)
{
const dim3 block(16, 16);
const dim3 grid(divUp(src.cols, block.x));
Op op;
rowsKernel<T, S, D, Op><<<grid, block, 0, stream>>>(src, dst, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, typename S, typename D>
void rows(PtrStepSzb src, void* dst, int op, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSz<T> src, D* dst, cudaStream_t stream);
static const func_t funcs[] =
{
rowsCaller<T, S, D, Sum>,
rowsCaller<T, S, D, Avg>,
rowsCaller<T, S, D, Max>,
rowsCaller<T, S, D, Min>
};
funcs[op]((PtrStepSz<T>) src, (D*) dst, stream);
}
template void rows<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned char, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned char, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned char, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, int, short>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<int, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<int, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<int, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<float, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<float, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<double, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
///////////////////////////////////////////////////////////
template <int BLOCK_SIZE, typename T, typename S, typename D, int cn, class Op>
__global__ void colsKernel(const PtrStepSz<typename TypeVec<T, cn>::vec_type> src, typename TypeVec<D, cn>::vec_type* dst, const Op op)
{
typedef typename TypeVec<T, cn>::vec_type src_type;
typedef typename TypeVec<S, cn>::vec_type work_type;
typedef typename TypeVec<D, cn>::vec_type dst_type;
__shared__ S smem[BLOCK_SIZE * cn];
const int y = blockIdx.x;
const src_type* srcRow = src.ptr(y);
work_type myVal = op.template startValue<work_type>();
for (int x = threadIdx.x; x < src.cols; x += BLOCK_SIZE)
myVal = op(myVal, saturate_cast<work_type>(srcRow[x]));
cudev::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(myVal), threadIdx.x, detail::Unroll<cn>::op(op));
if (threadIdx.x == 0)
dst[y] = saturate_cast<dst_type>(op.result(myVal, src.cols));
}
template <typename T, typename S, typename D, int cn, class Op> void colsCaller(PtrStepSzb src, void* dst, cudaStream_t stream)
{
const int BLOCK_SIZE = 256;
const dim3 block(BLOCK_SIZE);
const dim3 grid(src.rows);
Op op;
colsKernel<BLOCK_SIZE, T, S, D, cn, Op><<<grid, block, 0, stream>>>((PtrStepSz<typename TypeVec<T, cn>::vec_type>) src, (typename TypeVec<D, cn>::vec_type*) dst, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, typename S, typename D> void cols(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, void* dst, cudaStream_t stream);
static const func_t funcs[5][4] =
{
{0,0,0,0},
{colsCaller<T, S, D, 1, Sum>, colsCaller<T, S, D, 1, Avg>, colsCaller<T, S, D, 1, Max>, colsCaller<T, S, D, 1, Min>},
{colsCaller<T, S, D, 2, Sum>, colsCaller<T, S, D, 2, Avg>, colsCaller<T, S, D, 2, Max>, colsCaller<T, S, D, 2, Min>},
{colsCaller<T, S, D, 3, Sum>, colsCaller<T, S, D, 3, Avg>, colsCaller<T, S, D, 3, Max>, colsCaller<T, S, D, 3, Min>},
{colsCaller<T, S, D, 4, Sum>, colsCaller<T, S, D, 4, Avg>, colsCaller<T, S, D, 4, Max>, colsCaller<T, S, D, 4, Min>},
};
funcs[cn][op](src, dst, stream);
}
template void cols<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned char, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned char, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned char, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, int, short>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<int, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<int, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<int, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<float, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<float, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<double, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
f47ec29f6fbdd0056b22fe3ec5dc8ed02340a7a9.hip | // !!! This is a file automatically generated by hipify!!!
/*
This example demonstrates how to use the Cuda OpenGL bindings to
dynamically modify a vertex buffer using a Cuda kernel.
The steps are:
1. Create an empty vertex buffer object (VBO)
2. Register the VBO with Cuda
3. Map the VBO for writing from Cuda
4. Run Cuda kernel to modify the vertex positions
5. Unmap the VBO
6. Render the results using OpenGL
Host code
*/
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
static void error_callback(int error, const char* description)
{
fprintf(stderr, "Error: %s\n", description);
}
static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GLFW_TRUE);
}
struct vertexColor
{
float x,y;
float r,g,b;
};
GLuint vbo;
struct cudaGraphicsResource *cuda_vbo_resource;
void *d_vbo_buffer = NULL;
const unsigned int window_width = 512;
const unsigned int window_height = 512;
__global__ void triangle_kernel(vertexColor* pos, unsigned int width, unsigned int height)
{
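//One thread per cell of the width x height grid: it writes that vertex's position and color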
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
//Calculate the clip coordinates
float u = 2.0 * x / (float) width - 1;
float v = 1 - 2.0 * y / (float) height;
//Write this vertex's position and color: red inside the triangle, black otherwise
if(u<=1 && u>=-1 && v<=1 && v>=-1)
{
if(v <= u+0.5 && v <= -u+0.5 && v >= -0.5)
{
pos[x*width+y].x = u;
pos[x*width+y].y = v;
pos[x*width+y].r = 255;
pos[x*width+y].g = 0;
pos[x*width+y].b = 0;
}
else
{
pos[x*width+y].x = u;
pos[x*width+y].y = v;
pos[x*width+y].r = 0;
pos[x*width+y].g = 0;
pos[x*width+y].b = 0;
}
}
}
void launch_kernel(vertexColor *pos, unsigned int width,
unsigned int height)
{
dim3 block(8, 8, 1);
dim3 grid(width / block.x, height / block.y, 1);
hipLaunchKernelGGL(( triangle_kernel), dim3(grid), dim3(block), 0, 0, pos, width, height);
}
void runCuda(struct cudaGraphicsResource **vbo_resource)
{
vertexColor *dptr;
hipGraphicsMapResources(1, vbo_resource, 0);
size_t num_bytes;
hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes,
*vbo_resource);
launch_kernel(dptr, window_width, window_height);
hipGraphicsUnmapResources(1, vbo_resource, 0);
}
void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags)
{
//Create vertex buffer object
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
//Initialize VBO
unsigned int size = window_width * window_height * sizeof(vertexColor);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
//Register VBO with CUDA
hipGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags);
}
int main(void)
{
//------ InitGL---------------//
GLFWwindow* window;
glfwSetErrorCallback(error_callback);
if (!glfwInit())
exit(EXIT_FAILURE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
window = glfwCreateWindow(window_width, window_height, "Simple example", NULL, NULL);
if (!window)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwSetKeyCallback(window, key_callback);
glfwMakeContextCurrent(window);
glewInit();
glfwSwapInterval(1);
glClearColor(0.0, 0.0, 0.0, 1.0);
glDisable(GL_DEPTH_TEST);
glViewport(0,0,window_width, window_height);
//----------InitGL--------------//
hipGLSetGLDevice(0);
createVBO(&vbo, &cuda_vbo_resource, hipGraphicsMapFlagsWriteDiscard);
runCuda(&cuda_vbo_resource);
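//The VBO is filled once by the kernel above and then drawn every frame in the render loop below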
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(2, GL_FLOAT, sizeof(float)*5, 0);
glColorPointer(3, GL_FLOAT,sizeof(float)*5, (GLvoid*)(sizeof(float)*2));
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
while (!glfwWindowShouldClose(window))
{
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_POINTS, 0, window_width * window_height);
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
| f47ec29f6fbdd0056b22fe3ec5dc8ed02340a7a9.cu | /*
This example demonstrates how to use the Cuda OpenGL bindings to
dynamically modify a vertex buffer using a Cuda kernel.
The steps are:
1. Create an empty vertex buffer object (VBO)
2. Register the VBO with Cuda
3. Map the VBO for writing from Cuda
4. Run Cuda kernel to modify the vertex positions
5. Unmap the VBO
6. Render the results using OpenGL
Host code
*/
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
static void error_callback(int error, const char* description)
{
fprintf(stderr, "Error: %s\n", description);
}
static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GLFW_TRUE);
}
struct vertexColor
{
float x,y;
float r,g,b;
};
GLuint vbo;
struct cudaGraphicsResource *cuda_vbo_resource;
void *d_vbo_buffer = NULL;
const unsigned int window_width = 512;
const unsigned int window_height = 512;
__global__ void triangle_kernel(vertexColor* pos, unsigned int width, unsigned int height)
{
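//One thread per cell of the width x height grid: it writes that vertex's position and color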
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
//Calculate the clip coordinates
float u = 2.0 * x / (float) width - 1;
float v = 1 - 2.0 * y / (float) height;
//Write this vertex's position and color: red inside the triangle, black otherwise
if(u<=1 && u>=-1 && v<=1 && v>=-1)
{
if(v <= u+0.5 && v <= -u+0.5 && v >= -0.5)
{
pos[x*width+y].x = u;
pos[x*width+y].y = v;
pos[x*width+y].r = 255;
pos[x*width+y].g = 0;
pos[x*width+y].b = 0;
}
else
{
pos[x*width+y].x = u;
pos[x*width+y].y = v;
pos[x*width+y].r = 0;
pos[x*width+y].g = 0;
pos[x*width+y].b = 0;
}
}
}
void launch_kernel(vertexColor *pos, unsigned int width,
unsigned int height)
{
dim3 block(8, 8, 1);
dim3 grid(width / block.x, height / block.y, 1);
triangle_kernel<<< grid, block>>>(pos, width, height);
}
void runCuda(struct cudaGraphicsResource **vbo_resource)
{
vertexColor *dptr;
cudaGraphicsMapResources(1, vbo_resource, 0);
size_t num_bytes;
cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes,
*vbo_resource);
launch_kernel(dptr, window_width, window_height);
cudaGraphicsUnmapResources(1, vbo_resource, 0);
}
void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags)
{
//Create vertex buffer object
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
//Initialize VBO
unsigned int size = window_width * window_height * sizeof(vertexColor);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
//Register VBO with CUDA
cudaGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags);
}
int main(void)
{
//------ InitGL---------------//
GLFWwindow* window;
glfwSetErrorCallback(error_callback);
if (!glfwInit())
exit(EXIT_FAILURE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
window = glfwCreateWindow(window_width, window_height, "Simple example", NULL, NULL);
if (!window)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwSetKeyCallback(window, key_callback);
glfwMakeContextCurrent(window);
glewInit();
glfwSwapInterval(1);
glClearColor(0.0, 0.0, 0.0, 1.0);
glDisable(GL_DEPTH_TEST);
glViewport(0,0,window_width, window_height);
//----------InitGL--------------//
cudaGLSetGLDevice(0);
createVBO(&vbo, &cuda_vbo_resource, cudaGraphicsMapFlagsWriteDiscard);
runCuda(&cuda_vbo_resource);
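//The VBO is filled once by the kernel above and then drawn every frame in the render loop below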
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(2, GL_FLOAT, sizeof(float)*5, 0);
glColorPointer(3, GL_FLOAT,sizeof(float)*5, (GLvoid*)(sizeof(float)*2));
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
while (!glfwWindowShouldClose(window))
{
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_POINTS, 0, window_width * window_height);
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
|
fe7ab04699827aa3e31b86e6225191ff01e3d658.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017 Rory mitchell
*/
#include <thrust/binary_search.h>
#include <thrust/count.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <hipcub/hipcub.hpp>
#include <algorithm>
#include <functional>
#include <future>
#include <numeric>
#include "common_hip.cuh"
#include "device_helpers_hip.cuh"
#include "dmlc/timer.h"
#include "gpu_hist_builder_hip.cuh"
namespace xgboost {
namespace tree {
void DeviceGMat::Init(int device_idx, const common::GHistIndexMatrix& gmat,
bst_ulong element_begin, bst_ulong element_end,
bst_ulong row_begin, bst_ulong row_end, int n_bins) {
dh::safe_cuda(hipSetDevice(device_idx));
CHECK(gidx_buffer.size()) << "gidx_buffer must be externally allocated";
CHECK_EQ(row_ptr.size(), (row_end - row_begin) + 1)
<< "row_ptr must be externally allocated";
common::CompressedBufferWriter cbw(n_bins);
std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size());
cbw.Write(host_buffer.data(), gmat.index.begin() + element_begin,
gmat.index.begin() + element_end);
gidx_buffer = host_buffer;
gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), n_bins);
// row_ptr
thrust::copy(gmat.row_ptr.data() + row_begin,
gmat.row_ptr.data() + row_end + 1, row_ptr.tbegin());
// normalise row_ptr
size_t start = gmat.row_ptr[row_begin];
thrust::transform(row_ptr.tbegin(), row_ptr.tend(), row_ptr.tbegin(),
[=] __device__(size_t val) { return val - start; });
}
void DeviceHist::Init(int n_bins_in) {
this->n_bins = n_bins_in;
CHECK(!data.empty()) << "DeviceHist must be externally allocated";
}
void DeviceHist::Reset(int device_idx) {
hipSetDevice(device_idx);
data.fill(bst_gpair());
}
bst_gpair* DeviceHist::GetLevelPtr(int depth) {
return data.data() + n_nodes(depth - 1) * n_bins;
}
int DeviceHist::LevelSize(int depth) { return n_bins * n_nodes_level(depth); }
HistBuilder DeviceHist::GetBuilder() {
return HistBuilder(data.data(), n_bins);
}
HistBuilder::HistBuilder(bst_gpair* ptr, int n_bins)
: d_hist(ptr), n_bins(n_bins) {}
__device__ void HistBuilder::Add(bst_gpair gpair, int gidx, int nidx) const {
int hist_idx = nidx * n_bins + gidx;
atomicAdd(&(d_hist[hist_idx].grad), gpair.grad); // OPTMARK: This and below
// line lead to about 3X
// slowdown due to memory
// dependency and access
// pattern issues.
atomicAdd(&(d_hist[hist_idx].hess), gpair.hess);
}
__device__ bst_gpair HistBuilder::Get(int gidx, int nidx) const {
return d_hist[nidx * n_bins + gidx];
}
GPUHistBuilder::GPUHistBuilder()
: initialised(false),
is_dense(false),
p_last_fmat_(nullptr),
prediction_cache_initialised(false) {}
GPUHistBuilder::~GPUHistBuilder() {
if (initialised) {
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
ncclCommDestroy(comms[d_idx]);
dh::safe_cuda(hipSetDevice(dList[d_idx]));
dh::safe_cuda(hipStreamDestroy(*(streams[d_idx])));
}
for (int num_d = 1; num_d <= n_devices;
++num_d) { // loop over number of devices used
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
ncclCommDestroy(find_split_comms[num_d - 1][d_idx]);
}
}
}
}
void GPUHistBuilder::Init(const TrainParam& param) {
CHECK(param.max_depth < 16) << "Tree depth too large.";
CHECK(param.grow_policy != TrainParam::kLossGuide)
<< "Loss guided growth policy not supported. Use CPU algorithm.";
this->param = param;
CHECK(param.n_gpus != 0) << "Must have at least one device";
int n_devices_all = dh::n_devices_all(param.n_gpus);
for (int device_idx = 0; device_idx < n_devices_all; device_idx++) {
if (!param.silent) {
size_t free_memory = dh::available_memory(device_idx);
const int mb_size = 1048576;
LOG(CONSOLE) << "Device: [" << device_idx << "] "
<< dh::device_name(device_idx) << " with "
<< free_memory / mb_size << " MB available device memory.";
}
}
}
void GPUHistBuilder::InitData(const std::vector<bst_gpair>& gpair,
DMatrix& fmat, // NOLINT
const RegTree& tree) {
// set member num_rows and n_devices for rest of GPUHistBuilder members
info = &fmat.info();
num_rows = info->num_row;
n_devices = dh::n_devices(param.n_gpus, num_rows);
if (!initialised) {
// set dList member
dList.resize(n_devices);
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices();
dList[d_idx] = device_idx;
}
// initialize nccl
comms.resize(n_devices);
streams.resize(n_devices);
dh::safe_nccl(ncclCommInitAll(comms.data(), n_devices,
dList.data())); // initialize communicator
// (One communicator per
// process)
// printf("# NCCL: Using devices\n");
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
streams[d_idx] =
reinterpret_cast<hipStream_t*>(malloc(sizeof(hipStream_t)));
dh::safe_cuda(hipSetDevice(dList[d_idx]));
dh::safe_cuda(hipStreamCreate(streams[d_idx]));
int cudaDev;
int rank;
hipDeviceProp_t prop;
dh::safe_nccl(ncclCommCuDevice(comms[d_idx], &cudaDev));
dh::safe_nccl(ncclCommUserRank(comms[d_idx], &rank));
dh::safe_cuda(hipGetDeviceProperties(&prop, cudaDev));
// printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
// prop.pciBusID, prop.name);
fflush(stdout);
}
// local find_split group of comms for each case of reduced number of GPUs
// to use
find_split_comms.resize(
n_devices,
std::vector<ncclComm_t>(n_devices)); // TODO(JCM): Excessive, but
// ok, and best to do
// here instead of
// repeatedly
for (int num_d = 1; num_d <= n_devices;
++num_d) { // loop over number of devices used
dh::safe_nccl(ncclCommInitAll(find_split_comms[num_d - 1].data(), num_d,
dList.data())); // initialize communicator
// (One communicator per
// process)
}
CHECK(fmat.SingleColBlock()) << "grow_gpu_hist: must have single column "
"block. Try setting 'tree_method' "
"parameter to 'exact'";
is_dense = info->num_nonzero == info->num_col * info->num_row;
hmat_.Init(&fmat, param.max_bin);
gmat_.cut = &hmat_;
gmat_.Init(&fmat);
int n_bins = hmat_.row_ptr.back();
int n_features = hmat_.row_ptr.size() - 1;
// delineate data onto multiple gpus
device_row_segments.push_back(0);
device_element_segments.push_back(0);
bst_uint offset = 0;
bst_uint shard_size = ::ceil(static_cast<double>(num_rows) / n_devices);
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
offset += shard_size;
offset = ::min(offset, num_rows);
device_row_segments.push_back(offset);
device_element_segments.push_back(gmat_.row_ptr[offset]);
}
// Build feature segments
std::vector<int> h_feature_segments;
for (int node = 0; node < n_nodes_level(param.max_depth - 1); node++) {
for (int fidx = 0; fidx < n_features; fidx++) {
h_feature_segments.push_back(hmat_.row_ptr[fidx] + node * n_bins);
}
}
h_feature_segments.push_back(n_nodes_level(param.max_depth - 1) * n_bins);
// Construct feature map
std::vector<int> h_gidx_feature_map(n_bins);
for (int fidx = 0; fidx < n_features; fidx++) {
for (int i = hmat_.row_ptr[fidx]; i < hmat_.row_ptr[fidx + 1]; i++) {
h_gidx_feature_map[i] = fidx;
}
}
int level_max_bins = n_nodes_level(param.max_depth - 1) * n_bins;
// allocate unique common data that reside on master device (NOTE: None
// currently)
// int master_device=dList[0];
// ba.allocate(master_device, );
// allocate vectors across all devices
temp_memory.resize(n_devices);
hist_vec.resize(n_devices);
nodes.resize(n_devices);
nodes_temp.resize(n_devices);
nodes_child_temp.resize(n_devices);
left_child_smallest.resize(n_devices);
left_child_smallest_temp.resize(n_devices);
feature_flags.resize(n_devices);
fidx_min_map.resize(n_devices);
feature_segments.resize(n_devices);
prediction_cache.resize(n_devices);
position.resize(n_devices);
position_tmp.resize(n_devices);
device_matrix.resize(n_devices);
device_gpair.resize(n_devices);
gidx_feature_map.resize(n_devices);
gidx_fvalue_map.resize(n_devices);
int find_split_n_devices = ::pow(2, ::floor(std::log2(n_devices)));
find_split_n_devices =
::min(n_nodes_level(param.max_depth), find_split_n_devices);
int max_num_nodes_device =
n_nodes_level(param.max_depth) / find_split_n_devices;
// num_rows_segment: for sharding rows onto gpus for splitting data
// num_elements_segment: for sharding rows (of elements) onto gpus for
// splitting data
// max_num_nodes_device: for sharding nodes onto gpus for split finding
// All other variables have full copy on gpu, with copy either being
// identical or just current portion (like for histogram) before AllReduce
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
bst_uint num_rows_segment =
device_row_segments[d_idx + 1] - device_row_segments[d_idx];
bst_ulong num_elements_segment =
device_element_segments[d_idx + 1] - device_element_segments[d_idx];
ba.allocate(
device_idx, &(hist_vec[d_idx].data),
n_nodes(param.max_depth - 1) * n_bins, &nodes[d_idx],
n_nodes(param.max_depth), &nodes_temp[d_idx], max_num_nodes_device,
&nodes_child_temp[d_idx], max_num_nodes_device,
&left_child_smallest[d_idx], n_nodes(param.max_depth),
&left_child_smallest_temp[d_idx], max_num_nodes_device,
&feature_flags[d_idx],
n_features, // may change but same on all devices
&fidx_min_map[d_idx],
hmat_.min_val.size(), // constant and same on all devices
&feature_segments[d_idx],
h_feature_segments.size(), // constant and same on all devices
&prediction_cache[d_idx], num_rows_segment, &position[d_idx],
num_rows_segment, &position_tmp[d_idx], num_rows_segment,
&device_gpair[d_idx], num_rows_segment,
&device_matrix[d_idx].gidx_buffer,
common::CompressedBufferWriter::CalculateBufferSize(
num_elements_segment,
n_bins), // constant and same on all devices
&device_matrix[d_idx].row_ptr, num_rows_segment + 1,
&gidx_feature_map[d_idx], n_bins, // constant and same on all devices
&gidx_fvalue_map[d_idx],
hmat_.cut.size()); // constant and same on all devices
// Copy Host to Device (assumes comes after ba.allocate that sets device)
device_matrix[d_idx].Init(
device_idx, gmat_, device_element_segments[d_idx],
device_element_segments[d_idx + 1], device_row_segments[d_idx],
device_row_segments[d_idx + 1], n_bins);
gidx_feature_map[d_idx] = h_gidx_feature_map;
gidx_fvalue_map[d_idx] = hmat_.cut;
feature_segments[d_idx] = h_feature_segments;
fidx_min_map[d_idx] = hmat_.min_val;
// Initialize, no copy
hist_vec[d_idx].Init(n_bins); // init host object
prediction_cache[d_idx].fill(0); // init device object (assumes comes
// after ba.allocate that sets device)
feature_flags[d_idx].fill(1); // init device object (assumes comes after
// ba.allocate that sets device)
}
if (!param.silent) {
const int mb_size = 1048576;
LOG(CONSOLE) << "Allocated " << ba.size() / mb_size << " MB";
}
initialised = true;
}
// copy or init to do every iteration
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
nodes[d_idx].fill(Node());
nodes_temp[d_idx].fill(Node());
nodes_child_temp[d_idx].fill(Node());
position[d_idx].fill(0);
device_gpair[d_idx].copy(gpair.begin() + device_row_segments[d_idx],
gpair.begin() + device_row_segments[d_idx + 1]);
subsample_gpair(&device_gpair[d_idx], param.subsample,
device_row_segments[d_idx]);
hist_vec[d_idx].Reset(device_idx);
// left_child_smallest and left_child_smallest_temp don't need to be
// initialized
}
dh::synchronize_n_devices(n_devices, dList);
p_last_fmat_ = &fmat;
}
void GPUHistBuilder::BuildHist(int depth) {
// dh::Timer time;
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
size_t begin = device_element_segments[d_idx];
size_t end = device_element_segments[d_idx + 1];
size_t row_begin = device_row_segments[d_idx];
size_t row_end = device_row_segments[d_idx + 1];
auto d_gidx = device_matrix[d_idx].gidx;
auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin();
auto d_position = position[d_idx].data();
auto d_gpair = device_gpair[d_idx].data();
auto d_left_child_smallest = left_child_smallest[d_idx].data();
auto hist_builder = hist_vec[d_idx].GetBuilder();
dh::TransformLbs(
device_idx, &temp_memory[d_idx], end - begin, d_row_ptr,
row_end - row_begin, [=] __device__(size_t local_idx, int local_ridx) {
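// invoked once per matrix element in this shard: local_idx is the element, local_ridx the row that owns it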
int nidx = d_position[local_ridx]; // OPTMARK: latency
if (!is_active(nidx, depth)) return;
// Only increment smallest node
bool is_smallest = (d_left_child_smallest[parent_nidx(nidx)] &&
is_left_child(nidx)) ||
(!d_left_child_smallest[parent_nidx(nidx)] &&
!is_left_child(nidx));
if (!is_smallest && depth > 0) return;
int gidx = d_gidx[local_idx];
bst_gpair gpair = d_gpair[local_ridx];
hist_builder.Add(gpair, gidx,
nidx); // OPTMARK: This is slow, could use
// shared memory or cache results
// instead of writing to global
// memory every time in atomic way.
});
}
dh::synchronize_n_devices(n_devices, dList);
// time.printElapsed("Add Time");
// (in-place) reduce each element of histogram (for only current level) across
// multiple gpus
// TODO(JCM): use out of place with pre-allocated buffer, but then have to
// copy
// back on device
// fprintf(stderr,"sizeof(bst_gpair)/sizeof(float)=%d\n",sizeof(bst_gpair)/sizeof(float));
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
dh::safe_nccl(ncclAllReduce(
reinterpret_cast<const void*>(hist_vec[d_idx].GetLevelPtr(depth)),
reinterpret_cast<void*>(hist_vec[d_idx].GetLevelPtr(depth)),
hist_vec[d_idx].LevelSize(depth) * sizeof(bst_gpair) / sizeof(float),
ncclFloat, ncclSum, comms[d_idx], *(streams[d_idx])));
}
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx])));
}
// if no NCCL, then presume only 1 GPU, then already correct
// time.printElapsed("Reduce-Add Time");
// Subtraction trick (applied to all devices in same way -- to avoid doing on
// master and then Bcast)
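// (the larger child's histogram is obtained as the parent's histogram minus the smaller child's histogram)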
if (depth > 0) {
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
auto hist_builder = hist_vec[d_idx].GetBuilder();
auto d_left_child_smallest = left_child_smallest[d_idx].data();
int n_sub_bins = (n_nodes_level(depth) / 2) * hist_builder.n_bins;
dh::launch_n(device_idx, n_sub_bins, [=] __device__(int idx) {
int nidx = n_nodes(depth - 1) + ((idx / hist_builder.n_bins) * 2);
bool left_smallest = d_left_child_smallest[parent_nidx(nidx)];
if (left_smallest) {
nidx++; // If left is smallest switch to right child
}
int gidx = idx % hist_builder.n_bins;
bst_gpair parent = hist_builder.Get(gidx, parent_nidx(nidx));
int other_nidx = left_smallest ? nidx - 1 : nidx + 1;
bst_gpair other = hist_builder.Get(gidx, other_nidx);
hist_builder.Add(parent - other, gidx,
nidx); // OPTMARK: This is slow, could use shared
// memory or cache results instead of writing to
// global memory every time in atomic way.
});
}
dh::synchronize_n_devices(n_devices, dList);
}
}
template <int BLOCK_THREADS>
__global__ void find_split_kernel(
const bst_gpair* d_level_hist, int* d_feature_segments, int depth,
int n_features, int n_bins, Node* d_nodes, Node* d_nodes_temp,
Node* d_nodes_child_temp, int nodes_offset_device, float* d_fidx_min_map,
float* d_gidx_fvalue_map, GPUTrainingParam gpu_param,
bool* d_left_child_smallest_temp, bool colsample, int* d_feature_flags) {
typedef hipcub::KeyValuePair<int, float> ArgMaxT;
typedef hipcub::BlockScan<bst_gpair, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>
BlockScanT;
typedef hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT;
typedef hipcub::BlockReduce<bst_gpair, BLOCK_THREADS> SumReduceT;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
struct UninitializedSplit : cub::Uninitialized<Split> {};
struct UninitializedGpair : cub::Uninitialized<bst_gpair> {};
__shared__ UninitializedSplit uninitialized_split;
Split& split = uninitialized_split.Alias();
__shared__ UninitializedGpair uninitialized_sum;
bst_gpair& shared_sum = uninitialized_sum.Alias();
__shared__ ArgMaxT block_max;
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
split = Split();
}
__syncthreads();
// below two are for accessing full-sized node list stored on each device
// always one block per node, BLOCK_THREADS threads per block
int level_node_idx = blockIdx.x + nodes_offset_device;
int node_idx = n_nodes(depth - 1) + level_node_idx;
for (int fidx = 0; fidx < n_features; fidx++) {
if (colsample && d_feature_flags[fidx] == 0) continue;
int begin = d_feature_segments[level_node_idx * n_features + fidx];
int end = d_feature_segments[level_node_idx * n_features + fidx + 1];
int gidx = (begin - (level_node_idx * n_bins)) + threadIdx.x;
bool thread_active = threadIdx.x < end - begin;
bst_gpair feature_sum = bst_gpair();
for (int reduce_begin = begin; reduce_begin < end;
reduce_begin += BLOCK_THREADS) {
// Scan histogram
bst_gpair bin = thread_active ? d_level_hist[reduce_begin + threadIdx.x]
: bst_gpair();
feature_sum +=
SumReduceT(temp_storage.sum_reduce).Reduce(bin, hipcub::Sum());
}
if (threadIdx.x == 0) {
shared_sum = feature_sum;
}
// __syncthreads(); // no need to synch because below there is a Scan
GpairCallbackOp prefix_op = GpairCallbackOp();
for (int scan_begin = begin; scan_begin < end;
scan_begin += BLOCK_THREADS) {
bst_gpair bin =
thread_active ? d_level_hist[scan_begin + threadIdx.x] : bst_gpair();
BlockScanT(temp_storage.scan)
.ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op);
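// after the scan, 'bin' holds the gradient sum of all lower bins of this feature for this node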
// Calculate gain
bst_gpair parent_sum = d_nodes[node_idx].sum_gradients;
float parent_gain = d_nodes[node_idx].root_gain;
bst_gpair missing = parent_sum - shared_sum;
bool missing_left;
float gain = thread_active
? loss_chg_missing(bin, missing, parent_sum, parent_gain,
gpu_param, missing_left)
: -FLT_MAX;
__syncthreads();
// Find thread with best gain
ArgMaxT tuple(threadIdx.x, gain);
ArgMaxT best =
MaxReduceT(temp_storage.max_reduce).Reduce(tuple, hipcub::ArgMax());
if (threadIdx.x == 0) {
block_max = best;
}
__syncthreads();
// Best thread updates split
if (threadIdx.x == block_max.key) {
float fvalue;
if (threadIdx.x == 0 &&
begin == scan_begin) { // check at start of first tile
fvalue = d_fidx_min_map[fidx];
} else {
fvalue = d_gidx_fvalue_map[gidx - 1];
}
bst_gpair left = missing_left ? bin + missing : bin;
bst_gpair right = parent_sum - left;
split.Update(gain, missing_left, fvalue, fidx, left, right, gpu_param);
}
__syncthreads();
} // end scan
} // end over features
// Create node
if (threadIdx.x == 0) {
if (d_nodes_temp == NULL) {
d_nodes[node_idx].split = split;
} else {
d_nodes_temp[blockIdx.x] = d_nodes[node_idx]; // first copy node values
d_nodes_temp[blockIdx.x].split = split; // now assign split
}
// if (depth == 0) {
// split.Print();
// }
Node *Nodeleft, *Noderight;
bool* left_child_smallest;
if (d_nodes_temp == NULL) {
Nodeleft = &d_nodes[left_child_nidx(node_idx)];
Noderight = &d_nodes[right_child_nidx(node_idx)];
left_child_smallest =
&d_left_child_smallest_temp[node_idx]; // NOTE: not per level, even
// though _temp variable name
} else {
Nodeleft = &d_nodes_child_temp[blockIdx.x * 2 + 0];
Noderight = &d_nodes_child_temp[blockIdx.x * 2 + 1];
left_child_smallest = &d_left_child_smallest_temp[blockIdx.x];
}
*Nodeleft =
Node(split.left_sum,
CalcGain(gpu_param, split.left_sum.grad, split.left_sum.hess),
CalcWeight(gpu_param, split.left_sum.grad, split.left_sum.hess));
*Noderight =
Node(split.right_sum,
CalcGain(gpu_param, split.right_sum.grad, split.right_sum.hess),
CalcWeight(gpu_param, split.right_sum.grad, split.right_sum.hess));
// Record smallest node
if (split.left_sum.hess <= split.right_sum.hess) {
*left_child_smallest = true;
} else {
*left_child_smallest = false;
}
}
}
#define MIN_BLOCK_THREADS 32
#define CHUNK_BLOCK_THREADS 32
// MAX_BLOCK_THREADS of 1024 is hard-coded maximum block size due
// to the compute capability 3.5 and above requirement
// for Maximum number of threads per block
#define MAX_BLOCK_THREADS 1024
void GPUHistBuilder::FindSplit(int depth) {
// Specialised based on max_bins
this->FindSplitSpecialize<MIN_BLOCK_THREADS>(depth);
}
template <>
void GPUHistBuilder::FindSplitSpecialize<MAX_BLOCK_THREADS>(int depth) {
LaunchFindSplit<MAX_BLOCK_THREADS>(depth);
}
template <int BLOCK_THREADS>
void GPUHistBuilder::FindSplitSpecialize(int depth) {
if (param.max_bin <= BLOCK_THREADS) {
LaunchFindSplit<BLOCK_THREADS>(depth);
} else {
this->FindSplitSpecialize<BLOCK_THREADS + CHUNK_BLOCK_THREADS>(depth);
}
}
template <int BLOCK_THREADS>
void GPUHistBuilder::LaunchFindSplit(int depth) {
bool colsample =
param.colsample_bylevel < 1.0 || param.colsample_bytree < 1.0;
int dosimuljob = 1;
int simuljob = 1; // whether to do job on single GPU and broadcast (0) or to
// do same job on each GPU (1) (could make user parameter,
// but too fine-grained maybe)
int findsplit_shardongpus = 0; // too expensive generally, disable for now
if (findsplit_shardongpus) {
dosimuljob = 0;
// use power of 2 for split finder because nodes are power of 2 (broadcast
// result to remaining devices)
int find_split_n_devices = ::pow(2, ::floor(std::log2(n_devices)));
find_split_n_devices = ::min(n_nodes_level(depth), find_split_n_devices);
int num_nodes_device = n_nodes_level(depth) / find_split_n_devices;
int num_nodes_child_device =
n_nodes_level(depth + 1) / find_split_n_devices;
const int GRID_SIZE = num_nodes_device;
// NOTE: No need to scatter before gather as all devices have same copy of
// nodes, and within find_split_kernel() nodes_temp is given values from
// nodes
// for all nodes (split among devices) find best split per node
for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
int nodes_offset_device = d_idx * num_nodes_device;
hipLaunchKernelGGL(( find_split_kernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), 0, 0,
(const bst_gpair*)(hist_vec[d_idx].GetLevelPtr(depth)),
feature_segments[d_idx].data(), depth, (info->num_col),
(hmat_.row_ptr.back()), nodes[d_idx].data(), nodes_temp[d_idx].data(),
nodes_child_temp[d_idx].data(), nodes_offset_device,
fidx_min_map[d_idx].data(), gidx_fvalue_map[d_idx].data(),
GPUTrainingParam(param), left_child_smallest_temp[d_idx].data(),
colsample, feature_flags[d_idx].data());
}
// nccl only on devices that did split
dh::synchronize_n_devices(find_split_n_devices, dList);
for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
dh::safe_nccl(ncclAllGather(
reinterpret_cast<const void*>(nodes_temp[d_idx].data()),
num_nodes_device * sizeof(Node) / sizeof(char), ncclChar,
reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)),
find_split_comms[find_split_n_devices - 1][d_idx],
*(streams[d_idx])));
if (depth !=
param.max_depth) { // don't copy over children nodes if no more nodes
dh::safe_nccl(ncclAllGather(
reinterpret_cast<const void*>(nodes_child_temp[d_idx].data()),
num_nodes_child_device * sizeof(Node) / sizeof(char), ncclChar,
reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)),
find_split_comms[find_split_n_devices - 1][d_idx],
*(streams[d_idx]))); // Note offset by n_nodes(depth)
// for recvbuff for child nodes
}
dh::safe_nccl(ncclAllGather(
reinterpret_cast<const void*>(left_child_smallest_temp[d_idx].data()),
num_nodes_device * sizeof(bool) / sizeof(char), ncclChar,
reinterpret_cast<void*>(left_child_smallest[d_idx].data() +
n_nodes(depth - 1)),
find_split_comms[find_split_n_devices - 1][d_idx],
*(streams[d_idx])));
}
for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx])));
}
if (n_devices > find_split_n_devices && n_devices > 1) {
// if n_devices==1, no need to Bcast
// if find_split_n_devices==1, this is just a copy operation, else it
// copies
// from master to all nodes in case extra devices not involved in split
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
int master_device = dList[0];
dh::safe_nccl(ncclBcast(
reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)),
n_nodes_level(depth) * sizeof(Node) / sizeof(char), ncclChar,
master_device, comms[d_idx], *(streams[d_idx])));
if (depth != param.max_depth) { // don't copy over children nodes if no
// more nodes
dh::safe_nccl(ncclBcast(
reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)),
n_nodes_level(depth + 1) * sizeof(Node) / sizeof(char), ncclChar,
master_device, comms[d_idx], *(streams[d_idx])));
}
dh::safe_nccl(ncclBcast(
reinterpret_cast<void*>(left_child_smallest[d_idx].data() +
n_nodes(depth - 1)),
n_nodes_level(depth) * sizeof(bool) / sizeof(char), ncclChar,
master_device, comms[d_idx], *(streams[d_idx])));
}
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx])));
}
}
} else if (simuljob == 0) {
dosimuljob = 0;
int num_nodes_device = n_nodes_level(depth);
const int GRID_SIZE = num_nodes_device;
int d_idx = 0;
int master_device = dList[d_idx];
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
int nodes_offset_device = d_idx * num_nodes_device;
hipLaunchKernelGGL(( find_split_kernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), 0, 0,
(const bst_gpair*)(hist_vec[d_idx].GetLevelPtr(depth)),
feature_segments[d_idx].data(), depth, (info->num_col),
(hmat_.row_ptr.back()), nodes[d_idx].data(), NULL, NULL,
nodes_offset_device, fidx_min_map[d_idx].data(),
gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param),
left_child_smallest[d_idx].data(), colsample,
feature_flags[d_idx].data());
// broadcast result
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
dh::safe_nccl(ncclBcast(
reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)),
n_nodes_level(depth) * sizeof(Node) / sizeof(char), ncclChar,
master_device, comms[d_idx], *(streams[d_idx])));
if (depth !=
param.max_depth) { // don't copy over children nodes if no more nodes
dh::safe_nccl(ncclBcast(
reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)),
n_nodes_level(depth + 1) * sizeof(Node) / sizeof(char), ncclChar,
master_device, comms[d_idx], *(streams[d_idx])));
}
dh::safe_nccl(
ncclBcast(reinterpret_cast<void*>(left_child_smallest[d_idx].data() +
n_nodes(depth - 1)),
n_nodes_level(depth) * sizeof(bool) / sizeof(char),
ncclChar, master_device, comms[d_idx], *(streams[d_idx])));
}
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
dh::safe_cuda(hipStreamSynchronize(*(streams[d_idx])));
}
} else {
dosimuljob = 1;
}
if (dosimuljob) { // if no NCCL or simuljob==1, do this
int num_nodes_device = n_nodes_level(depth);
const int GRID_SIZE = num_nodes_device;
// all GPUs do same work
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
int nodes_offset_device = 0;
hipLaunchKernelGGL(( find_split_kernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), 0, 0,
(const bst_gpair*)(hist_vec[d_idx].GetLevelPtr(depth)),
feature_segments[d_idx].data(), depth, (info->num_col),
(hmat_.row_ptr.back()), nodes[d_idx].data(), NULL, NULL,
nodes_offset_device, fidx_min_map[d_idx].data(),
gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param),
left_child_smallest[d_idx].data(), colsample,
feature_flags[d_idx].data());
}
}
  // NOTE: No need to synchronize with host, as all of the above are pure P2P
  // or on-device ops
}
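// Node indexing as used throughout this file: the tree lives in a dense array
// in breadth-first order, so a node at index pos has its children at 2*pos+1
// (left) and 2*pos+2 (right); n_nodes(d) counts all nodes of a complete tree
// down to depth d and n_nodes_level(d) counts the nodes on level d alone,
// e.g. level 2 holds 4 nodes at indices 3..6 (= n_nodes(1) .. n_nodes(2) - 1).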
void GPUHistBuilder::InitFirstNode(const std::vector<bst_gpair>& gpair) {
// Perform asynchronous reduction on each gpu
std::vector<bst_gpair> device_sums(n_devices);
#pragma omp parallel for num_threads(n_devices)
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
auto begin = device_gpair[d_idx].tbegin();
auto end = device_gpair[d_idx].tend();
bst_gpair init = bst_gpair();
auto binary_op = thrust::plus<bst_gpair>();
device_sums[d_idx] = thrust::reduce(begin, end, init, binary_op);
}
bst_gpair sum = bst_gpair();
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
sum += device_sums[d_idx];
}
// Setup first node so all devices have same first node (here done same on all
// devices, or could have done one device and Bcast if worried about exact
// precision issues)
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
auto d_nodes = nodes[d_idx].data();
auto gpu_param = GPUTrainingParam(param);
dh::launch_n(device_idx, 1, [=] __device__(int idx) {
bst_gpair sum_gradients = sum;
d_nodes[idx] =
Node(sum_gradients,
CalcGain(gpu_param, sum_gradients.grad, sum_gradients.hess),
CalcWeight(gpu_param, sum_gradients.grad, sum_gradients.hess));
});
}
// synch all devices to host before moving on (No, can avoid because BuildHist
// calls another kernel in default stream)
// dh::synchronize_n_devices(n_devices, dList);
}
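// UpdatePosition routes every training row to its child node once a level's
// splits are fixed: the dense path reads the quantized value directly at
// (row, split feature), while the sparse path first sends each row in the
// split's default (missing-value) direction and then overrides that choice
// wherever the row actually stores a value for the split feature.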
void GPUHistBuilder::UpdatePosition(int depth) {
if (is_dense) {
this->UpdatePositionDense(depth);
} else {
this->UpdatePositionSparse(depth);
}
}
void GPUHistBuilder::UpdatePositionDense(int depth) {
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
auto d_position = position[d_idx].data();
Node* d_nodes = nodes[d_idx].data();
auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data();
auto d_gidx = device_matrix[d_idx].gidx;
int n_columns = info->num_col;
size_t begin = device_row_segments[d_idx];
size_t end = device_row_segments[d_idx + 1];
dh::launch_n(device_idx, end - begin, [=] __device__(size_t local_idx) {
int pos = d_position[local_idx];
if (!is_active(pos, depth)) {
return;
}
Node node = d_nodes[pos];
if (node.IsLeaf()) {
return;
}
int gidx = d_gidx[local_idx *
static_cast<size_t>(n_columns) + static_cast<size_t>(node.split.findex)];
float fvalue = d_gidx_fvalue_map[gidx];
if (fvalue <= node.split.fvalue) {
d_position[local_idx] = left_child_nidx(pos);
} else {
d_position[local_idx] = right_child_nidx(pos);
}
});
}
dh::synchronize_n_devices(n_devices, dList);
// dh::safe_cuda(hipDeviceSynchronize());
}
void GPUHistBuilder::UpdatePositionSparse(int depth) {
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
auto d_position = position[d_idx].data();
auto d_position_tmp = position_tmp[d_idx].data();
Node* d_nodes = nodes[d_idx].data();
auto d_gidx_feature_map = gidx_feature_map[d_idx].data();
auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data();
auto d_gidx = device_matrix[d_idx].gidx;
auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin();
size_t row_begin = device_row_segments[d_idx];
size_t row_end = device_row_segments[d_idx + 1];
size_t element_begin = device_element_segments[d_idx];
size_t element_end = device_element_segments[d_idx + 1];
// Update missing direction
dh::launch_n(device_idx, row_end - row_begin,
[=] __device__(int local_idx) {
int pos = d_position[local_idx];
if (!is_active(pos, depth)) {
d_position_tmp[local_idx] = pos;
return;
}
Node node = d_nodes[pos];
if (node.IsLeaf()) {
d_position_tmp[local_idx] = pos;
return;
} else if (node.split.missing_left) {
d_position_tmp[local_idx] = pos * 2 + 1;
} else {
d_position_tmp[local_idx] = pos * 2 + 2;
}
});
// Update node based on fvalue where exists
// OPTMARK: This kernel is very inefficient for both compute and memory,
// dominated by memory dependency / access patterns
dh::TransformLbs(
device_idx, &temp_memory[d_idx], element_end - element_begin, d_row_ptr,
row_end - row_begin, [=] __device__(size_t local_idx, int local_ridx) {
int pos = d_position[local_ridx];
if (!is_active(pos, depth)) {
return;
}
Node node = d_nodes[pos];
if (node.IsLeaf()) {
return;
}
int gidx = d_gidx[local_idx];
int findex = d_gidx_feature_map[gidx]; // OPTMARK: slowest global
// memory access, maybe setup
// position, gidx, etc. as
// combined structure?
if (findex == node.split.findex) {
float fvalue = d_gidx_fvalue_map[gidx];
if (fvalue <= node.split.fvalue) {
d_position_tmp[local_ridx] = left_child_nidx(pos);
} else {
d_position_tmp[local_ridx] = right_child_nidx(pos);
}
}
});
position[d_idx] = position_tmp[d_idx];
}
dh::synchronize_n_devices(n_devices, dList);
}
void GPUHistBuilder::ColSampleTree() {
if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return;
feature_set_tree.resize(info->num_col);
std::iota(feature_set_tree.begin(), feature_set_tree.end(), 0);
feature_set_tree = col_sample(feature_set_tree, param.colsample_bytree);
}
void GPUHistBuilder::ColSampleLevel() {
if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return;
feature_set_level.resize(feature_set_tree.size());
feature_set_level = col_sample(feature_set_tree, param.colsample_bylevel);
std::vector<int> h_feature_flags(info->num_col, 0);
for (auto fidx : feature_set_level) {
h_feature_flags[fidx] = 1;
}
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(hipSetDevice(device_idx));
feature_flags[d_idx] = h_feature_flags;
}
dh::synchronize_n_devices(n_devices, dList);
}
bool GPUHistBuilder::UpdatePredictionCache(
const DMatrix* data, std::vector<bst_float>* p_out_preds) {
std::vector<bst_float>& out_preds = *p_out_preds;
if (nodes.empty() || !p_last_fmat_ || data != p_last_fmat_) {
return false;
}
if (!prediction_cache_initialised) {
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
size_t row_begin = device_row_segments[d_idx];
size_t row_end = device_row_segments[d_idx + 1];
prediction_cache[d_idx].copy(out_preds.begin() + row_begin,
out_preds.begin() + row_end);
}
prediction_cache_initialised = true;
}
dh::synchronize_n_devices(n_devices, dList);
float eps = param.learning_rate;
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
size_t row_begin = device_row_segments[d_idx];
size_t row_end = device_row_segments[d_idx + 1];
auto d_nodes = nodes[d_idx].data();
auto d_position = position[d_idx].data();
auto d_prediction_cache = prediction_cache[d_idx].data();
dh::launch_n(device_idx, prediction_cache[d_idx].size(),
[=] __device__(int local_idx) {
int pos = d_position[local_idx];
d_prediction_cache[local_idx] += d_nodes[pos].weight * eps;
});
thrust::copy(prediction_cache[d_idx].tbegin(),
prediction_cache[d_idx].tend(), &out_preds[row_begin]);
}
dh::synchronize_n_devices(n_devices, dList);
return true;
}
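// Update() is the per-tree driver: initialize the data and the root node once,
// then for each level run column sampling, histogram construction, split
// finding and position updates, and finally copy the dense node array from the
// master device back into the host RegTree.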
void GPUHistBuilder::Update(const std::vector<bst_gpair>& gpair,
DMatrix* p_fmat, RegTree* p_tree) {
this->InitData(gpair, *p_fmat, *p_tree);
this->InitFirstNode(gpair);
this->ColSampleTree();
for (int depth = 0; depth < param.max_depth; depth++) {
this->ColSampleLevel();
this->BuildHist(depth);
this->FindSplit(depth);
this->UpdatePosition(depth);
}
// done with multi-GPU, pass back result from master to tree on host
int master_device = dList[0];
dh::safe_cuda(hipSetDevice(master_device));
dense2sparse_tree(p_tree, nodes[0].tbegin(), nodes[0].tend(), param);
}
} // namespace tree
} // namespace xgboost
| fe7ab04699827aa3e31b86e6225191ff01e3d658.cu | /*!
* Copyright 2017 Rory mitchell
*/
#include <thrust/binary_search.h>
#include <thrust/count.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <cub/cub.cuh>
#include <algorithm>
#include <functional>
#include <future>
#include <numeric>
#include "common.cuh"
#include "device_helpers.cuh"
#include "dmlc/timer.h"
#include "gpu_hist_builder.cuh"
namespace xgboost {
namespace tree {
void DeviceGMat::Init(int device_idx, const common::GHistIndexMatrix& gmat,
bst_ulong element_begin, bst_ulong element_end,
bst_ulong row_begin, bst_ulong row_end, int n_bins) {
dh::safe_cuda(cudaSetDevice(device_idx));
CHECK(gidx_buffer.size()) << "gidx_buffer must be externally allocated";
CHECK_EQ(row_ptr.size(), (row_end - row_begin) + 1)
<< "row_ptr must be externally allocated";
common::CompressedBufferWriter cbw(n_bins);
std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size());
cbw.Write(host_buffer.data(), gmat.index.begin() + element_begin,
gmat.index.begin() + element_end);
gidx_buffer = host_buffer;
gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), n_bins);
// row_ptr
thrust::copy(gmat.row_ptr.data() + row_begin,
gmat.row_ptr.data() + row_end + 1, row_ptr.tbegin());
// normalise row_ptr
size_t start = gmat.row_ptr[row_begin];
thrust::transform(row_ptr.tbegin(), row_ptr.tend(), row_ptr.tbegin(),
[=] __device__(size_t val) { return val - start; });
}
void DeviceHist::Init(int n_bins_in) {
this->n_bins = n_bins_in;
CHECK(!data.empty()) << "DeviceHist must be externally allocated";
}
void DeviceHist::Reset(int device_idx) {
cudaSetDevice(device_idx);
data.fill(bst_gpair());
}
bst_gpair* DeviceHist::GetLevelPtr(int depth) {
return data.data() + n_nodes(depth - 1) * n_bins;
}
int DeviceHist::LevelSize(int depth) { return n_bins * n_nodes_level(depth); }
HistBuilder DeviceHist::GetBuilder() {
return HistBuilder(data.data(), n_bins);
}
HistBuilder::HistBuilder(bst_gpair* ptr, int n_bins)
: d_hist(ptr), n_bins(n_bins) {}
__device__ void HistBuilder::Add(bst_gpair gpair, int gidx, int nidx) const {
int hist_idx = nidx * n_bins + gidx;
atomicAdd(&(d_hist[hist_idx].grad), gpair.grad); // OPTMARK: This and below
// line lead to about 3X
// slowdown due to memory
// dependency and access
// pattern issues.
atomicAdd(&(d_hist[hist_idx].hess), gpair.hess);
}
__device__ bst_gpair HistBuilder::Get(int gidx, int nidx) const {
return d_hist[nidx * n_bins + gidx];
}
GPUHistBuilder::GPUHistBuilder()
: initialised(false),
is_dense(false),
p_last_fmat_(nullptr),
prediction_cache_initialised(false) {}
GPUHistBuilder::~GPUHistBuilder() {
if (initialised) {
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
ncclCommDestroy(comms[d_idx]);
dh::safe_cuda(cudaSetDevice(dList[d_idx]));
dh::safe_cuda(cudaStreamDestroy(*(streams[d_idx])));
}
for (int num_d = 1; num_d <= n_devices;
++num_d) { // loop over number of devices used
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
ncclCommDestroy(find_split_comms[num_d - 1][d_idx]);
}
}
}
}
void GPUHistBuilder::Init(const TrainParam& param) {
CHECK(param.max_depth < 16) << "Tree depth too large.";
CHECK(param.grow_policy != TrainParam::kLossGuide)
<< "Loss guided growth policy not supported. Use CPU algorithm.";
this->param = param;
CHECK(param.n_gpus != 0) << "Must have at least one device";
int n_devices_all = dh::n_devices_all(param.n_gpus);
for (int device_idx = 0; device_idx < n_devices_all; device_idx++) {
if (!param.silent) {
size_t free_memory = dh::available_memory(device_idx);
const int mb_size = 1048576;
LOG(CONSOLE) << "Device: [" << device_idx << "] "
<< dh::device_name(device_idx) << " with "
<< free_memory / mb_size << " MB available device memory.";
}
}
}
void GPUHistBuilder::InitData(const std::vector<bst_gpair>& gpair,
DMatrix& fmat, // NOLINT
const RegTree& tree) {
// set member num_rows and n_devices for rest of GPUHistBuilder members
info = &fmat.info();
num_rows = info->num_row;
n_devices = dh::n_devices(param.n_gpus, num_rows);
if (!initialised) {
// set dList member
dList.resize(n_devices);
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices();
dList[d_idx] = device_idx;
}
// initialize nccl
comms.resize(n_devices);
streams.resize(n_devices);
dh::safe_nccl(ncclCommInitAll(comms.data(), n_devices,
dList.data())); // initialize communicator
// (One communicator per
// process)
// printf("# NCCL: Using devices\n");
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
streams[d_idx] =
reinterpret_cast<cudaStream_t*>(malloc(sizeof(cudaStream_t)));
dh::safe_cuda(cudaSetDevice(dList[d_idx]));
dh::safe_cuda(cudaStreamCreate(streams[d_idx]));
int cudaDev;
int rank;
cudaDeviceProp prop;
dh::safe_nccl(ncclCommCuDevice(comms[d_idx], &cudaDev));
dh::safe_nccl(ncclCommUserRank(comms[d_idx], &rank));
dh::safe_cuda(cudaGetDeviceProperties(&prop, cudaDev));
// printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
// prop.pciBusID, prop.name);
fflush(stdout);
}
// local find_split group of comms for each case of reduced number of GPUs
// to use
find_split_comms.resize(
n_devices,
std::vector<ncclComm_t>(n_devices)); // TODO(JCM): Excessive, but
// ok, and best to do
// here instead of
// repeatedly
for (int num_d = 1; num_d <= n_devices;
++num_d) { // loop over number of devices used
dh::safe_nccl(ncclCommInitAll(find_split_comms[num_d - 1].data(), num_d,
dList.data())); // initialize communicator
// (One communicator per
// process)
}
CHECK(fmat.SingleColBlock()) << "grow_gpu_hist: must have single column "
"block. Try setting 'tree_method' "
"parameter to 'exact'";
is_dense = info->num_nonzero == info->num_col * info->num_row;
hmat_.Init(&fmat, param.max_bin);
gmat_.cut = &hmat_;
gmat_.Init(&fmat);
int n_bins = hmat_.row_ptr.back();
int n_features = hmat_.row_ptr.size() - 1;
    // delineate data onto multiple GPUs
device_row_segments.push_back(0);
device_element_segments.push_back(0);
bst_uint offset = 0;
bst_uint shard_size = std::ceil(static_cast<double>(num_rows) / n_devices);
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
offset += shard_size;
offset = std::min(offset, num_rows);
device_row_segments.push_back(offset);
device_element_segments.push_back(gmat_.row_ptr[offset]);
}
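    // Example of the row sharding above: with num_rows = 10 and n_devices = 3,
    // shard_size = ceil(10 / 3) = 4, so device_row_segments becomes
    // {0, 4, 8, 10} and device_element_segments holds the matching
    // gmat_.row_ptr offsets.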
// Build feature segments
std::vector<int> h_feature_segments;
for (int node = 0; node < n_nodes_level(param.max_depth - 1); node++) {
for (int fidx = 0; fidx < n_features; fidx++) {
h_feature_segments.push_back(hmat_.row_ptr[fidx] + node * n_bins);
}
}
h_feature_segments.push_back(n_nodes_level(param.max_depth - 1) * n_bins);
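    // Layout note: each level histogram stores one block of n_bins per node, so
    // entry (node, fidx) = hmat_.row_ptr[fidx] + node * n_bins is the first bin
    // of feature fidx inside that node's histogram; find_split_kernel scans the
    // half-open range [segments[i], segments[i + 1]) for every (node, feature).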
// Construct feature map
std::vector<int> h_gidx_feature_map(n_bins);
for (int fidx = 0; fidx < n_features; fidx++) {
for (int i = hmat_.row_ptr[fidx]; i < hmat_.row_ptr[fidx + 1]; i++) {
h_gidx_feature_map[i] = fidx;
}
}
int level_max_bins = n_nodes_level(param.max_depth - 1) * n_bins;
// allocate unique common data that reside on master device (NOTE: None
// currently)
// int master_device=dList[0];
// ba.allocate(master_device, );
// allocate vectors across all devices
temp_memory.resize(n_devices);
hist_vec.resize(n_devices);
nodes.resize(n_devices);
nodes_temp.resize(n_devices);
nodes_child_temp.resize(n_devices);
left_child_smallest.resize(n_devices);
left_child_smallest_temp.resize(n_devices);
feature_flags.resize(n_devices);
fidx_min_map.resize(n_devices);
feature_segments.resize(n_devices);
prediction_cache.resize(n_devices);
position.resize(n_devices);
position_tmp.resize(n_devices);
device_matrix.resize(n_devices);
device_gpair.resize(n_devices);
gidx_feature_map.resize(n_devices);
gidx_fvalue_map.resize(n_devices);
int find_split_n_devices = std::pow(2, std::floor(std::log2(n_devices)));
find_split_n_devices =
std::min(n_nodes_level(param.max_depth), find_split_n_devices);
int max_num_nodes_device =
n_nodes_level(param.max_depth) / find_split_n_devices;
// num_rows_segment: for sharding rows onto gpus for splitting data
// num_elements_segment: for sharding rows (of elements) onto gpus for
// splitting data
// max_num_nodes_device: for sharding nodes onto gpus for split finding
// All other variables have full copy on gpu, with copy either being
// identical or just current portion (like for histogram) before AllReduce
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
bst_uint num_rows_segment =
device_row_segments[d_idx + 1] - device_row_segments[d_idx];
bst_ulong num_elements_segment =
device_element_segments[d_idx + 1] - device_element_segments[d_idx];
ba.allocate(
device_idx, &(hist_vec[d_idx].data),
n_nodes(param.max_depth - 1) * n_bins, &nodes[d_idx],
n_nodes(param.max_depth), &nodes_temp[d_idx], max_num_nodes_device,
&nodes_child_temp[d_idx], max_num_nodes_device,
&left_child_smallest[d_idx], n_nodes(param.max_depth),
&left_child_smallest_temp[d_idx], max_num_nodes_device,
&feature_flags[d_idx],
n_features, // may change but same on all devices
&fidx_min_map[d_idx],
hmat_.min_val.size(), // constant and same on all devices
&feature_segments[d_idx],
h_feature_segments.size(), // constant and same on all devices
&prediction_cache[d_idx], num_rows_segment, &position[d_idx],
num_rows_segment, &position_tmp[d_idx], num_rows_segment,
&device_gpair[d_idx], num_rows_segment,
&device_matrix[d_idx].gidx_buffer,
common::CompressedBufferWriter::CalculateBufferSize(
num_elements_segment,
n_bins), // constant and same on all devices
&device_matrix[d_idx].row_ptr, num_rows_segment + 1,
&gidx_feature_map[d_idx], n_bins, // constant and same on all devices
&gidx_fvalue_map[d_idx],
hmat_.cut.size()); // constant and same on all devices
// Copy Host to Device (assumes comes after ba.allocate that sets device)
device_matrix[d_idx].Init(
device_idx, gmat_, device_element_segments[d_idx],
device_element_segments[d_idx + 1], device_row_segments[d_idx],
device_row_segments[d_idx + 1], n_bins);
gidx_feature_map[d_idx] = h_gidx_feature_map;
gidx_fvalue_map[d_idx] = hmat_.cut;
feature_segments[d_idx] = h_feature_segments;
fidx_min_map[d_idx] = hmat_.min_val;
// Initialize, no copy
hist_vec[d_idx].Init(n_bins); // init host object
prediction_cache[d_idx].fill(0); // init device object (assumes comes
// after ba.allocate that sets device)
feature_flags[d_idx].fill(1); // init device object (assumes comes after
// ba.allocate that sets device)
}
if (!param.silent) {
const int mb_size = 1048576;
LOG(CONSOLE) << "Allocated " << ba.size() / mb_size << " MB";
}
initialised = true;
}
// copy or init to do every iteration
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
nodes[d_idx].fill(Node());
nodes_temp[d_idx].fill(Node());
nodes_child_temp[d_idx].fill(Node());
position[d_idx].fill(0);
device_gpair[d_idx].copy(gpair.begin() + device_row_segments[d_idx],
gpair.begin() + device_row_segments[d_idx + 1]);
subsample_gpair(&device_gpair[d_idx], param.subsample,
device_row_segments[d_idx]);
hist_vec[d_idx].Reset(device_idx);
// left_child_smallest and left_child_smallest_temp don't need to be
// initialized
}
dh::synchronize_n_devices(n_devices, dList);
p_last_fmat_ = &fmat;
}
void GPUHistBuilder::BuildHist(int depth) {
// dh::Timer time;
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
size_t begin = device_element_segments[d_idx];
size_t end = device_element_segments[d_idx + 1];
size_t row_begin = device_row_segments[d_idx];
size_t row_end = device_row_segments[d_idx + 1];
auto d_gidx = device_matrix[d_idx].gidx;
auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin();
auto d_position = position[d_idx].data();
auto d_gpair = device_gpair[d_idx].data();
auto d_left_child_smallest = left_child_smallest[d_idx].data();
auto hist_builder = hist_vec[d_idx].GetBuilder();
dh::TransformLbs(
device_idx, &temp_memory[d_idx], end - begin, d_row_ptr,
row_end - row_begin, [=] __device__(size_t local_idx, int local_ridx) {
int nidx = d_position[local_ridx]; // OPTMARK: latency
if (!is_active(nidx, depth)) return;
// Only increment smallest node
bool is_smallest = (d_left_child_smallest[parent_nidx(nidx)] &&
is_left_child(nidx)) ||
(!d_left_child_smallest[parent_nidx(nidx)] &&
!is_left_child(nidx));
if (!is_smallest && depth > 0) return;
int gidx = d_gidx[local_idx];
bst_gpair gpair = d_gpair[local_ridx];
hist_builder.Add(gpair, gidx,
nidx); // OPTMARK: This is slow, could use
// shared memory or cache results
                                   // instead of writing to global
// memory every time in atomic way.
});
}
dh::synchronize_n_devices(n_devices, dList);
// time.printElapsed("Add Time");
// (in-place) reduce each element of histogram (for only current level) across
// multiple gpus
// TODO(JCM): use out of place with pre-allocated buffer, but then have to
// copy
// back on device
// fprintf(stderr,"sizeof(bst_gpair)/sizeof(float)=%d\n",sizeof(bst_gpair)/sizeof(float));
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
dh::safe_nccl(ncclAllReduce(
reinterpret_cast<const void*>(hist_vec[d_idx].GetLevelPtr(depth)),
reinterpret_cast<void*>(hist_vec[d_idx].GetLevelPtr(depth)),
hist_vec[d_idx].LevelSize(depth) * sizeof(bst_gpair) / sizeof(float),
ncclFloat, ncclSum, comms[d_idx], *(streams[d_idx])));
}
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx])));
}
// if no NCCL, then presume only 1 GPU, then already correct
// time.printElapsed("Reduce-Add Time");
// Subtraction trick (applied to all devices in same way -- to avoid doing on
// master and then Bcast)
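  // The trick relies on hist(parent) = hist(left child) + hist(right child):
  // BuildHist only accumulated the smaller child of each sibling pair, so the
  // larger child's histogram is recovered as hist(parent) - hist(smaller).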
if (depth > 0) {
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
auto hist_builder = hist_vec[d_idx].GetBuilder();
auto d_left_child_smallest = left_child_smallest[d_idx].data();
int n_sub_bins = (n_nodes_level(depth) / 2) * hist_builder.n_bins;
dh::launch_n(device_idx, n_sub_bins, [=] __device__(int idx) {
int nidx = n_nodes(depth - 1) + ((idx / hist_builder.n_bins) * 2);
bool left_smallest = d_left_child_smallest[parent_nidx(nidx)];
if (left_smallest) {
nidx++; // If left is smallest switch to right child
}
int gidx = idx % hist_builder.n_bins;
bst_gpair parent = hist_builder.Get(gidx, parent_nidx(nidx));
int other_nidx = left_smallest ? nidx - 1 : nidx + 1;
bst_gpair other = hist_builder.Get(gidx, other_nidx);
hist_builder.Add(parent - other, gidx,
nidx); // OPTMARK: This is slow, could use shared
                                 // memory or cache results instead of writing to
// global memory every time in atomic way.
});
}
dh::synchronize_n_devices(n_devices, dList);
}
}
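// find_split_kernel launches one thread block per node of the current level.
// Threads cooperatively scan each feature's bin range of the level histogram
// (a block-wide exclusive prefix sum of gradient pairs), evaluate the split
// gain at every bin boundary (loss_chg_missing also picks the better direction
// for missing values), reduce to the best (bin, gain) with an argmax, and the
// winning thread records the split and initializes the two child nodes.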
template <int BLOCK_THREADS>
__global__ void find_split_kernel(
const bst_gpair* d_level_hist, int* d_feature_segments, int depth,
int n_features, int n_bins, Node* d_nodes, Node* d_nodes_temp,
Node* d_nodes_child_temp, int nodes_offset_device, float* d_fidx_min_map,
float* d_gidx_fvalue_map, GPUTrainingParam gpu_param,
bool* d_left_child_smallest_temp, bool colsample, int* d_feature_flags) {
typedef cub::KeyValuePair<int, float> ArgMaxT;
typedef cub::BlockScan<bst_gpair, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>
BlockScanT;
typedef cub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT;
typedef cub::BlockReduce<bst_gpair, BLOCK_THREADS> SumReduceT;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
struct UninitializedSplit : cub::Uninitialized<Split> {};
struct UninitializedGpair : cub::Uninitialized<bst_gpair> {};
__shared__ UninitializedSplit uninitialized_split;
Split& split = uninitialized_split.Alias();
__shared__ UninitializedGpair uninitialized_sum;
bst_gpair& shared_sum = uninitialized_sum.Alias();
__shared__ ArgMaxT block_max;
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
split = Split();
}
__syncthreads();
// below two are for accessing full-sized node list stored on each device
// always one block per node, BLOCK_THREADS threads per block
int level_node_idx = blockIdx.x + nodes_offset_device;
int node_idx = n_nodes(depth - 1) + level_node_idx;
for (int fidx = 0; fidx < n_features; fidx++) {
if (colsample && d_feature_flags[fidx] == 0) continue;
int begin = d_feature_segments[level_node_idx * n_features + fidx];
int end = d_feature_segments[level_node_idx * n_features + fidx + 1];
int gidx = (begin - (level_node_idx * n_bins)) + threadIdx.x;
bool thread_active = threadIdx.x < end - begin;
bst_gpair feature_sum = bst_gpair();
for (int reduce_begin = begin; reduce_begin < end;
reduce_begin += BLOCK_THREADS) {
// Scan histogram
bst_gpair bin = thread_active ? d_level_hist[reduce_begin + threadIdx.x]
: bst_gpair();
feature_sum +=
SumReduceT(temp_storage.sum_reduce).Reduce(bin, cub::Sum());
}
if (threadIdx.x == 0) {
shared_sum = feature_sum;
}
// __syncthreads(); // no need to synch because below there is a Scan
GpairCallbackOp prefix_op = GpairCallbackOp();
for (int scan_begin = begin; scan_begin < end;
scan_begin += BLOCK_THREADS) {
bst_gpair bin =
thread_active ? d_level_hist[scan_begin + threadIdx.x] : bst_gpair();
BlockScanT(temp_storage.scan)
.ExclusiveScan(bin, bin, cub::Sum(), prefix_op);
// Calculate gain
bst_gpair parent_sum = d_nodes[node_idx].sum_gradients;
float parent_gain = d_nodes[node_idx].root_gain;
bst_gpair missing = parent_sum - shared_sum;
bool missing_left;
float gain = thread_active
? loss_chg_missing(bin, missing, parent_sum, parent_gain,
gpu_param, missing_left)
: -FLT_MAX;
__syncthreads();
// Find thread with best gain
ArgMaxT tuple(threadIdx.x, gain);
ArgMaxT best =
MaxReduceT(temp_storage.max_reduce).Reduce(tuple, cub::ArgMax());
if (threadIdx.x == 0) {
block_max = best;
}
__syncthreads();
// Best thread updates split
if (threadIdx.x == block_max.key) {
float fvalue;
if (threadIdx.x == 0 &&
begin == scan_begin) { // check at start of first tile
fvalue = d_fidx_min_map[fidx];
} else {
fvalue = d_gidx_fvalue_map[gidx - 1];
}
bst_gpair left = missing_left ? bin + missing : bin;
bst_gpair right = parent_sum - left;
split.Update(gain, missing_left, fvalue, fidx, left, right, gpu_param);
}
__syncthreads();
} // end scan
} // end over features
// Create node
if (threadIdx.x == 0) {
if (d_nodes_temp == NULL) {
d_nodes[node_idx].split = split;
} else {
d_nodes_temp[blockIdx.x] = d_nodes[node_idx]; // first copy node values
d_nodes_temp[blockIdx.x].split = split; // now assign split
}
// if (depth == 0) {
// split.Print();
// }
Node *Nodeleft, *Noderight;
bool* left_child_smallest;
if (d_nodes_temp == NULL) {
Nodeleft = &d_nodes[left_child_nidx(node_idx)];
Noderight = &d_nodes[right_child_nidx(node_idx)];
left_child_smallest =
&d_left_child_smallest_temp[node_idx]; // NOTE: not per level, even
// though _temp variable name
} else {
Nodeleft = &d_nodes_child_temp[blockIdx.x * 2 + 0];
Noderight = &d_nodes_child_temp[blockIdx.x * 2 + 1];
left_child_smallest = &d_left_child_smallest_temp[blockIdx.x];
}
*Nodeleft =
Node(split.left_sum,
CalcGain(gpu_param, split.left_sum.grad, split.left_sum.hess),
CalcWeight(gpu_param, split.left_sum.grad, split.left_sum.hess));
*Noderight =
Node(split.right_sum,
CalcGain(gpu_param, split.right_sum.grad, split.right_sum.hess),
CalcWeight(gpu_param, split.right_sum.grad, split.right_sum.hess));
// Record smallest node
if (split.left_sum.hess <= split.right_sum.hess) {
*left_child_smallest = true;
} else {
*left_child_smallest = false;
}
}
}
#define MIN_BLOCK_THREADS 32
#define CHUNK_BLOCK_THREADS 32
// MAX_BLOCK_THREADS of 1024 is the hard-coded maximum block size, i.e. the
// maximum number of threads per block on the compute capability 3.5+ devices
// this code targets
#define MAX_BLOCK_THREADS 1024
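// FindSplitSpecialize walks this ladder in steps of CHUNK_BLOCK_THREADS and
// launches the first instantiation whose BLOCK_THREADS covers param.max_bin,
// i.e. the block size is the smallest multiple of 32 that is >= max_bin,
// capped at MAX_BLOCK_THREADS.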
void GPUHistBuilder::FindSplit(int depth) {
// Specialised based on max_bins
this->FindSplitSpecialize<MIN_BLOCK_THREADS>(depth);
}
template <>
void GPUHistBuilder::FindSplitSpecialize<MAX_BLOCK_THREADS>(int depth) {
LaunchFindSplit<MAX_BLOCK_THREADS>(depth);
}
template <int BLOCK_THREADS>
void GPUHistBuilder::FindSplitSpecialize(int depth) {
if (param.max_bin <= BLOCK_THREADS) {
LaunchFindSplit<BLOCK_THREADS>(depth);
} else {
this->FindSplitSpecialize<BLOCK_THREADS + CHUNK_BLOCK_THREADS>(depth);
}
}
template <int BLOCK_THREADS>
void GPUHistBuilder::LaunchFindSplit(int depth) {
bool colsample =
param.colsample_bylevel < 1.0 || param.colsample_bytree < 1.0;
int dosimuljob = 1;
int simuljob = 1; // whether to do job on single GPU and broadcast (0) or to
// do same job on each GPU (1) (could make user parameter,
// but too fine-grained maybe)
int findsplit_shardongpus = 0; // too expensive generally, disable for now
if (findsplit_shardongpus) {
dosimuljob = 0;
// use power of 2 for split finder because nodes are power of 2 (broadcast
// result to remaining devices)
int find_split_n_devices = std::pow(2, std::floor(std::log2(n_devices)));
find_split_n_devices = std::min(n_nodes_level(depth), find_split_n_devices);
int num_nodes_device = n_nodes_level(depth) / find_split_n_devices;
int num_nodes_child_device =
n_nodes_level(depth + 1) / find_split_n_devices;
const int GRID_SIZE = num_nodes_device;
// NOTE: No need to scatter before gather as all devices have same copy of
// nodes, and within find_split_kernel() nodes_temp is given values from
// nodes
// for all nodes (split among devices) find best split per node
for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
int nodes_offset_device = d_idx * num_nodes_device;
find_split_kernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS>>>(
(const bst_gpair*)(hist_vec[d_idx].GetLevelPtr(depth)),
feature_segments[d_idx].data(), depth, (info->num_col),
(hmat_.row_ptr.back()), nodes[d_idx].data(), nodes_temp[d_idx].data(),
nodes_child_temp[d_idx].data(), nodes_offset_device,
fidx_min_map[d_idx].data(), gidx_fvalue_map[d_idx].data(),
GPUTrainingParam(param), left_child_smallest_temp[d_idx].data(),
colsample, feature_flags[d_idx].data());
}
// nccl only on devices that did split
dh::synchronize_n_devices(find_split_n_devices, dList);
for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
dh::safe_nccl(ncclAllGather(
reinterpret_cast<const void*>(nodes_temp[d_idx].data()),
num_nodes_device * sizeof(Node) / sizeof(char), ncclChar,
reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)),
find_split_comms[find_split_n_devices - 1][d_idx],
*(streams[d_idx])));
if (depth !=
param.max_depth) { // don't copy over children nodes if no more nodes
dh::safe_nccl(ncclAllGather(
reinterpret_cast<const void*>(nodes_child_temp[d_idx].data()),
num_nodes_child_device * sizeof(Node) / sizeof(char), ncclChar,
reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)),
find_split_comms[find_split_n_devices - 1][d_idx],
*(streams[d_idx]))); // Note offset by n_nodes(depth)
// for recvbuff for child nodes
}
dh::safe_nccl(ncclAllGather(
reinterpret_cast<const void*>(left_child_smallest_temp[d_idx].data()),
num_nodes_device * sizeof(bool) / sizeof(char), ncclChar,
reinterpret_cast<void*>(left_child_smallest[d_idx].data() +
n_nodes(depth - 1)),
find_split_comms[find_split_n_devices - 1][d_idx],
*(streams[d_idx])));
}
for (int d_idx = 0; d_idx < find_split_n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx])));
}
if (n_devices > find_split_n_devices && n_devices > 1) {
// if n_devices==1, no need to Bcast
// if find_split_n_devices==1, this is just a copy operation, else it
// copies
// from master to all nodes in case extra devices not involved in split
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
int master_device = dList[0];
dh::safe_nccl(ncclBcast(
reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)),
n_nodes_level(depth) * sizeof(Node) / sizeof(char), ncclChar,
master_device, comms[d_idx], *(streams[d_idx])));
if (depth != param.max_depth) { // don't copy over children nodes if no
// more nodes
dh::safe_nccl(ncclBcast(
reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)),
n_nodes_level(depth + 1) * sizeof(Node) / sizeof(char), ncclChar,
master_device, comms[d_idx], *(streams[d_idx])));
}
dh::safe_nccl(ncclBcast(
reinterpret_cast<void*>(left_child_smallest[d_idx].data() +
n_nodes(depth - 1)),
n_nodes_level(depth) * sizeof(bool) / sizeof(char), ncclChar,
master_device, comms[d_idx], *(streams[d_idx])));
}
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx])));
}
}
} else if (simuljob == 0) {
dosimuljob = 0;
int num_nodes_device = n_nodes_level(depth);
const int GRID_SIZE = num_nodes_device;
int d_idx = 0;
int master_device = dList[d_idx];
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
int nodes_offset_device = d_idx * num_nodes_device;
find_split_kernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS>>>(
(const bst_gpair*)(hist_vec[d_idx].GetLevelPtr(depth)),
feature_segments[d_idx].data(), depth, (info->num_col),
(hmat_.row_ptr.back()), nodes[d_idx].data(), NULL, NULL,
nodes_offset_device, fidx_min_map[d_idx].data(),
gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param),
left_child_smallest[d_idx].data(), colsample,
feature_flags[d_idx].data());
// broadcast result
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
dh::safe_nccl(ncclBcast(
reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth - 1)),
n_nodes_level(depth) * sizeof(Node) / sizeof(char), ncclChar,
master_device, comms[d_idx], *(streams[d_idx])));
if (depth !=
param.max_depth) { // don't copy over children nodes if no more nodes
dh::safe_nccl(ncclBcast(
reinterpret_cast<void*>(nodes[d_idx].data() + n_nodes(depth)),
n_nodes_level(depth + 1) * sizeof(Node) / sizeof(char), ncclChar,
master_device, comms[d_idx], *(streams[d_idx])));
}
dh::safe_nccl(
ncclBcast(reinterpret_cast<void*>(left_child_smallest[d_idx].data() +
n_nodes(depth - 1)),
n_nodes_level(depth) * sizeof(bool) / sizeof(char),
ncclChar, master_device, comms[d_idx], *(streams[d_idx])));
}
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
dh::safe_cuda(cudaStreamSynchronize(*(streams[d_idx])));
}
} else {
dosimuljob = 1;
}
if (dosimuljob) { // if no NCCL or simuljob==1, do this
int num_nodes_device = n_nodes_level(depth);
const int GRID_SIZE = num_nodes_device;
// all GPUs do same work
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
int nodes_offset_device = 0;
find_split_kernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS>>>(
(const bst_gpair*)(hist_vec[d_idx].GetLevelPtr(depth)),
feature_segments[d_idx].data(), depth, (info->num_col),
(hmat_.row_ptr.back()), nodes[d_idx].data(), NULL, NULL,
nodes_offset_device, fidx_min_map[d_idx].data(),
gidx_fvalue_map[d_idx].data(), GPUTrainingParam(param),
left_child_smallest[d_idx].data(), colsample,
feature_flags[d_idx].data());
}
}
  // NOTE: No need to synchronize with host, as all of the above are pure P2P
  // or on-device ops
}
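// Node indexing as used throughout this file: the tree lives in a dense array
// in breadth-first order, so a node at index pos has its children at 2*pos+1
// (left) and 2*pos+2 (right); n_nodes(d) counts all nodes of a complete tree
// down to depth d and n_nodes_level(d) counts the nodes on level d alone,
// e.g. level 2 holds 4 nodes at indices 3..6 (= n_nodes(1) .. n_nodes(2) - 1).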
void GPUHistBuilder::InitFirstNode(const std::vector<bst_gpair>& gpair) {
// Perform asynchronous reduction on each gpu
std::vector<bst_gpair> device_sums(n_devices);
#pragma omp parallel for num_threads(n_devices)
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
auto begin = device_gpair[d_idx].tbegin();
auto end = device_gpair[d_idx].tend();
bst_gpair init = bst_gpair();
auto binary_op = thrust::plus<bst_gpair>();
device_sums[d_idx] = thrust::reduce(begin, end, init, binary_op);
}
bst_gpair sum = bst_gpair();
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
sum += device_sums[d_idx];
}
// Setup first node so all devices have same first node (here done same on all
// devices, or could have done one device and Bcast if worried about exact
// precision issues)
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
auto d_nodes = nodes[d_idx].data();
auto gpu_param = GPUTrainingParam(param);
dh::launch_n(device_idx, 1, [=] __device__(int idx) {
bst_gpair sum_gradients = sum;
d_nodes[idx] =
Node(sum_gradients,
CalcGain(gpu_param, sum_gradients.grad, sum_gradients.hess),
CalcWeight(gpu_param, sum_gradients.grad, sum_gradients.hess));
});
}
// synch all devices to host before moving on (No, can avoid because BuildHist
// calls another kernel in default stream)
// dh::synchronize_n_devices(n_devices, dList);
}
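// UpdatePosition routes every training row to its child node once a level's
// splits are fixed: the dense path reads the quantized value directly at
// (row, split feature), while the sparse path first sends each row in the
// split's default (missing-value) direction and then overrides that choice
// wherever the row actually stores a value for the split feature.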
void GPUHistBuilder::UpdatePosition(int depth) {
if (is_dense) {
this->UpdatePositionDense(depth);
} else {
this->UpdatePositionSparse(depth);
}
}
void GPUHistBuilder::UpdatePositionDense(int depth) {
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
auto d_position = position[d_idx].data();
Node* d_nodes = nodes[d_idx].data();
auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data();
auto d_gidx = device_matrix[d_idx].gidx;
int n_columns = info->num_col;
size_t begin = device_row_segments[d_idx];
size_t end = device_row_segments[d_idx + 1];
dh::launch_n(device_idx, end - begin, [=] __device__(size_t local_idx) {
int pos = d_position[local_idx];
if (!is_active(pos, depth)) {
return;
}
Node node = d_nodes[pos];
if (node.IsLeaf()) {
return;
}
int gidx = d_gidx[local_idx *
static_cast<size_t>(n_columns) + static_cast<size_t>(node.split.findex)];
float fvalue = d_gidx_fvalue_map[gidx];
if (fvalue <= node.split.fvalue) {
d_position[local_idx] = left_child_nidx(pos);
} else {
d_position[local_idx] = right_child_nidx(pos);
}
});
}
dh::synchronize_n_devices(n_devices, dList);
// dh::safe_cuda(cudaDeviceSynchronize());
}
void GPUHistBuilder::UpdatePositionSparse(int depth) {
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
auto d_position = position[d_idx].data();
auto d_position_tmp = position_tmp[d_idx].data();
Node* d_nodes = nodes[d_idx].data();
auto d_gidx_feature_map = gidx_feature_map[d_idx].data();
auto d_gidx_fvalue_map = gidx_fvalue_map[d_idx].data();
auto d_gidx = device_matrix[d_idx].gidx;
auto d_row_ptr = device_matrix[d_idx].row_ptr.tbegin();
size_t row_begin = device_row_segments[d_idx];
size_t row_end = device_row_segments[d_idx + 1];
size_t element_begin = device_element_segments[d_idx];
size_t element_end = device_element_segments[d_idx + 1];
// Update missing direction
dh::launch_n(device_idx, row_end - row_begin,
[=] __device__(int local_idx) {
int pos = d_position[local_idx];
if (!is_active(pos, depth)) {
d_position_tmp[local_idx] = pos;
return;
}
Node node = d_nodes[pos];
if (node.IsLeaf()) {
d_position_tmp[local_idx] = pos;
return;
} else if (node.split.missing_left) {
d_position_tmp[local_idx] = pos * 2 + 1;
} else {
d_position_tmp[local_idx] = pos * 2 + 2;
}
});
// Update node based on fvalue where exists
// OPTMARK: This kernel is very inefficient for both compute and memory,
// dominated by memory dependency / access patterns
dh::TransformLbs(
device_idx, &temp_memory[d_idx], element_end - element_begin, d_row_ptr,
row_end - row_begin, [=] __device__(size_t local_idx, int local_ridx) {
int pos = d_position[local_ridx];
if (!is_active(pos, depth)) {
return;
}
Node node = d_nodes[pos];
if (node.IsLeaf()) {
return;
}
int gidx = d_gidx[local_idx];
int findex = d_gidx_feature_map[gidx]; // OPTMARK: slowest global
// memory access, maybe setup
// position, gidx, etc. as
// combined structure?
if (findex == node.split.findex) {
float fvalue = d_gidx_fvalue_map[gidx];
if (fvalue <= node.split.fvalue) {
d_position_tmp[local_ridx] = left_child_nidx(pos);
} else {
d_position_tmp[local_ridx] = right_child_nidx(pos);
}
}
});
position[d_idx] = position_tmp[d_idx];
}
dh::synchronize_n_devices(n_devices, dList);
}
void GPUHistBuilder::ColSampleTree() {
if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return;
feature_set_tree.resize(info->num_col);
std::iota(feature_set_tree.begin(), feature_set_tree.end(), 0);
feature_set_tree = col_sample(feature_set_tree, param.colsample_bytree);
}
void GPUHistBuilder::ColSampleLevel() {
if (param.colsample_bylevel == 1.0 && param.colsample_bytree == 1.0) return;
feature_set_level.resize(feature_set_tree.size());
feature_set_level = col_sample(feature_set_tree, param.colsample_bylevel);
std::vector<int> h_feature_flags(info->num_col, 0);
for (auto fidx : feature_set_level) {
h_feature_flags[fidx] = 1;
}
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
dh::safe_cuda(cudaSetDevice(device_idx));
feature_flags[d_idx] = h_feature_flags;
}
dh::synchronize_n_devices(n_devices, dList);
}
bool GPUHistBuilder::UpdatePredictionCache(
const DMatrix* data, std::vector<bst_float>* p_out_preds) {
std::vector<bst_float>& out_preds = *p_out_preds;
if (nodes.empty() || !p_last_fmat_ || data != p_last_fmat_) {
return false;
}
if (!prediction_cache_initialised) {
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
size_t row_begin = device_row_segments[d_idx];
size_t row_end = device_row_segments[d_idx + 1];
prediction_cache[d_idx].copy(out_preds.begin() + row_begin,
out_preds.begin() + row_end);
}
prediction_cache_initialised = true;
}
dh::synchronize_n_devices(n_devices, dList);
float eps = param.learning_rate;
for (int d_idx = 0; d_idx < n_devices; d_idx++) {
int device_idx = dList[d_idx];
size_t row_begin = device_row_segments[d_idx];
size_t row_end = device_row_segments[d_idx + 1];
auto d_nodes = nodes[d_idx].data();
auto d_position = position[d_idx].data();
auto d_prediction_cache = prediction_cache[d_idx].data();
dh::launch_n(device_idx, prediction_cache[d_idx].size(),
[=] __device__(int local_idx) {
int pos = d_position[local_idx];
d_prediction_cache[local_idx] += d_nodes[pos].weight * eps;
});
thrust::copy(prediction_cache[d_idx].tbegin(),
prediction_cache[d_idx].tend(), &out_preds[row_begin]);
}
dh::synchronize_n_devices(n_devices, dList);
return true;
}
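// Update() is the per-tree driver: initialize the data and the root node once,
// then for each level run column sampling, histogram construction, split
// finding and position updates, and finally copy the dense node array from the
// master device back into the host RegTree.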
void GPUHistBuilder::Update(const std::vector<bst_gpair>& gpair,
DMatrix* p_fmat, RegTree* p_tree) {
this->InitData(gpair, *p_fmat, *p_tree);
this->InitFirstNode(gpair);
this->ColSampleTree();
for (int depth = 0; depth < param.max_depth; depth++) {
this->ColSampleLevel();
this->BuildHist(depth);
this->FindSplit(depth);
this->UpdatePosition(depth);
}
// done with multi-GPU, pass back result from master to tree on host
int master_device = dList[0];
dh::safe_cuda(cudaSetDevice(master_device));
dense2sparse_tree(p_tree, nodes[0].tbegin(), nodes[0].tend(), param);
}
} // namespace tree
} // namespace xgboost
|
141097f9b95287091f0df97392d3a0793e433829.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, int size, T value)
{
  int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
  // The grid is rounded up, so guard tail threads against writing past `size`.
  if (loc < size) input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
  hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,size,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
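// High-level scheme of the generated kernel below: 32x32 thread blocks stream
// up the Z axis, keeping one XY plane per fused time step in shared memory
// (tilevar[0..3]) plus two register planes above (t[]) and below (b[]) each of
// them, since the 13-point stencil reaches +/-2 along every axis. Blocks
// overlap by 16 cells in X and Y (blockIdx * (BLOCKDIM - 16)) so the halo
// consumed by the four chained updates stays inside the block.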
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[8], b[8];
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-16);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-16);
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) {
b[0] = input[__iter_5__+N*(__iter_4__+M*(0))];
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(1))];
t[0] = input[__iter_5__+N*(__iter_4__+M*(2))];
t[4] = input[__iter_5__+N*(__iter_4__+M*(3))];
}
// Rest of the computation
for (int __iter_2__ = 2; __iter_2__ < L-2; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
b[4] = b[0];
b[0] = tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[0];
t[0] = t[4];
t[4] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+2))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3))){
float __temp_3__ = (tilevar[0][__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_7__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__);
float __temp_12__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__);
float __temp_17__ = (tilevar[0][__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__);
float __temp_22__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__);
float __temp_27__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__);
float __temp_32__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__);
float __temp_37__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__);
float __temp_43__ = (__temp_38__ + 0.083000f * t[4]);
float __temp_48__ = (__temp_43__ + 0.083000f * t[0]);
float __temp_53__ = (__temp_48__ + 0.083000f * b[0]);
float __temp_58__ = (__temp_53__ + 0.083000f * b[4]);
float __temp_62__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__);
b[5] = b[1];
b[1] = tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[1];
t[1] = t[5];
t[5] = __temp_63__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3)) ){
float __temp_3__ = (tilevar[1][__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_7__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__);
float __temp_12__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__);
float __temp_17__ = (tilevar[1][__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__);
float __temp_22__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__);
float __temp_27__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__);
float __temp_32__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__);
float __temp_37__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__);
float __temp_43__ = (__temp_38__ + 0.083000f * t[5]);
float __temp_48__ = (__temp_43__ + 0.083000f * t[1]);
float __temp_53__ = (__temp_48__ + 0.083000f * b[1]);
float __temp_58__ = (__temp_53__ + 0.083000f * b[5]);
float __temp_62__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__);
b[6] = b[2];
b[2] = tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[2];
t[2] = t[6];
t[6] = __temp_63__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3))){
float __temp_3__ = (tilevar[2][__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_7__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__);
float __temp_12__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__);
float __temp_17__ = (tilevar[2][__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__);
float __temp_22__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__);
float __temp_27__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__);
float __temp_32__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__);
float __temp_37__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__);
float __temp_43__ = (__temp_38__ + 0.083000f * t[6]);
float __temp_48__ = (__temp_43__ + 0.083000f * t[2]);
float __temp_53__ = (__temp_48__ + 0.083000f * b[2]);
float __temp_58__ = (__temp_53__ + 0.083000f * b[6]);
float __temp_62__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__);
b[7] = b[3];
b[3] = tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[3];
t[3] = t[7];
t[7] = __temp_63__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+8),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-9),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+8),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(N-3))){
float __temp_3__ = (tilevar[3][__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_7__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__);
float __temp_12__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__);
float __temp_17__ = (tilevar[3][__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__);
float __temp_22__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__);
float __temp_27__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__);
float __temp_32__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__);
float __temp_37__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__);
float __temp_43__ = (__temp_38__ + 0.083000f * t[7]);
float __temp_48__ = (__temp_43__ + 0.083000f * t[3]);
float __temp_53__ = (__temp_48__ + 0.083000f * b[3]);
float __temp_58__ = (__temp_53__ + 0.083000f * b[7]);
float __temp_62__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-6,0))] = __temp_63__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d13pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-16);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-16);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
for (int i = 0; i < 125; i++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), 0, 0, input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), 0, 0, __var_1__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, input);
}
for (int n = 0; n < 5; n++) {
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
for (int i = 0; i < 125; i++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), 0, 0, input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), 0, 0, __var_1__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, input);
}
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
}
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
}
/*Host Free End*/
| 141097f9b95287091f0df97392d3a0793e433829.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
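/* 13-point 3D Jacobi-style stencil (hence "j3d13pt"): each output point is 0.083f times
its twelve x/y/z neighbours at distances 1 and 2, minus 0.996f times the centre value.
The four tilevar shared-memory planes plus the t[]/b[] registers appear to pipeline four
consecutive time steps per launch, which is why results are written back at plane
__iter_2__-6; this summary is inferred from the generated code below. */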
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[8], b[8];
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-16);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-16);
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) {
b[0] = input[__iter_5__+N*(__iter_4__+M*(0))];
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(1))];
t[0] = input[__iter_5__+N*(__iter_4__+M*(2))];
t[4] = input[__iter_5__+N*(__iter_4__+M*(3))];
}
// Rest of the computation
for (int __iter_2__ = 2; __iter_2__ < L-2; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
b[4] = b[0];
b[0] = tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[0];
t[0] = t[4];
t[4] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+2))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-3))){
float __temp_3__ = (tilevar[0][__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_7__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__);
float __temp_12__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__);
float __temp_17__ = (tilevar[0][__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__);
float __temp_22__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__);
float __temp_27__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__);
float __temp_32__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__);
float __temp_37__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__);
float __temp_43__ = (__temp_38__ + 0.083000f * t[4]);
float __temp_48__ = (__temp_43__ + 0.083000f * t[0]);
float __temp_53__ = (__temp_48__ + 0.083000f * b[0]);
float __temp_58__ = (__temp_53__ + 0.083000f * b[4]);
float __temp_62__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__);
b[5] = b[1];
b[1] = tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[1];
t[1] = t[5];
t[5] = __temp_63__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+4),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-3)) ){
float __temp_3__ = (tilevar[1][__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_7__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__);
float __temp_12__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__);
float __temp_17__ = (tilevar[1][__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__);
float __temp_22__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__);
float __temp_27__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__);
float __temp_32__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__);
float __temp_37__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__);
float __temp_43__ = (__temp_38__ + 0.083000f * t[5]);
float __temp_48__ = (__temp_43__ + 0.083000f * t[1]);
float __temp_53__ = (__temp_48__ + 0.083000f * b[1]);
float __temp_58__ = (__temp_53__ + 0.083000f * b[5]);
float __temp_62__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__);
b[6] = b[2];
b[2] = tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[2];
t[2] = t[6];
t[6] = __temp_63__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+6),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+6),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(N-3))){
float __temp_3__ = (tilevar[2][__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_7__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__);
float __temp_12__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__);
float __temp_17__ = (tilevar[2][__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__);
float __temp_22__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__);
float __temp_27__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__);
float __temp_32__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__);
float __temp_37__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__);
float __temp_43__ = (__temp_38__ + 0.083000f * t[6]);
float __temp_48__ = (__temp_43__ + 0.083000f * t[2]);
float __temp_53__ = (__temp_48__ + 0.083000f * b[2]);
float __temp_58__ = (__temp_53__ + 0.083000f * b[6]);
float __temp_62__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__);
b[7] = b[3];
b[3] = tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[3];
t[3] = t[7];
t[7] = __temp_63__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+8),2) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-9),(M-3)) & __iter_5__ >= FORMA_MAX((__iter_0__+8),2) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(N-3))){
float __temp_3__ = (tilevar[3][__iter_5__+2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_7__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_8__ = (0.083000f * __temp_3__ + 0.083000f * __temp_7__);
float __temp_12__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_13__ = (__temp_8__ + 0.083000f * __temp_12__);
float __temp_17__ = (tilevar[3][__iter_5__-2-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_18__ = (__temp_13__ + 0.083000f * __temp_17__);
float __temp_22__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+2-__iter_1__)]);
float __temp_23__ = (__temp_18__ + 0.083000f * __temp_22__);
float __temp_27__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_28__ = (__temp_23__ + 0.083000f * __temp_27__);
float __temp_32__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_33__ = (__temp_28__ + 0.083000f * __temp_32__);
float __temp_37__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-2-__iter_1__)]);
float __temp_38__ = (__temp_33__ + 0.083000f * __temp_37__);
float __temp_43__ = (__temp_38__ + 0.083000f * t[7]);
float __temp_48__ = (__temp_43__ + 0.083000f * t[3]);
float __temp_53__ = (__temp_48__ + 0.083000f * b[3]);
float __temp_58__ = (__temp_53__ + 0.083000f * b[7]);
float __temp_62__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ - 0.996000f * __temp_62__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-6,0))] = __temp_63__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d13pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-16);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-16);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
for (int i = 0; i < 125; i++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig>>> (input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig>>> (__var_1__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, input);
}
for (int n = 0; n < 5; n++) {
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
for (int i = 0; i < 125; i++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig>>> (input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig>>> (__var_1__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, input);
}
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
}
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
}
/*Host Free End*/
|
d82c3849a5a72410bee7d82ba7dfe9f606bf3322.hip | // !!! This is a file automatically generated by hipify!!!
#include <hiprand/hiprand_kernel.h>
__device__ float gamma(float k, hiprandState_t* state_ptr){
// gamma distribution
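// Appears to draw one sample from a Gamma distribution with shape k and unit scale,
// using rejection sampling: a Weibull-based method when k < 1 and Cheng's algorithm
// (as named below) when k >= 1; the caller supplies an already-initialized RNG state.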
float x;
if(k<1){ // Weibull algorithm
float c=1/k;
float d=(1-k)*powf(k, 1/(c-1));
float z;
float e;
do{
z=-logf(hiprand_uniform(state_ptr));
e=-logf(hiprand_uniform(state_ptr));
x=powf(z, c);
} while(z+e<d+x);
}else{ // Cheng's algorithm
float b=k-logf(4.0f);
float l=sqrtf(2*k-1);
float c=1+logf(4.5f);
float u, v, y, z, r;
do{
u=hiprand_uniform(state_ptr);
v=hiprand_uniform(state_ptr);
y=-logf(1/v-1)/l;
x=k*expf(y);
z=u*v*v;
r=b+(k+l)*y-x;
} while(r<4.5f*z-c && r<logf(z));
}
return x;
}
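// Hypothetical usage sketch (not part of the original file; the kernel name is
// illustrative only): a single thread seeds its own RNG state and writes one
// Gamma(k, 1) sample to out[0].
__global__ void gamma_sample_example(float k, unsigned long long seed, float* out)
{
  hiprandState_t state;
  hiprand_init(seed, /*sequence=*/0, /*offset=*/0, &state);
  out[0] = gamma(k, &state);
}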
| d82c3849a5a72410bee7d82ba7dfe9f606bf3322.cu |
#include <curand_kernel.h>
__device__ float gamma(float k, curandState_t* state_ptr){
// gamma distribution
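// Appears to draw one sample from a Gamma distribution with shape k and unit scale,
// using rejection sampling: a Weibull-based method when k < 1 and Cheng's algorithm
// (as named below) when k >= 1; the caller supplies an already-initialized RNG state.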
float x;
if(k<1){ // Weibull algorithm
float c=1/k;
float d=(1-k)*powf(k, 1/(c-1));
float z;
float e;
do{
z=-logf(curand_uniform(state_ptr));
e=-logf(curand_uniform(state_ptr));
x=powf(z, c);
} while(z+e<d+x);
}else{ // Cheng's algorithm
float b=k-logf(4.0f);
float l=sqrtf(2*k-1);
float c=1+logf(4.5f);
float u, v, y, z, r;
do{
u=curand_uniform(state_ptr);
v=curand_uniform(state_ptr);
y=-logf(1/v-1)/l;
x=k*expf(y);
z=u*v*v;
r=b+(k+l)*y-x;
} while(r<4.5f*z-c && r<logf(z));
}
return x;
}
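// Hypothetical usage sketch (not part of the original file; the kernel name is
// illustrative only): a single thread seeds its own RNG state and writes one
// Gamma(k, 1) sample to out[0].
__global__ void gamma_sample_example(float k, unsigned long long seed, float* out)
{
  curandState_t state;
  curand_init(seed, /*sequence=*/0, /*offset=*/0, &state);
  out[0] = gamma(k, &state);
}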
|
7af373bc04bee55405e33dc14c779ebf264d358d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "tensor/gpu_handle.h"
#include "util/gnn_macros.h"
#include "util/mem_holder.h"
#include "tbb/tbb.h"
namespace gnn
{
__global__ void SetupRandKernel(hiprandState_t *state, unsigned long long seed)
{
const unsigned int tidx = NUM_RND_THREADS_PER_BLOCK * blockIdx.x + threadIdx.x;
/* Each thread gets same seed, a different sequence number,
no offset */
hiprand_init(seed, tidx, 0, &state[tidx]);
}
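/* Init: resets and selects the device, creates one BLAS handle and one sparse handle per
stream slot, marks every slot free in the resource queue, and sets up the host-side random
generator plus the per-thread device RNG states seeded by SetupRandKernel above. */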
void GpuHandle::Init(int dev_id, unsigned int _streamcnt)
{
tbb::task_scheduler_init init(4);
streamcnt = _streamcnt;
hipDeviceReset();
hipSetDevice(dev_id);
cublashandles = new hipblasHandle_t[streamcnt];
cusparsehandles = new hipsparseHandle_t[streamcnt];
inUse = new bool[streamcnt];
while (!resources.empty())
resources.pop();
for (unsigned int id = 0; id < streamcnt; ++id)
{
hipblasCreate(&cublashandles[id]);
hipsparseCreate(&cusparsehandles[id]);
inUse[id] = false;
resources.push(id);
}
hipStreamCreate(&cudaRandStream);
hiprandCreateGenerator(&curandgenerator, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(curandgenerator, time(NULL));
hipMalloc((void **)&devRandStates, NUM_RND_STREAMS * sizeof(hiprandState_t));
hipLaunchKernelGGL(( SetupRandKernel), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, devRandStates, 1 + time(NULL)*2);
}
GpuContext GpuHandle::AquireCtx()
{
r_loc.lock();
ASSERT(resources.size(), "running out of gpu resources");
int cur_pos = resources.front();
resources.pop();
r_loc.unlock();
ASSERT(!inUse[cur_pos], "logic error: in-use resource is found available");
inUse[cur_pos] = true;
hipblasSetStream(cublashandles[cur_pos], cudaStreamPerThread);
hipsparseSetStream(cusparsehandles[cur_pos], cudaStreamPerThread);
return GpuContext(cur_pos, cublashandles[cur_pos], cusparsehandles[cur_pos]);
}
void GpuHandle::ReleaseCtx(const GpuContext& ctx)
{
r_loc.lock();
resources.push(ctx.id);
ASSERT(inUse[ctx.id], "logic error: in-use resource is not recorded, or you are releasing same resource multiple times");
inUse[ctx.id] = false;
r_loc.unlock();
}
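/* Typical usage sketch (inferred from this file only):
     GpuContext ctx = GpuHandle::AquireCtx();   // takes a free handle slot; asserts if none is free
     ... launch work with the handles carried by ctx ...
     GpuHandle::ReleaseCtx(ctx);                // returns the slot to the queue
*/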
void GpuHandle::Destroy()
{
hipDeviceSynchronize();
hipStreamDestroy(cudaRandStream);
for (unsigned int id = 0; id < streamcnt; ++id)
{
hipblasDestroy(cublashandles[id]);
hipsparseDestroy(cusparsehandles[id]);
}
delete[] cublashandles;
delete[] cusparsehandles;
delete[] inUse;
hiprandDestroyGenerator(curandgenerator);
hipFree(devRandStates);
streamcnt = 0U;
}
hiprandState_t* GpuHandle::devRandStates = NULL;
hipblasHandle_t* GpuHandle::cublashandles = NULL;
hipsparseHandle_t* GpuHandle::cusparsehandles = NULL;
hiprandGenerator_t GpuHandle::curandgenerator;
unsigned int GpuHandle::streamcnt = 1U;
std::queue< int > GpuHandle::resources;
std::mutex GpuHandle::r_loc;
std::mutex GpuHandle::rand_lock;
bool* GpuHandle::inUse = NULL;
hipStream_t GpuHandle::cudaRandStream;
} | 7af373bc04bee55405e33dc14c779ebf264d358d.cu | #include "tensor/gpu_handle.h"
#include "util/gnn_macros.h"
#include "util/mem_holder.h"
#include "tbb/tbb.h"
namespace gnn
{
__global__ void SetupRandKernel(curandState_t *state, unsigned long long seed)
{
const unsigned int tidx = NUM_RND_THREADS_PER_BLOCK * blockIdx.x + threadIdx.x;
/* Each thread gets same seed, a different sequence number,
no offset */
curand_init(seed, tidx, 0, &state[tidx]);
}
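/* Init: resets and selects the device, creates one BLAS handle and one sparse handle per
stream slot, marks every slot free in the resource queue, and sets up the host-side random
generator plus the per-thread device RNG states seeded by SetupRandKernel above. */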
void GpuHandle::Init(int dev_id, unsigned int _streamcnt)
{
tbb::task_scheduler_init init(4);
streamcnt = _streamcnt;
cudaDeviceReset();
cudaSetDevice(dev_id);
cublashandles = new cublasHandle_t[streamcnt];
cusparsehandles = new cusparseHandle_t[streamcnt];
inUse = new bool[streamcnt];
while (!resources.empty())
resources.pop();
for (unsigned int id = 0; id < streamcnt; ++id)
{
cublasCreate(&cublashandles[id]);
cusparseCreate(&cusparsehandles[id]);
inUse[id] = false;
resources.push(id);
}
cudaStreamCreate(&cudaRandStream);
curandCreateGenerator(&curandgenerator, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(curandgenerator, time(NULL));
cudaMalloc((void **)&devRandStates, NUM_RND_STREAMS * sizeof(curandState_t));
SetupRandKernel<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(devRandStates, 1 + time(NULL)*2);
}
GpuContext GpuHandle::AquireCtx()
{
r_loc.lock();
ASSERT(resources.size(), "running out of gpu resources");
int cur_pos = resources.front();
resources.pop();
r_loc.unlock();
ASSERT(!inUse[cur_pos], "logic error: in-use resource is found available");
inUse[cur_pos] = true;
cublasSetStream(cublashandles[cur_pos], cudaStreamPerThread);
cusparseSetStream(cusparsehandles[cur_pos], cudaStreamPerThread);
return GpuContext(cur_pos, cublashandles[cur_pos], cusparsehandles[cur_pos]);
}
void GpuHandle::ReleaseCtx(const GpuContext& ctx)
{
r_loc.lock();
resources.push(ctx.id);
ASSERT(inUse[ctx.id], "logic error: in-use resource is not recorded, or you are releasing same resource multiple times");
inUse[ctx.id] = false;
r_loc.unlock();
}
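/* Typical usage sketch (inferred from this file only):
     GpuContext ctx = GpuHandle::AquireCtx();   // takes a free handle slot; asserts if none is free
     ... launch work with the handles carried by ctx ...
     GpuHandle::ReleaseCtx(ctx);                // returns the slot to the queue
*/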
void GpuHandle::Destroy()
{
cudaDeviceSynchronize();
cudaStreamDestroy(cudaRandStream);
for (unsigned int id = 0; id < streamcnt; ++id)
{
cublasDestroy_v2(cublashandles[id]);
cusparseDestroy(cusparsehandles[id]);
}
delete[] cublashandles;
delete[] cusparsehandles;
delete[] inUse;
curandDestroyGenerator(curandgenerator);
cudaFree(devRandStates);
streamcnt = 0U;
}
curandState_t* GpuHandle::devRandStates = NULL;
cublasHandle_t* GpuHandle::cublashandles = NULL;
cusparseHandle_t* GpuHandle::cusparsehandles = NULL;
curandGenerator_t GpuHandle::curandgenerator;
unsigned int GpuHandle::streamcnt = 1U;
std::queue< int > GpuHandle::resources;
std::mutex GpuHandle::r_loc;
std::mutex GpuHandle::rand_lock;
bool* GpuHandle::inUse = NULL;
cudaStream_t GpuHandle::cudaRandStream;
} |
6c06d5825d6abd8283851fec67472654bcbe28c1.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/sort/thrust_sort.cuh"
namespace cunumeric {
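// Thin wrapper for the bool-typed case: it simply forwards to the shared
// detail::thrust_local_sort implementation pulled in from thrust_sort.cuh.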
void thrust_local_sort(const bool* values_in,
bool* values_out,
const int64_t* indices_in,
int64_t* indices_out,
const size_t volume,
const size_t sort_dim_size,
const bool stable,
hipStream_t stream)
{
detail::thrust_local_sort(
values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stable, stream);
}
} // namespace cunumeric
| 6c06d5825d6abd8283851fec67472654bcbe28c1.cu | /* Copyright 2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/sort/thrust_sort.cuh"
namespace cunumeric {
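// Thin wrapper for the bool-typed case: it simply forwards to the shared
// detail::thrust_local_sort implementation pulled in from thrust_sort.cuh.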
void thrust_local_sort(const bool* values_in,
bool* values_out,
const int64_t* indices_in,
int64_t* indices_out,
const size_t volume,
const size_t sort_dim_size,
const bool stable,
cudaStream_t stream)
{
detail::thrust_local_sort(
values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stable, stream);
}
} // namespace cunumeric
|
9ca9b2b95bb162b3e88f04d4df9018a0fb83dc65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<cstdio>
using namespace std;
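// maxi: each block (launched with a single thread per block in main) scans its own
// 256-element chunk of a[] sequentially and writes that chunk's maximum to b[blockIdx.x];
// the host keeps relaunching the kernel until only one value remains.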
__global__ void maxi(int *a,int *b,int n)
{
int block=256*blockIdx.x;
int max=0;
for(int i=block;i<min(256+block,n);i++)
{
if(max<a[i])
{
max=a[i];
}
}
b[blockIdx.x]=max;
}
int main()
{
cout<<"Enter the size of array"<<endl;
int n;
cin>>n;
int a[n];
hipEvent_t start,end;
for(int i=0;i<n;i++)
{
cout<<"Enter the numbers: ";
cin>>a[i];
}
int *ad,*bd;
int size=n*sizeof(int);
hipMalloc(&ad,size);
hipMemcpy(ad,a,size,hipMemcpyHostToDevice);
int grids=ceil(n*1.0f/256.0f);
hipMalloc(&bd,grids*sizeof(int));
dim3 grid(grids,1);
dim3 block(1,1);
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
while(n>1)
{
hipLaunchKernelGGL(( maxi), dim3(grids),dim3(block), 0, 0, ad,bd,n);
n=ceil(n*1.0f/256.0f);
hipMemcpy(ad,bd,n*sizeof(int),hipMemcpyDeviceToDevice);
}
hipEventRecord(end);
hipEventSynchronize(end);
float time=0;
hipEventElapsedTime(&time,start,end);
int ans[2];
hipMemcpy(ans,ad,4,hipMemcpyDeviceToHost);
cout<<"The maximum element is"<<ans[0]<<endl;
cout<<"The time required for it is";
cout<<time<<endl;
}
| 9ca9b2b95bb162b3e88f04d4df9018a0fb83dc65.cu | #include<iostream>
#include<cstdio>
using namespace std;
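// maxi: each block (launched with a single thread per block in main) scans its own
// 256-element chunk of a[] sequentially and writes that chunk's maximum to b[blockIdx.x];
// the host keeps relaunching the kernel until only one value remains.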
__global__ void maxi(int *a,int *b,int n)
{
int block=256*blockIdx.x;
int max=0;
for(int i=block;i<min(256+block,n);i++)
{
if(max<a[i])
{
max=a[i];
}
}
b[blockIdx.x]=max;
}
int main()
{
cout<<"Enter the size of array"<<endl;
int n;
cin>>n;
int a[n];
cudaEvent_t start,end;
for(int i=0;i<n;i++)
{
cout<<"Enter the numbers: ";
cin>>a[i];
}
int *ad,*bd;
int size=n*sizeof(int);
cudaMalloc(&ad,size);
cudaMemcpy(ad,a,size,cudaMemcpyHostToDevice);
int grids=ceil(n*1.0f/256.0f);
cudaMalloc(&bd,grids*sizeof(int));
dim3 grid(grids,1);
dim3 block(1,1);
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
while(n>1)
{
maxi<<<grids,block>>>(ad,bd,n);
n=ceil(n*1.0f/256.0f);
cudaMemcpy(ad,bd,n*sizeof(int),cudaMemcpyDeviceToDevice);
}
cudaEventRecord(end);
cudaEventSynchronize(end);
float time=0;
cudaEventElapsedTime(&time,start,end);
int ans[2];
cudaMemcpy(ans,ad,4,cudaMemcpyDeviceToHost);
cout<<"The maximum element is"<<ans[0]<<endl;
cout<<"The time required for it is";
cout<<time<<endl;
}
|
952fe6b93cf260cd23dc344083a8d228b45724c6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define N 16
__global__ void add( int *a, int *b, int *c ){
int tid = threadIdx.x;
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the CPU
a = (int*)malloc( N * sizeof(int) );
b = (int*)malloc( N * sizeof(int) );
c = (int*)malloc( N * sizeof(int) );
// allocate the memory on the GPU
hipMalloc( (void**)&dev_a, N * sizeof(int) );
hipMalloc( (void**)&dev_b, N * sizeof(int) );
hipMalloc( (void**)&dev_c, N * sizeof(int) );
// fill the arrays 'a' and 'b' on the CPU
srand ( time(NULL) );
for (int i=0; i<N; i++) {
a[i] = rand()%256;
b[i] = rand()%256;
}
// copy the arrays 'a' and 'b' to the GPU
hipMemcpy( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice );
hipLaunchKernelGGL(( add), dim3(1), dim3(N), 0, 0, dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost );
// verify that the GPU did the work we requested
bool success = true;
for (int i=0; i<N; i++) {
if ((a[i] + b[i]) != c[i]) {
printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] );
success = false;
}
}
if (success) printf( "We did it!\n" );
// free the memory allocated on the GPU
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
return 0;
}
| 952fe6b93cf260cd23dc344083a8d228b45724c6.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define N 16
__global__ void add( int *a, int *b, int *c ){
int tid = threadIdx.x;
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the CPU
a = (int*)malloc( N * sizeof(int) );
b = (int*)malloc( N * sizeof(int) );
c = (int*)malloc( N * sizeof(int) );
// allocate the memory on the GPU
cudaMalloc( (void**)&dev_a, N * sizeof(int) );
cudaMalloc( (void**)&dev_b, N * sizeof(int) );
cudaMalloc( (void**)&dev_c, N * sizeof(int) );
// fill the arrays 'a' and 'b' on the CPU
srand ( time(NULL) );
for (int i=0; i<N; i++) {
a[i] = rand()%256;
b[i] = rand()%256;
}
// copy the arrays 'a' and 'b' to the GPU
cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice );
add<<<1, N>>>( dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost );
// verify that the GPU did the work we requested
bool success = true;
for (int i=0; i<N; i++) {
if ((a[i] + b[i]) != c[i]) {
printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] );
success = false;
}
}
if (success) printf( "We did it!\n" );
// free the memory allocated on the GPU
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
return 0;
}
|
8d7a35611dd6322254371824b5308e3f51da0ced.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <thrust/scan.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
#define SCAN_BLOCK_DIM 256 // needed by sharedMemExclusiveScan implementation
#include "exclusiveScan.cu_inl"
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* velocity;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to seperate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
__global__ void kernelClearImageSnowflake() {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
float4 value = make_float4(shade, shade, shade, 1.f);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelAdvanceFireWorks
//
// Update the position of the fireworks (if circle is firework)
__global__ void kernelAdvanceFireWorks() {
const float dt = 1.f / 60.f;
const float pi = 3.14159;
const float maxDist = 0.25f;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
float* radius = cuConstRendererParams.radius;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
return;
}
// determine the fire-work center/spark indices
int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;
int index3i = 3 * fIdx;
int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
int index3j = 3 * sIdx;
float cx = position[index3i];
float cy = position[index3i+1];
// update position
position[index3j] += velocity[index3j] * dt;
position[index3j+1] += velocity[index3j+1] * dt;
// fire-work sparks
float sx = position[index3j];
float sy = position[index3j+1];
// compute vector from firework-spark
float cxsx = sx - cx;
float cysy = sy - cy;
// compute distance from fire-work
float dist = sqrt(cxsx * cxsx + cysy * cysy);
if (dist > maxDist) { // restore to starting position
// random starting position on fire-work's rim
float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
float sinA = sin(angle);
float cosA = cos(angle);
float x = cosA * radius[fIdx];
float y = sinA * radius[fIdx];
position[index3j] = position[index3i] + x;
position[index3j+1] = position[index3i+1] + y;
position[index3j+2] = 0.0f;
// travel scaled unit length
velocity[index3j] = cosA/5.0;
velocity[index3j+1] = sinA/5.0;
velocity[index3j+2] = 0.0f;
}
}
// kernelAdvanceHypnosis
//
// Update the radius/color of the circles
__global__ void kernelAdvanceHypnosis() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* radius = cuConstRendererParams.radius;
float cutOff = 0.5f;
// place circle back in center after reaching threshold radisus
if (radius[index] > cutOff) {
radius[index] = 0.02f;
} else {
radius[index] += 0.01f;
}
}
// kernelAdvanceBouncingBalls
//
// Update the position of the balls
__global__ void kernelAdvanceBouncingBalls() {
const float dt = 1.f / 60.f;
const float kGravity = -2.8f; // sorry Newton
const float kDragCoeff = -0.8f;
const float epsilon = 0.001f;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
int index3 = 3 * index;
// reverse velocity if center position < 0
float oldVelocity = velocity[index3+1];
float oldPosition = position[index3+1];
if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition
return;
}
if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
velocity[index3+1] *= kDragCoeff;
}
// update velocity: v = u + at (only along y-axis)
velocity[index3+1] += kGravity * dt;
// update positions (only along y-axis)
position[index3+1] += velocity[index3+1] * dt;
if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
&& oldPosition < 0.0f
&& fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
velocity[index3+1] = 0.f;
position[index3+1] = 0.f;
}
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// move the snowflake animation forward one time step. Updates circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
__global__ void kernelAdvanceSnowflake() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
const float dt = 1.f / 60.f;
const float kGravity = -1.8f; // sorry Newton
const float kDragCoeff = 2.f;
int index3 = 3 * index;
float* positionPtr = &cuConstRendererParams.position[index3];
float* velocityPtr = &cuConstRendererParams.velocity[index3];
// loads from global memory
float3 position = *((float3*)positionPtr);
float3 velocity = *((float3*)velocityPtr);
// hack to make farther circles move more slowly, giving the
// illusion of parallax
float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
// add some noise to the motion to make the snow flutter
float3 noiseInput;
noiseInput.x = 10.f * position.x;
noiseInput.y = 10.f * position.y;
noiseInput.z = 255.f * position.z;
float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
noiseForce.x *= 7.5f;
noiseForce.y *= 5.f;
// drag
float2 dragForce;
dragForce.x = -1.f * kDragCoeff * velocity.x;
dragForce.y = -1.f * kDragCoeff * velocity.y;
// update positions
position.x += velocity.x * dt;
position.y += velocity.y * dt;
// update velocities
velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
float radius = cuConstRendererParams.radius[index];
// if the snowflake has moved off the left, right or bottom of
// the screen, place it back at the top and give it a
// pseudorandom x position and velocity.
if ( (position.y + radius < 0.f) ||
(position.x + radius) < -0.f ||
(position.x - radius) > 1.f)
{
noiseInput.x = 255.f * position.x;
noiseInput.y = 255.f * position.y;
noiseInput.z = 255.f * position.z;
noiseForce = cudaVec2CellNoise(noiseInput, index);
position.x = .5f + .5f * noiseForce.x;
position.y = 1.35f + radius;
// restart from 0 vertical velocity. Choose a
// pseudo-random horizontal velocity.
velocity.x = 2.f * noiseForce.y;
velocity.y = 0.f;
}
// store updated positions and velocities to global memory
*((float3*)positionPtr) = position;
*((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// given a pixel and a circle, determines the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderCircles()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[circleIndex];
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// there is a non-zero contribution. Now compute the shading value
// This conditional is in the inner loop, but it evaluates the
// same direction for all threads so it's cost is not so
// bad. Attempting to hoist this conditional is not a required
// student optimization in Assignment 2
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
float4 existingColor = *imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// global memory write
*imagePtr = newColor;
// END SHOULD-BE-ATOMIC REGION
}
/*
// kernelRenderCircles -- (CUDA device code)
//
// Each thread renders a circle. Since there is no protection to
// ensure order of update or mutual exclusion on the output image, the
// resulting image will be incorrect.
__global__ void kernelRenderCircles() {
//int table[1024][1024] = {0};
__shared__ int table[1024][1024];
int index = blockIdx.x * blockDim.x + threadIdx.x;
//__shared__ float shmImgPtr[256][180];
if (index >= cuConstRendererParams.numCircles)
return;
int index3 = 3 * index;
// read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
short minX = static_cast<short>(imageWidth * (p.x - rad));
short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1;
short minY = static_cast<short>(imageHeight * (p.y - rad));
short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1;
// a bunch of clamps. Is there a CUDA built-in for this?
short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
//printf("screenMaxX - screenMinX: %d\n", screenMaxX- screenMinX);
// for all pixels in the bonding box
for (int pixelY=screenMinY; pixelY<screenMaxY; pixelY++) {
for (int pixelX=screenMinX; pixelX<screenMaxX; pixelX++) {
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
float diffX = p.x - pixelCenterNorm.x;
float diffY = p.y - pixelCenterNorm.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[index];;
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist <= maxDist)
table[pixelX][pixelY]++;
//shadePixel(index, pixelCenterNorm, p, imgPtr,);//&shmImgPtr[threadIdx.x][4 * a]);
//imgPtr++;
}
}
}
*/
/* prefixSum provided by nVidia, but I failed to use this function */
////////////////////////////////////////////////////////////////////////////////////////
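/* Work-efficient (Blelloch-style) exclusive scan over n elements in shared memory:
the first loop is the up-sweep that builds partial sums in place, the root is then
zeroed, and the second loop is the down-sweep that distributes the prefix sums;
CONFLICT_FREE_OFFSET pads indices to avoid shared-memory bank conflicts. */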
__device__ void prescan(uint *g_odata, uint *g_idata, int n)
{
__shared__ uint temp[512]; // statically sized here (the original nVidia version allocates this dynamically on invocation)
int thid = threadIdx.x;
int offset = 1;
int ai = thid;
int bi = thid + (n/2);
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);  // bug fix: the offset must be computed from bi, not ai
temp[ai + bankOffsetA] = g_idata[ai];
temp[bi + bankOffsetB] = g_idata[bi];
for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid==0) {
//temp[n 1 /*+ CONFLICT_FREE_OFFSET(n - 1)*/ ] = 0;
temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
}
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
uint t = temp[ai];  // temp[] holds uints, so avoid the float round-trip
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[ai] = temp[ai + bankOffsetA];
g_odata[bi] = temp[bi + bankOffsetB];
}
__global__ void kernelRenderCircles() {
/* this queue is intended for remembering circle indices */
int queue[35];
int queueIndex = 0;
/* These shared memory arrays will be used by the prefixSum function */
__shared__ uint shmQueue[256]; //input of prefixSum: per-thread circle counts, later used to partition the order[] array
__shared__ uint prefixSum[256]; //output of prefixSum
__shared__ uint prefixSumScratch[2 * 256]; //The comments inside the prefixSum library file say we need this scratch space for the computation
/* This array contains the circle indices that are drawn inside this thread block's boundary (32 x 32 pixels),
and they are sorted in ascending order */
__shared__ int order[2900];
/* The commented-out statement below (extern keyword) is used for dynamic allocation of shared memory.
Reducing the size of the shared memory array has a positive impact on the execution time.
Because each scene (e.g., rgb, littlebig, rand10k, ...) needs a different array size,
I tried to allocate a different array size according to the scene (e.g., rgb, littlebig, ...),
but when I use it, it gives me a wrong result. I don't know why. */
//extern __shared__ int order[];
int blockThreadIndex = blockDim.x * threadIdx.y + threadIdx.x;
int numCircles = cuConstRendererParams.numCircles;
int threadsPerBlock = blockDim.x * blockDim.y;
/* each thread will handle the number of circles stored in variable 'circle' */
int circle = (numCircles + threadsPerBlock - 1) / threadsPerBlock;
/* imageX and imageY are the location of image pixels assigned for this thread within boundary. */
//int imageX = blockIdx.x * blockDim.x + threadIdx.x; // This is intended for assigning each thread a 1x1 pixel.
//int imageY = blockIdx.y * blockDim.y + threadIdx.y;
/*Each thread will deal with 2x2 pixels, not 1x1 pixel, by multiplying by 2.*/
int imageX = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
int imageY = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
/* These variables describe the pixel boundary of the thread block. */
//int pixelXFrom = blockDim.x * blockIdx.x; //e.g., 0, 16, 32, ...
//int pixelXTo = blockDim.x * (blockIdx.x + 1) - 1; // 15, 31, 63, ...
//int pixelYFrom = blockDim.y * blockIdx.y;
//int pixelYTo = blockDim.y * (blockIdx.y + 1) - 1;
/* Number 2 is intended for 32 x 32 pixels, not 16 x 16 pixels. */
int pixelXFrom = blockDim.x * blockIdx.x * 2; //e.g., 0, 64, 128, ...
int pixelXTo = 2 * blockDim.x * (blockIdx.x + 1) - 1; // 63, 127, 255, ...
int pixelYFrom = blockDim.y * blockIdx.y * 2;
int pixelYTo = 2 * blockDim.y * (blockIdx.y + 1) - 1;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
/* each thread only handles its own pixel boundary (2 x 2 pixels),
and these pointers are used to copy global memory data into local memory. */
float4 *imgPtr0 = (float4*)(&cuConstRendererParams.imageData[4 * (imageY * imageWidth + imageX)]);
float4 *imgPtr1 = (float4*)(&cuConstRendererParams.imageData[4 * (imageY * imageWidth + imageX + 1)]);
float4 *imgPtr2 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 1) * imageWidth + imageX)]);
float4 *imgPtr3 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 1) * imageWidth + imageX + 1)]);
/*
float4 *imgPtr4 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 1) * imageWidth + imageX)]);
float4 *imgPtr5 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 1) * imageWidth + imageX + 1)]);
float4 *imgPtr6 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 1) * imageWidth + imageX + 2)]);
float4 *imgPtr7 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 1) * imageWidth + imageX + 3)]);
float4 *imgPtr8 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 2)* imageWidth + imageX)]);
float4 *imgPtr9 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 2) * imageWidth + imageX + 1)]);
float4 *imgPtr10 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 2) * imageWidth + imageX + 2)]);
float4 *imgPtr11 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 2)* imageWidth + imageX + 3)]);
float4 *imgPtr12 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 3) * imageWidth + imageX)]);
float4 *imgPtr13 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 3) * imageWidth + imageX + 1)]);
float4 *imgPtr14 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 3)* imageWidth + imageX + 2)]);
float4 *imgPtr15 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 3)* imageWidth + imageX + 3)]);
*/
/* Copy rgb data in global memory into local memory */
float4 localImgData0 = *imgPtr0;
float4 localImgData1 = *imgPtr1;
float4 localImgData2 = *imgPtr2;
float4 localImgData3 = *imgPtr3;
/*
float4 localImgData4 = *imgPtr4;
float4 localImgData5 = *imgPtr5;
float4 localImgData6 = *imgPtr6;
float4 localImgData7 = *imgPtr7;
float4 localImgData8 = *imgPtr8;
float4 localImgData9 = *imgPtr9;
float4 localImgData10 = *imgPtr10;
float4 localImgData11 = *imgPtr11;
float4 localImgData12 = *imgPtr12;
float4 localImgData13 = *imgPtr13;
float4 localImgData14 = *imgPtr14;
float4 localImgData15 = *imgPtr15;
*/
/* Each thread deals with the circle indices (From and To) computed below to
check whether those circles lie within or cross the boundary of this thread block */
/* When there are only three circles to be drawn, each thread still gets
circleIndexFrom: 0, 1, 2, 3, ... and circleIndexTo: 0, 1, 2, 3, ..., which means,
in this case, threads 3 to 255 will also execute the for loop described below.
However, it doesn't matter because the variables "p" and "rad" (in the for loop) will have zero values */
int circleIndexFrom = blockThreadIndex * circle;
int circleIndexTo = (blockThreadIndex + 1) * circle - 1;
for (int i = circleIndexFrom; i <= circleIndexTo; i++) {
int index3 = 3 * i;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[i];
//float newRadWidth = rad * imageWidth;
//float newRadHeight = rad * imageHeight;
/* "rad" is normalized to 0 ~ 1023.xxxxxx */
float extendXLeft = pixelXFrom - (rad * imageWidth);
float extendXRight = pixelXTo + (rad * imageWidth);
float extendYTop = pixelYFrom - (rad * imageHeight);
float extendYBottom = pixelYTo + (rad * imageHeight);
/* "circle coordinate" is normailzed to 0 ~ 1023.xxxxxx */
float circleX = p.x * imageWidth;
float circleY = p.y * imageHeight;
/* This checks whether the circle with index "i" lies within or crosses the boundary of this thread block's pixels */
/* Multiplying by the values 1.01 and 0.99 is very important for correct results.
Due to the small error from above (maybe the gap between the normalized value (~1023) and the floating value (0.xxx)),
I have to multiply by these constants; it is similar to extending the boundary of the thread block's pixels */
/* I found this fact unexpectedly, because some of the results showed me "correctness failed", others "correctness pass" */
if (extendXLeft <= circleX * 1.01 && extendXRight >= circleX * 0.99 && extendYTop <= circleY * 1.01 && extendYBottom >= circleY * 0.99) {
queue[queueIndex++] = i;
}
}
/* Each thread copies its queueIndex (the number of circles inside or crossing the thread block's boundary)
into the shared memory array "shmQueue[]" */
/* For example, if each thread (here three) has queueIndex 3, 7, 5 respectively,
then, shmQueue[0] = 3, shmQueue[1] = 7, shmQueue[2] = 5 */
shmQueue[blockThreadIndex] = queueIndex;
__syncthreads();
/* Because "sharedMemExclusiveScan uses input data "shmQueue[]", we have to guarantee
that when "sharedMemExclusiveScan is called, "shmQueue[]" must be in consistent state,
which means all data had to be copied into this array at this point */
/* "prescan" is prefixSum algorithm providied by nVidia. I tried to use this to get
fast execution time, but failed to get correct result. Maybe I missed something. */
//prescan(prefixSum, shmQueue, 256);
//__syncthreads();
/* All threads, together, in this thread block will calculate prefixSum. */
/* For example, from the above example, the final result of this functions is:
[0] [1] [2]
shmQueue[] 3 7 5
prefixSum[] 0 3 10
*/
sharedMemExclusiveScan(blockThreadIndex, shmQueue, prefixSum, prefixSumScratch, 256);
__syncthreads();
/* We have to guarantee that all threads have reached this point. This is because
if some of the threads are still in sharedMemExclusiveScan, which means
they are still calculating the prefix sum, the other threads executing the code below will
read incorrect values from prefixSum[] */
/* "globalIndex" will be the total number of circles that will be processed by this thread block */
int globalIndex = prefixSum[255] + shmQueue[255];
/* By using the prefixSum[] array, each thread can calculate where to put its data */
/* For example, because thread index 0 owns 3 circles (shown above), it has to put its data
into the shared memory array "order[]" at indices 0 to 2 (start = 0, end = 3) */
int start = prefixSum[blockThreadIndex];
int end = start + shmQueue[blockThreadIndex];
//int start = (blockThreadIndex == 0) ? 0 : prefixSum[blockThreadIndex - 1];
//int end =prefixSum[blockThreadIndex];
int localIndex = 0;
/* order[] is sorted automatically because queue[] is already sorted. */
for (int i = start; i < end; i++) {
order[i] = queue[localIndex++];
}
__syncthreads();
/* To get correct value of array "order", all threads has to stop here before
executing below loop */
/* Loop circle indices that are stored in shared memory array "order[]" */
for (int i= 0 ; i < globalIndex; i++) {
int a = order[i];
int index3 = 3 * a;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
/* calculate the center point of each pixel which is managed by this thread */
float2 pixelCenterNorm0 = make_float2(invWidth * (static_cast<float>(imageX) + 0.5f),
invHeight * (static_cast<float>(imageY) + 0.5f));
float2 pixelCenterNorm1 = make_float2(invWidth * (static_cast<float>(imageX + 1) + 0.5f),
invHeight * (static_cast<float>(imageY) + 0.5f));
float2 pixelCenterNorm2 = make_float2(invWidth * (static_cast<float>(imageX) + 0.5f),
invHeight * (static_cast<float>(imageY+ 1) + 0.5f));
float2 pixelCenterNorm3 = make_float2(invWidth * (static_cast<float>(imageX + 1) + 0.5f),
invHeight * (static_cast<float>(imageY + 1) + 0.5f));
/*
float2 pixelCenterNorm4 = make_float2(invWidth * (static_cast<float>(imageX) + 0.5f),
invHeight * (static_cast<float>(imageY + 1) + 0.5f));
float2 pixelCenterNorm5 = make_float2(invWidth * (static_cast<float>(imageX + 1) + 0.5f),
invHeight * (static_cast<float>(imageY + 1) + 0.5f));
float2 pixelCenterNorm6 = make_float2(invWidth * (static_cast<float>(imageX + 2) + 0.5f),
invHeight * (static_cast<float>(imageY + 1) + 0.5f));
float2 pixelCenterNorm7 = make_float2(invWidth * (static_cast<float>(imageX + 3) + 0.5f),
invHeight * (static_cast<float>(imageY + 1) + 0.5f));
float2 pixelCenterNorm8 = make_float2(invWidth * (static_cast<float>(imageX) + 0.5f),
invHeight * (static_cast<float>(imageY + 2) + 0.5f));
float2 pixelCenterNorm9 = make_float2(invWidth * (static_cast<float>(imageX + 1) + 0.5f),
invHeight * (static_cast<float>(imageY + 2) + 0.5f));
float2 pixelCenterNorm10 = make_float2(invWidth * (static_cast<float>(imageX + 2) + 0.5f),
invHeight * (static_cast<float>(imageY + 2) + 0.5f));
float2 pixelCenterNorm11 = make_float2(invWidth * (static_cast<float>(imageX + 3) + 0.5f),
invHeight * (static_cast<float>(imageY + 2) + 0.5f));
float2 pixelCenterNorm12 = make_float2(invWidth * (static_cast<float>(imageX) + 0.5f),
invHeight * (static_cast<float>(imageY + 3) + 0.5f));
float2 pixelCenterNorm13 = make_float2(invWidth * (static_cast<float>(imageX + 1) + 0.5f),
invHeight * (static_cast<float>(imageY + 3) + 0.5f));
float2 pixelCenterNorm14 = make_float2(invWidth * (static_cast<float>(imageX + 2) + 0.5f),
invHeight * (static_cast<float>(imageY + 3) + 0.5f));
float2 pixelCenterNorm15 = make_float2(invWidth * (static_cast<float>(imageX + 3) + 0.5f),
invHeight * (static_cast<float>(imageY + 3) + 0.5f));
*/
/* each pixel's RGB is shaded in parallel, because each thread has its own range of pixels */
shadePixel(a, pixelCenterNorm0, p, &localImgData0);
shadePixel(a, pixelCenterNorm1, p, &localImgData1);
shadePixel(a, pixelCenterNorm2, p, &localImgData2);
shadePixel(a, pixelCenterNorm3, p, &localImgData3);
/*
shadePixel(a, pixelCenterNorm4, p, &localImgData4);
shadePixel(a, pixelCenterNorm5, p, &localImgData5);
shadePixel(a, pixelCenterNorm6, p, &localImgData6);
shadePixel(a, pixelCenterNorm7, p, &localImgData7);
shadePixel(a, pixelCenterNorm8, p, &localImgData8);
shadePixel(a, pixelCenterNorm9, p, &localImgData9);
shadePixel(a, pixelCenterNorm10, p, &localImgData10);
shadePixel(a, pixelCenterNorm11, p, &localImgData11);
shadePixel(a, pixelCenterNorm12, p, &localImgData12);
shadePixel(a, pixelCenterNorm13, p, &localImgData13);
shadePixel(a, pixelCenterNorm14, p, &localImgData14);
shadePixel(a, pixelCenterNorm15, p, &localImgData15);
//shadePixel(a, pixelCenterNorm2, p, &localImgData2);
//shadePixel(a, pixelCenterNorm3, p, &localImgData3);
//shadePixel(a, pixelCenterNorm4, p, &localImgData4);
//shadePixel(a, pixelCenterNorm, p, &shmImgData[threadIdx.y * 16 + threadIdx.x]);
*/
}
/* finally 2x2 pixels' imgData is copied into global memory */
*imgPtr0 = localImgData0;
*imgPtr1 = localImgData1;
*imgPtr2 = localImgData2;
*imgPtr3 = localImgData3;
/*
*imgPtr4 = localImgData4;
*imgPtr5 = localImgData5;
*imgPtr6 = localImgData6;
*imgPtr7 = localImgData7;
*imgPtr8 = localImgData8;
*imgPtr9 = localImgData9;
*imgPtr10 = localImgData10;
*imgPtr11 = localImgData11;
*imgPtr12 = localImgData12;
*imgPtr13 = localImgData13;
*imgPtr14 = localImgData14;
*imgPtr15 = localImgData15;
*/
}
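/* The scan bookkeeping above (shmQueue -> prefixSum -> order) follows standard exclusive-scan
semantics: each thread's output offset is the sum of all earlier threads' counts. The helper
below is only an illustrative host-side sketch of that mapping (the name referenceExclusiveScan
is not part of the original renderer). */
static inline void referenceExclusiveScan(const unsigned int* counts, unsigned int* offsets, int n) {
    unsigned int running = 0;
    for (int i = 0; i < n; i++) {
        offsets[i] = running;      // offsets[i] = counts[0] + ... + counts[i-1]
        running += counts[i];      // e.g. counts {3, 7, 5} -> offsets {0, 3, 10}
    }
}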
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
velocity = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceVelocity = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] velocity;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
hipFree(cudaDevicePosition);
hipFree(cudaDeviceVelocity);
hipFree(cudaDeviceColor);
hipFree(cudaDeviceRadius);
hipFree(cudaDeviceImageData);
}
}
const Image*
CudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
hipMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
hipMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
std::string name;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for CudaRenderer\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
name = deviceProps.name;
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// hipMalloc and hipMemcpy
hipMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
hipMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
hipMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, hipMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.position = cudaDevicePosition;
params.velocity = cudaDeviceVelocity;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
hipMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
// also need to copy over the noise lookup tables, so we can
// implement noise on the GPU
int* permX;
int* permY;
float* value1D;
getNoiseTables(&permX, &permY, &value1D);
hipMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
// last, copy over the color table that's used by the shading
// function for circles in the snowflake demo
float lookupTable[COLOR_MAP_SIZE][3] = {
{1.f, 1.f, 1.f},
{1.f, 1.f, 1.f},
{.8f, .9f, 1.f},
{.8f, .9f, 1.f},
{.8f, 0.8f, 1.f},
};
hipMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clears the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
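// (the "+ blockDim - 1" is the usual ceiling division: e.g. a 1000-pixel-wide image with
// 16-wide blocks yields (1000 + 15) / 16 = 63 blocks, and the bounds checks inside the
// clear kernels discard the few extra threads in the last block)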
if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
hipLaunchKernelGGL(( kernelClearImageSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
} else {
hipLaunchKernelGGL(( kernelClearImage), dim3(gridDim), dim3(blockDim), 0, 0, 1.f, 1.f, 1.f, 1.f);
}
hipDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
void
CudaRenderer::advanceAnimation() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// only the snowflake scene has animation
if (sceneName == SNOWFLAKES) {
hipLaunchKernelGGL(( kernelAdvanceSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == BOUNCING_BALLS) {
hipLaunchKernelGGL(( kernelAdvanceBouncingBalls), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == HYPNOSIS) {
hipLaunchKernelGGL(( kernelAdvanceHypnosis), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == FIREWORKS) {
hipLaunchKernelGGL(( kernelAdvanceFireWorks), dim3(gridDim), dim3(blockDim), 0, 0, );
}
hipDeviceSynchronize();
}
void
CudaRenderer::render() {
// 256 threads per block is a healthy number
/*
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
*/
/*
int size = 2000;
if (sceneName == CIRCLE_RGB || sceneName == CIRCLE_RGBY)
size = 300;
else if (sceneName == CIRCLE_TEST_10K)
size = 300;
else if (sceneName == CIRCLE_TEST_100K)
size = 1900;
else
size = 2800;
printf("before kenrel size: %d\n", size);
*/
dim3 blockDim(16, 16);
dim3 gridDim(
(image->width + (blockDim.x * 2) - 1) / (blockDim.x * 2),
(image->height + (blockDim.y * 2) - 1) / (blockDim.y * 2));
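// (blockDim is 16x16 and every thread shades a 2x2 pixel tile, so each block covers a
// 32x32-pixel region; e.g. a 1024x1024 image launches (1024 + 31) / 32 = 32 blocks per axis)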
hipLaunchKernelGGL(( kernelRenderCircles), dim3(gridDim), dim3(blockDim), 0, 0, );
hipDeviceSynchronize();
}
| 8d7a35611dd6322254371824b5308e3f51da0ced.cu | #include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <thrust/scan.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
#define SCAN_BLOCK_DIM 256 // needed by sharedMemExclusiveScan implementation
#include "exclusiveScan.cu_inl"
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* velocity;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to seperate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
__global__ void kernelClearImageSnowflake() {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
float4 value = make_float4(shade, shade, shade, 1.f);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four seperate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four seperate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelAdvanceFireWorks
//
// Update the position of the fireworks (if circle is firework)
__global__ void kernelAdvanceFireWorks() {
const float dt = 1.f / 60.f;
const float pi = 3.14159;
const float maxDist = 0.25f;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
float* radius = cuConstRendererParams.radius;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
return;
}
// determine the fire-work center/spark indices
int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;
int index3i = 3 * fIdx;
int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
int index3j = 3 * sIdx;
float cx = position[index3i];
float cy = position[index3i+1];
// update position
position[index3j] += velocity[index3j] * dt;
position[index3j+1] += velocity[index3j+1] * dt;
// fire-work sparks
float sx = position[index3j];
float sy = position[index3j+1];
// compute vector from firework-spark
float cxsx = sx - cx;
float cysy = sy - cy;
// compute distance from fire-work
float dist = sqrt(cxsx * cxsx + cysy * cysy);
if (dist > maxDist) { // restore to starting position
// random starting position on fire-work's rim
float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
float sinA = sin(angle);
float cosA = cos(angle);
float x = cosA * radius[fIdx];
float y = sinA * radius[fIdx];
position[index3j] = position[index3i] + x;
position[index3j+1] = position[index3i+1] + y;
position[index3j+2] = 0.0f;
// travel scaled unit length
velocity[index3j] = cosA/5.0;
velocity[index3j+1] = sinA/5.0;
velocity[index3j+2] = 0.0f;
}
}
// kernelAdvanceHypnosis
//
// Update the radius/color of the circles
__global__ void kernelAdvanceHypnosis() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* radius = cuConstRendererParams.radius;
float cutOff = 0.5f;
// place circle back in center after reaching threshold radius
if (radius[index] > cutOff) {
radius[index] = 0.02f;
} else {
radius[index] += 0.01f;
}
}
// kernelAdvanceBouncingBalls
//
// Update the position of the balls
__global__ void kernelAdvanceBouncingBalls() {
const float dt = 1.f / 60.f;
const float kGravity = -2.8f; // sorry Newton
const float kDragCoeff = -0.8f;
const float epsilon = 0.001f;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
int index3 = 3 * index;
// reverse velocity if center position < 0
float oldVelocity = velocity[index3+1];
float oldPosition = position[index3+1];
if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition
return;
}
if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
velocity[index3+1] *= kDragCoeff;
}
// update velocity: v = u + at (only along y-axis)
velocity[index3+1] += kGravity * dt;
// update positions (only along y-axis)
position[index3+1] += velocity[index3+1] * dt;
if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
&& oldPosition < 0.0f
&& fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
velocity[index3+1] = 0.f;
position[index3+1] = 0.f;
}
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// move the snowflake animation forward one time step. Updates circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
__global__ void kernelAdvanceSnowflake() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
const float dt = 1.f / 60.f;
const float kGravity = -1.8f; // sorry Newton
const float kDragCoeff = 2.f;
int index3 = 3 * index;
float* positionPtr = &cuConstRendererParams.position[index3];
float* velocityPtr = &cuConstRendererParams.velocity[index3];
// loads from global memory
float3 position = *((float3*)positionPtr);
float3 velocity = *((float3*)velocityPtr);
// hack to make farther circles move more slowly, giving the
// illusion of parallax
float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
// add some noise to the motion to make the snow flutter
float3 noiseInput;
noiseInput.x = 10.f * position.x;
noiseInput.y = 10.f * position.y;
noiseInput.z = 255.f * position.z;
float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
noiseForce.x *= 7.5f;
noiseForce.y *= 5.f;
// drag
float2 dragForce;
dragForce.x = -1.f * kDragCoeff * velocity.x;
dragForce.y = -1.f * kDragCoeff * velocity.y;
// update positions
position.x += velocity.x * dt;
position.y += velocity.y * dt;
// update velocities
velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
float radius = cuConstRendererParams.radius[index];
// if the snowflake has moved off the left, right or bottom of
// the screen, place it back at the top and give it a
// pseudorandom x position and velocity.
if ( (position.y + radius < 0.f) ||
(position.x + radius) < -0.f ||
(position.x - radius) > 1.f)
{
noiseInput.x = 255.f * position.x;
noiseInput.y = 255.f * position.y;
noiseInput.z = 255.f * position.z;
noiseForce = cudaVec2CellNoise(noiseInput, index);
position.x = .5f + .5f * noiseForce.x;
position.y = 1.35f + radius;
// restart from 0 vertical velocity. Choose a
// pseudo-random horizontal velocity.
velocity.x = 2.f * noiseForce.y;
velocity.y = 0.f;
}
// store updated positions and velocities to global memory
*((float3*)positionPtr) = position;
*((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// given a pixel and a circle, determines the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderCircles()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[circleIndex];
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// there is a non-zero contribution. Now compute the shading value
// This conditional is in the inner loop, but it evaluates the
// same direction for all threads so its cost is not so
// bad. Attempting to hoist this conditional is not a required
// student optimization in Assignment 2
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
float4 existingColor = *imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// global memory write
*imagePtr = newColor;
// END SHOULD-BE-ATOMIC REGION
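// (this read-modify-write implements standard "over" alpha compositing,
//  new = alpha * rgb + (1 - alpha) * existing; it is only safe because, in the
//  kernelRenderCircles below, each output pixel is updated by exactly one thread,
//  which composites its circles one at a time in ascending index order)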
}
/*
// kernelRenderCircles -- (CUDA device code)
//
// Each thread renders a circle. Since there is no protection to
// ensure order of update or mutual exclusion on the output image, the
// resulting image will be incorrect.
__global__ void kernelRenderCircles() {
//int table[1024][1024] = {0};
__shared__ int table[1024][1024];
int index = blockIdx.x * blockDim.x + threadIdx.x;
//__shared__ float shmImgPtr[256][180];
if (index >= cuConstRendererParams.numCircles)
return;
int index3 = 3 * index;
// read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
short minX = static_cast<short>(imageWidth * (p.x - rad));
short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1;
short minY = static_cast<short>(imageHeight * (p.y - rad));
short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1;
// a bunch of clamps. Is there a CUDA built-in for this?
short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
//printf("screenMaxX - screenMinX: %d\n", screenMaxX- screenMinX);
// for all pixels in the bounding box
for (int pixelY=screenMinY; pixelY<screenMaxY; pixelY++) {
for (int pixelX=screenMinX; pixelX<screenMaxX; pixelX++) {
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f),
invHeight * (static_cast<float>(pixelY) + 0.5f));
float diffX = p.x - pixelCenterNorm.x;
float diffY = p.y - pixelCenterNorm.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[index];;
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist <= maxDist)
table[pixelX][pixelY]++;
//shadePixel(index, pixelCenterNorm, p, imgPtr,);//&shmImgPtr[threadIdx.x][4 * a]);
//imgPtr++;
}
}
}
*/
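/* The disabled kernel above parallelizes over circles, so two circles covering the same pixel
could interleave their read-modify-write updates and also finish in the wrong order. The
kernelRenderCircles further below instead parallelizes over pixels: every 2x2 pixel tile is
owned by a single thread, which walks the block's sorted circle list, so both atomicity and
circle ordering are preserved without locks. */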
/* prefixSum provided by nVidia, but I failed to use this function */
////////////////////////////////////////////////////////////////////////////////////////
__device__ void prescan(uint *g_odata, uint *g_idata, int n)
{
__shared__ uint temp[512];// allocated on invocation
int thid = threadIdx.x;
int offset = 1;
int ai = thid;
int bi = thid + (n/2);
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi); // fixed: the offset for bi must be computed from bi, not ai
temp[ai + bankOffsetA] = g_idata[ai];
temp[bi + bankOffsetB] = g_idata[bi];
for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid==0) {
//temp[n - 1 /*+ CONFLICT_FREE_OFFSET(n - 1)*/ ] = 0;
temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
}
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[ai] = temp[ai + bankOffsetA];
g_odata[bi] = temp[bi + bankOffsetB];
}
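/* For reference, an exclusive scan of the counts {3, 7, 5, 1} should produce {0, 3, 10, 15}:
the up-sweep loop builds partial sums up the tree, the root is zeroed, and the down-sweep
loop pushes the sums back down (the classic Blelloch work-efficient scan this routine follows). */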
__global__ void kernelRenderCircles() {
/* this queue is intended for remembering circle index */
int queue[35];
int queueIndex = 0;
/* These sharemd memory array will be used in prefixSum function */
__shared__ uint shmQueue[256]; //input of prefixSum : the role of this array is to partition the indices of the order[] array
__shared__ uint prefixSum[256]; //output of prefixSum
__shared__ uint prefixSumScratch[2 * 256]; //The comments inside the prefixSum library file say we need this to calculate it
/* This array contains the circle indices that are colored inside a thread block's boundary (32 x 32 pixels),
and they are sorted in ascending order */
__shared__ int order[2900];
/* The statement shown in line 542 (the extern keyword) is used for dynamic allocation of shared memory.
Reducing the size of the shared memory array has a positive impact on the execution time.
Since each image (e.g., rgb, littlebig, rand10k, ...) needs a different array size,
I tried to allocate a different array size according to the image (e.g., rgb, littlebig, ...),
but when I use it, it gives me a wrong result. I don't know why. */
//extern __shared__ int order[];
int blockThreadIndex = blockDim.x * threadIdx.y + threadIdx.x;
int numCircles = cuConstRendererParams.numCircles;
int threadsPerBlock = blockDim.x * blockDim.y;
/* each thread will handle the number of circles stored in variable 'circle' */
int circle = (numCircles + threadsPerBlock - 1) / threadsPerBlock;
/* imageX and imageY are the location of image pixels assigned for this thread within boundary. */
//int imageX = blockIdx.x * blockDim.x + threadIdx.x; // This is intended for assigning each thread a 1x1 pixel.
//int imageY = blockIdx.y * blockDim.y + threadIdx.y;
/*Each thread will deal with 2x2 pixels, not 1x1 pixel by multiplying 2.*/
int imageX = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
int imageY = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
/* These variables describe the pixel boundary of the thread block. */
//int pixelXFrom = blockDim.x * blockIdx.x; //e.g., 0, 16, 32, ...
//int pixelXTo = blockDim.x * (blockIdx.x + 1) - 1; // 15, 31, 63, ...
//int pixelYFrom = blockDim.y * blockIdx.y;
//int pixelYTo = blockDim.y * (blockIdx.y + 1) - 1;
/* Number 2 is intended for 32 x 32 pixels, not 16 x 16 pixels. */
int pixelXFrom = blockDim.x * blockIdx.x * 2; //e.g., 0, 64, 128, ...
int pixelXTo = 2 * blockDim.x * (blockIdx.x + 1) - 1; // 63, 127, 255, ...
int pixelYFrom = blockDim.y * blockIdx.y * 2;
int pixelYTo = 2 * blockDim.y * (blockIdx.y + 1) - 1;
float invWidth = 1.f / imageWidth;
float invHeight = 1.f / imageHeight;
/* each thread only handles its own pixel boundary (2 x 2 pixels),
and these pointers are used to copy global memory data into local memory. */
float4 *imgPtr0 = (float4*)(&cuConstRendererParams.imageData[4 * (imageY * imageWidth + imageX)]);
float4 *imgPtr1 = (float4*)(&cuConstRendererParams.imageData[4 * (imageY * imageWidth + imageX + 1)]);
float4 *imgPtr2 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 1) * imageWidth + imageX)]);
float4 *imgPtr3 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 1) * imageWidth + imageX + 1)]);
/*
float4 *imgPtr4 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 1) * imageWidth + imageX)]);
float4 *imgPtr5 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 1) * imageWidth + imageX + 1)]);
float4 *imgPtr6 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 1) * imageWidth + imageX + 2)]);
float4 *imgPtr7 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 1) * imageWidth + imageX + 3)]);
float4 *imgPtr8 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 2)* imageWidth + imageX)]);
float4 *imgPtr9 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 2) * imageWidth + imageX + 1)]);
float4 *imgPtr10 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 2) * imageWidth + imageX + 2)]);
float4 *imgPtr11 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 2)* imageWidth + imageX + 3)]);
float4 *imgPtr12 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 3) * imageWidth + imageX)]);
float4 *imgPtr13 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 3) * imageWidth + imageX + 1)]);
float4 *imgPtr14 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 3)* imageWidth + imageX + 2)]);
float4 *imgPtr15 = (float4*)(&cuConstRendererParams.imageData[4 * ((imageY + 3)* imageWidth + imageX + 3)]);
*/
/* Copy rgb data in global memory into local memory */
float4 localImgData0 = *imgPtr0;
float4 localImgData1 = *imgPtr1;
float4 localImgData2 = *imgPtr2;
float4 localImgData3 = *imgPtr3;
/*
float4 localImgData4 = *imgPtr4;
float4 localImgData5 = *imgPtr5;
float4 localImgData6 = *imgPtr6;
float4 localImgData7 = *imgPtr7;
float4 localImgData8 = *imgPtr8;
float4 localImgData9 = *imgPtr9;
float4 localImgData10 = *imgPtr10;
float4 localImgData11 = *imgPtr11;
float4 localImgData12 = *imgPtr12;
float4 localImgData13 = *imgPtr13;
float4 localImgData14 = *imgPtr14;
float4 localImgData15 = *imgPtr15;
*/
/* Each thread deals with the circle indices (From and To) shown below to
check whether they are within or across the boundary of this thread block */
/* When there are only three circles to be drawn, each thread still gets
circleIndexFrom: 0, 1, 2, 3, ... and circleIndexTo: 0, 1, 2, 3, ..., which means,
in this case, threads 3 to 255 will also execute the for loop described below.
However, it doesn't matter, because the variables "p" and "rad" (in the for loop) will have zero values */
int circleIndexFrom = blockThreadIndex * circle;
int circleIndexTo = (blockThreadIndex + 1) * circle - 1;
for (int i = circleIndexFrom; i <= circleIndexTo; i++) {
int index3 = 3 * i;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[i];
//float newRadWidth = rad * imageWidth;
//float newRadHeight = rad * imageHeight;
/* "rad" is normalized to 0 ~ 1023.xxxxxx */
float extendXLeft = pixelXFrom - (rad * imageWidth);
float extendXRight = pixelXTo + (rad * imageWidth);
float extendYTop = pixelYFrom - (rad * imageHeight);
float extendYBottom = pixelYTo + (rad * imageHeight);
/* "circle coordinate" is normailzed to 0 ~ 1023.xxxxxx */
float circleX = p.x * imageWidth;
float circleY = p.y * imageHeight;
/* This will check whether the circle index "i" exist within or across the boundary of this thread block's pixels */
/* Multiplying by the values 1.01 and 0.99 is very important for correctness.
Due to the small error from above (maybe the gap between the normalized value (~1023) and the floating value (0.xxx)),
I have to multiply by these constants; it is similar to extending the boundary of the thread block's pixels */
/* I found this fact unexpectedly, because some of the results show me "correctness failed", others "correctness pass" */
if (extendXLeft <= circleX * 1.01 && extendXRight >= circleX * 0.99 && extendYTop <= circleY * 1.01 && extendYBottom >= circleY * 0.99) {
queue[queueIndex++] = i;
}
}
/* Each thread copies its queueIndex (which holds the number of circles inside or across the thread block's boundary)
into the shared memory array "shmQueue[]" */
/* For example, if each of three threads has queueIndex 3, 7, 5 respectively,
then shmQueue[0] = 3, shmQueue[1] = 7, shmQueue[2] = 5 */
shmQueue[blockThreadIndex] = queueIndex;
__syncthreads();
/* Because "sharedMemExclusiveScan uses input data "shmQueue[]", we have to guarantee
that when "sharedMemExclusiveScan is called, "shmQueue[]" must be in consistent state,
which means all data had to be copied into this array at this point */
/* "prescan" is prefixSum algorithm providied by nVidia. I tried to use this to get
fast execution time, but failed to get correct result. Maybe I missed something. */
//prescan(prefixSum, shmQueue, 256);
//__syncthreads();
/* All threads, together, in this thread block will calculate prefixSum. */
/* For example, from the above example, the final result of this functions is:
[0] [1] [2]
shmQueue[] 3 7 5
prefixSum[] 0 3 10
*/
sharedMemExclusiveScan(blockThreadIndex, shmQueue, prefixSum, prefixSumScratch, 256);
__syncthreads();
/* We have to guarantee that all threads have reached this point. This is because,
if some of the threads are still inside sharedMemExclusiveScan, which means
they are still calculating the prefix sum, the other threads executing the code below would
get incorrect values from prefixSum[] */
/* "globalIndex" will be the total number of circles that will be processed by this thread block */
int globalIndex = prefixSum[255] + shmQueue[255];
/* By using the prefixSum[] array, each thread can calculate where to put its data */
/* For example, because thread index 0 owns 3 circles (shown above), it has to put its data
into the shared memory array "order[]" at indices 0 to 2 (start = 0, end = 3) */
int start = prefixSum[blockThreadIndex];
int end = start + shmQueue[blockThreadIndex];
//int start = (blockThreadIndex == 0) ? 0 : prefixSum[blockThreadIndex - 1];
//int end =prefixSum[blockThreadIndex];
int localIndex = 0;
/* order[] is sorted automatically because queue[] is already sorted. */
for (int i = start; i < end; i++) {
order[i] = queue[localIndex++];
}
__syncthreads();
/* To get correct value of array "order", all threads has to stop here before
executing below loop */
/* Loop circle indices that are stored in shared memory array "order[]" */
for (int i= 0 ; i < globalIndex; i++) {
int a = order[i];
int index3 = 3 * a;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
/* calculate the center point of each pixel which is managed by this thread */
float2 pixelCenterNorm0 = make_float2(invWidth * (static_cast<float>(imageX) + 0.5f),
invHeight * (static_cast<float>(imageY) + 0.5f));
float2 pixelCenterNorm1 = make_float2(invWidth * (static_cast<float>(imageX + 1) + 0.5f),
invHeight * (static_cast<float>(imageY) + 0.5f));
float2 pixelCenterNorm2 = make_float2(invWidth * (static_cast<float>(imageX) + 0.5f),
invHeight * (static_cast<float>(imageY+ 1) + 0.5f));
float2 pixelCenterNorm3 = make_float2(invWidth * (static_cast<float>(imageX + 1) + 0.5f),
invHeight * (static_cast<float>(imageY + 1) + 0.5f));
/*
float2 pixelCenterNorm4 = make_float2(invWidth * (static_cast<float>(imageX) + 0.5f),
invHeight * (static_cast<float>(imageY + 1) + 0.5f));
float2 pixelCenterNorm5 = make_float2(invWidth * (static_cast<float>(imageX + 1) + 0.5f),
invHeight * (static_cast<float>(imageY + 1) + 0.5f));
float2 pixelCenterNorm6 = make_float2(invWidth * (static_cast<float>(imageX + 2) + 0.5f),
invHeight * (static_cast<float>(imageY + 1) + 0.5f));
float2 pixelCenterNorm7 = make_float2(invWidth * (static_cast<float>(imageX + 3) + 0.5f),
invHeight * (static_cast<float>(imageY + 1) + 0.5f));
float2 pixelCenterNorm8 = make_float2(invWidth * (static_cast<float>(imageX) + 0.5f),
invHeight * (static_cast<float>(imageY + 2) + 0.5f));
float2 pixelCenterNorm9 = make_float2(invWidth * (static_cast<float>(imageX + 1) + 0.5f),
invHeight * (static_cast<float>(imageY + 2) + 0.5f));
float2 pixelCenterNorm10 = make_float2(invWidth * (static_cast<float>(imageX + 2) + 0.5f),
invHeight * (static_cast<float>(imageY + 2) + 0.5f));
float2 pixelCenterNorm11 = make_float2(invWidth * (static_cast<float>(imageX + 3) + 0.5f),
invHeight * (static_cast<float>(imageY + 2) + 0.5f));
float2 pixelCenterNorm12 = make_float2(invWidth * (static_cast<float>(imageX) + 0.5f),
invHeight * (static_cast<float>(imageY + 3) + 0.5f));
float2 pixelCenterNorm13 = make_float2(invWidth * (static_cast<float>(imageX + 1) + 0.5f),
invHeight * (static_cast<float>(imageY + 3) + 0.5f));
float2 pixelCenterNorm14 = make_float2(invWidth * (static_cast<float>(imageX + 2) + 0.5f),
invHeight * (static_cast<float>(imageY + 3) + 0.5f));
float2 pixelCenterNorm15 = make_float2(invWidth * (static_cast<float>(imageX + 3) + 0.5f),
invHeight * (static_cast<float>(imageY + 3) + 0.5f));
*/
/* each pixel's RGB is shaded in parallel, because each thread has its own range of pixels */
shadePixel(a, pixelCenterNorm0, p, &localImgData0);
shadePixel(a, pixelCenterNorm1, p, &localImgData1);
shadePixel(a, pixelCenterNorm2, p, &localImgData2);
shadePixel(a, pixelCenterNorm3, p, &localImgData3);
/*
shadePixel(a, pixelCenterNorm4, p, &localImgData4);
shadePixel(a, pixelCenterNorm5, p, &localImgData5);
shadePixel(a, pixelCenterNorm6, p, &localImgData6);
shadePixel(a, pixelCenterNorm7, p, &localImgData7);
shadePixel(a, pixelCenterNorm8, p, &localImgData8);
shadePixel(a, pixelCenterNorm9, p, &localImgData9);
shadePixel(a, pixelCenterNorm10, p, &localImgData10);
shadePixel(a, pixelCenterNorm11, p, &localImgData11);
shadePixel(a, pixelCenterNorm12, p, &localImgData12);
shadePixel(a, pixelCenterNorm13, p, &localImgData13);
shadePixel(a, pixelCenterNorm14, p, &localImgData14);
shadePixel(a, pixelCenterNorm15, p, &localImgData15);
//shadePixel(a, pixelCenterNorm2, p, &localImgData2);
//shadePixel(a, pixelCenterNorm3, p, &localImgData3);
//shadePixel(a, pixelCenterNorm4, p, &localImgData4);
//shadePixel(a, pixelCenterNorm, p, &shmImgData[threadIdx.y * 16 + threadIdx.x]);
*/
}
/* finally 2x2 pixels' imgData is copied into global memory */
*imgPtr0 = localImgData0;
*imgPtr1 = localImgData1;
*imgPtr2 = localImgData2;
*imgPtr3 = localImgData3;
/*
*imgPtr4 = localImgData4;
*imgPtr5 = localImgData5;
*imgPtr6 = localImgData6;
*imgPtr7 = localImgData7;
*imgPtr8 = localImgData8;
*imgPtr9 = localImgData9;
*imgPtr10 = localImgData10;
*imgPtr11 = localImgData11;
*imgPtr12 = localImgData12;
*imgPtr13 = localImgData13;
*imgPtr14 = localImgData14;
*imgPtr15 = localImgData15;
*/
}
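/* The in-block circle test above expands the block's pixel rectangle by the circle radius and
then checks the circle center against the expanded rectangle (plus a small fudge factor).
A minimal device-side sketch of that conservative test, without the fudge factor
(circleMayTouchBox is an illustrative name, not used by the renderer), might look like: */
__device__ __forceinline__ bool circleMayTouchBox(float cx, float cy, float r,
                                                  float left, float right,
                                                  float top, float bottom) {
    // true when the circle's center lies inside the box grown by r on every side
    return (cx >= left - r) && (cx <= right + r) &&
           (cy >= top - r) && (cy <= bottom + r);
}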
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
velocity = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceVelocity = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] velocity;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
cudaFree(cudaDevicePosition);
cudaFree(cudaDeviceVelocity);
cudaFree(cudaDeviceColor);
cudaFree(cudaDeviceRadius);
cudaFree(cudaDeviceImageData);
}
}
const Image*
CudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
cudaMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
cudaMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
std::string name;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for CudaRenderer\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
name = deviceProps.name;
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// cudaMalloc and cudaMemcpy
cudaMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
cudaMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
cudaMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, cudaMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.position = cudaDevicePosition;
params.velocity = cudaDeviceVelocity;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
cudaMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
// also need to copy over the noise lookup tables, so we can
// implement noise on the GPU
int* permX;
int* permY;
float* value1D;
getNoiseTables(&permX, &permY, &value1D);
cudaMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
cudaMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
cudaMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
// last, copy over the color table that's used by the shading
// function for circles in the snowflake demo
float lookupTable[COLOR_MAP_SIZE][3] = {
{1.f, 1.f, 1.f},
{1.f, 1.f, 1.f},
{.8f, .9f, 1.f},
{.8f, .9f, 1.f},
{.8f, 0.8f, 1.f},
};
cudaMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clears the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
kernelClearImageSnowflake<<<gridDim, blockDim>>>();
} else {
kernelClearImage<<<gridDim, blockDim>>>(1.f, 1.f, 1.f, 1.f);
}
cudaDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
void
CudaRenderer::advanceAnimation() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// only the snowflake scene has animation
if (sceneName == SNOWFLAKES) {
kernelAdvanceSnowflake<<<gridDim, blockDim>>>();
} else if (sceneName == BOUNCING_BALLS) {
kernelAdvanceBouncingBalls<<<gridDim, blockDim>>>();
} else if (sceneName == HYPNOSIS) {
kernelAdvanceHypnosis<<<gridDim, blockDim>>>();
} else if (sceneName == FIREWORKS) {
kernelAdvanceFireWorks<<<gridDim, blockDim>>>();
}
cudaDeviceSynchronize();
}
void
CudaRenderer::render() {
// 256 threads per block is a healthy number
/*
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
*/
/*
int size = 2000;
if (sceneName == CIRCLE_RGB || sceneName == CIRCLE_RGBY)
size = 300;
else if (sceneName == CIRCLE_TEST_10K)
size = 300;
else if (sceneName == CIRCLE_TEST_100K)
size = 1900;
else
size = 2800;
printf("before kenrel size: %d\n", size);
*/
dim3 blockDim(16, 16);
dim3 gridDim(
(image->width + (blockDim.x * 2) - 1) / (blockDim.x * 2),
(image->height + (blockDim.y * 2) - 1) / (blockDim.y * 2));
kernelRenderCircles<<<gridDim, blockDim>>>();
cudaDeviceSynchronize();
}
|
02ff30e7738966d973ccc40f60a4f9697f742769.hip | // !!! This is a file automatically generated by hipify!!!
/* Bluebird Library - High performance CPUs and GPUs computing library.
*
* Copyright (C) 2012-2013 Orange Owl Solutions.
*
* This file is part of Bluebird Library.
* Bluebird Library is free software: you can redistribute it and/or modify
* it under the terms of the Lesser GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Bluebird Library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Lesser GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Bluebird Library. If not, see <http://www.gnu.org/licenses/>.
*
*
* For any request, question or bug reporting please visit http://www.orangeowlsolutions.com/
* or send an e-mail to: [email protected]
*
*
*/
using namespace std;
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <conio.h>
#include <typeinfo>
#include <iostream>
// includes CUDA
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "ComplexTypes.cuh"
#include "ComplexTypes.cu"
using namespace BB;
// includes Expression Templates library
#include "Macros.h"
#include "HExceptions.h"
#include "HExceptions.cpp"
#include "DExceptions.cuh"
#include "Constants.h"
#include "DStreams.cuh"
#include "DStreams.cu"
#include "P2P.cu"
#include "SubIndicesAccessHandling.h"
#include "SubIndicesAccessHandling.cpp"
#include "DGlobals.cuh"
#include "CudaExpressionMacros.cuh"
////#include "CudaExpressionKernels.cuh"
#include "Expression.cuh"
//
//// includes CUDA Expression Templates library
#include "Scalar.cuh"
#include "Promotion.cuh"
#include "Hmatrix.h"
#include "Hmatrix.cpp"
#include "DMatrixExpressionKernels.cuh"
#include "Dmatrix.cuh"
#include "Dmatrix.cu"
////#include "CudaMatrixExpression.cuh"
#include "Addition.cuh"
#include "Addition.cu"
#include "Subtraction.cuh"
#include "Subtraction.cu"
#include "Multiplication.cuh"
#include "Multiplication.cu"
#include "Division.cuh"
#include "Division.cu"
#include "Functions.cuh"
#include "Functions.cu"
#include "Utility.cuh"
#include "Grid_hip.cuh"
#include "SubMatrixExpression.h"
#include "DFFT.cuh"
#include "DBLAS.cuh"
#include "TimingCPU.h"
#include "TimingCPU.cpp"
#include "TimingGPU.cuh"
#include "TimingGPU.cu"
#include "DReduction.cuh"
#include "HReduction.h"
#include "InputOutPut.h"
#include "Utility.h"
//#pragma comment(lib, "winmm.lib")
// Input/Output
// --- Overload of << for int2_
std::ostream & operator << (std::ostream&, const int2_&);
// --- Overload of << for float2_
std::ostream & operator << (std::ostream&, const float2_&);
// --- Overload of << for double2_
std::ostream & operator << (std::ostream&, const double2_&);
// --- Overload of << for type T Hmatrix (int, float, double, int2_, float2_, double2_)
template <class T> std::ostream& operator << (std::ostream&, const Hmatrix<T>&);
// --- Overload of << for type T Dmatrix (int, float, double, int2_, float2_, double2_)
template <class T> std::ostream & operator << (std::ostream&, const Dmatrix<T>&);
/**********************************/
/* OVERLOAD OF << FOR EXPRESSIONS */
/**********************************/
template <class Q, class T>
ostream & operator << (ostream & output, const Expr<Q,T> v)
{
Hmatrix<T> a(v.GetRows(),v.GetColumns());
if (v.IsDevice()) {
Dmatrix<T> b(v.GetRows(),v.GetColumns());
b = v;
a = b; }
else {
a = v; }
output << a;
return output;
}
// Constant to CPU Hmatrix assignment
template <class OutType>
const BB::Hmatrix<OutType>& BB::Hmatrix<OutType>::operator=(const OutType c)
{
*this = BB::Expr<BB::Scalar<OutType>,OutType>(BB::Scalar<OutType>(c),Rows_,Columns_,ISHOST);
return *this;
}
// Constant to GPU Dmatrix assignment
template <class OutType>
const BB::Dmatrix<OutType>& BB::Dmatrix<OutType>::operator=(const OutType c)
{
*this = BB::Expr<BB::Scalar<OutType>,OutType>(BB::Scalar<OutType>(c),Rows_,Columns_,ISDEVICE);
return *this;
}
void main( int argc, char** argv)
{
// --- You should always initialize the streams and set the active stream before doing anything else
streams[0].InitStreams(1);
streams[0].SetStream(0);
// --- Introduction of this scoping is needed to avoid issues with class destructors when using hipDeviceReset();
{
int NumRows = 4;
int NumColumns = 4;
int NumElements = NumRows*NumColumns;
// --- The types the library can deal with are int, float, double, int2_, float2_, double2_ (the latter three being complex types)
// --- Defining GPU matrices is simple...
Dmatrix<double2_> A_D(NumRows,NumColumns);
Dmatrix<double2_> B_D(NumRows,NumColumns);
Dmatrix<double2_> C_D(NumRows,NumColumns);
// --- Defining CPU matrices is as simple as for GPU matrices...
Hmatrix<double2_> A_H(NumRows,NumColumns);
Hmatrix<double2_> B_H(NumRows,NumColumns);
Hmatrix<double2_> C_H(NumRows,NumColumns);
// --- The default type DefaultType is set in Constant.h. You can change it. Useful to simplify the syntax. If you want to define a matrix of
// type DefaultType, then just use, for example, Dmatrix<> A_H(NumRows,NumColumns);
// --- Setting matrices to 0s or 1s is simple and uses the same syntax for CPU and GPU matrices
A_H = SetOnes(NumRows,NumColumns); // SetOnes is Matlab's ones equivalent. If SetOnes does not return the DefaultType, use for instance SetOnes<float>(NumRows,NumColumns);
B_D = SetZeros(NumRows,NumColumns); // SetZeros is Matlab's zeros equivalent. If SetZeros does not return the DefaultType, use for instance SetZeros<float>(NumRows,NumColumns);
// --- You can use cout to display the results
cout << "Displaying A_H " << endl << endl;
cout << A_H << endl;
cout << "Displaying B_H " << endl << endl;
cout << B_D << endl;
// --- Moving matrices from CPU to GPU and viceversa...
B_H = B_D;
A_D = A_H;
// --- Assigning matrices from CPU to CPU and from GPU to GPU. All the possible assignments from real to real (e.g., float = int) are defined.
// Assignments from real to complex (double2_ = float) are also defined.
C_H = B_H;
C_D = B_H;
// --- You can initialize CPU or GPU matrices on expressions or on other CPU or GPU matrices (CPU on GPU and GPU on CPU initializations are possible)
Hmatrix<float> D_H(EqSpace<float>(3,NumElements+3,NumElements),PINNED); // uses PINNED memory for D_H. This is important for async
// global memory transactions.
// EqSpace is Matlab's linspace equivalent.
Dmatrix<float> D_D(D_H);
// --- You can mimic the Matlab's ":" operator
Dmatrix<float> E_D(Colon<float>(1,4)); // Equivalent of Matlab's 1:4;
Hmatrix<float> E_H(Colon<float>(1,2,4)); // Equivalent of Matlab's 1:2:4;
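// (following the Matlab analogy above, E_D should hold 1 2 3 4 and E_H should hold 1 3)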
// --- You can read individual elements for CPU and GPU matrices ...
cout << E_D(0) << endl;
cout << E_H(0) << endl;
// --- ... similarly, you can write individual elements for CPU matrices ...
E_H(0)=3;
// --- ... and for GPU matrices.
cout << "Before the assignment: E_D(0) = " << E_D(0) << "\n\n";
E_D(0)=3.;
cout << "After the assignment: E_D(0) = " << E_D(0) << "\n\n";
cout << "Before the assignment: A_D(0) = " << A_D(0) << "\n\n";
A_D(0)=double2_(33.,0.);
cout << "After the assignment: A_D(0) = " << A_D(0) << "\n\n";
// --- You can resize both CPU and GPU matrices, for example as
cout << "Original number of rows = " << A_D.GetRows() << "\n\n";
cout << "Original number of columns = " << A_D.GetColumns() << "\n\n";
A_D.Resize(1,NumElements);
cout << "New number of rows = " << A_D.GetRows() << "\n\n";
cout << "New number of columns = " << A_D.GetColumns() << "\n\n";
// --- You can create grids on CPU or GPU with a syntax similar to Matlab's meshgrid.
// H_D=GridX(F_D,G_D); requires both F_D and G_D to be vectors. Then it fills a matrix H_D of size length(G_D)xlength(F_D) that replicates
// the vector F_D along the rows.
Dmatrix<float> F_D(EqSpace<float>(1,4,4));
Dmatrix<float> G_D(EqSpace<float>(1,3,3));
Dmatrix<float> H_D(3,4);
H_D = GridX(F_D,G_D);
cout << H_D << "\n";
// I_D=GridY(F_D,G_D); requires both F_D and G_D to be vectors. Then it fills a matrix I_D of size length(G_D)xlength(F_D) that replicates
// the vector G_D along the columns.
Dmatrix<float> I_D(3,4);
I_D = GridY(F_D,G_D);
cout << I_D << "\n";
tie(H_D,I_D)=Grid(F_D,G_D);
cout << H_D << "\n";
cout << I_D << "\n";
// --- You can easily time your applications by CPU and GPU timers.
TimingCPU timerCPU;
TimingGPU timerGPU;
timerCPU.StartCounter();
A_H = B_H * C_H;
cout << "CPU timing = " << timerCPU.GetCounter() << " ms\n";
timerGPU.StartCounter();
A_D.Resize(NumRows,NumColumns);
try { A_D = B_D * C_D; } catch(exception &e) { cout << e.what() << endl; getch(); return; }
cout << "GPU timing = " << timerGPU.GetCounter() << " ms\n";
// --- You can perform FFTs as (FFT is currently limited only to GPU) - the FFT is executed in a proper stream
A_D = SetOnes(NumRows,NumColumns);
B_D = FFT(A_D); // in this case, the plan is calculated internally to the FFT routine
cout << B_D << "\n";
hipfftHandle plan = DEVICE_FFT_2D_PLAN_Z2Z(A_D.GetRows(),A_D.GetColumns()); // in this case, you explicitly calculate the plan and possibly reuse it
B_D = FFT(A_D,plan);
cout << B_D << "\n";
// --- Inverse FFTs are also possible
B_D = (1./(NumRows*NumColumns))*IFFT(B_D,plan);
cout << B_D << "\n";
// --- You can also calculate FFTs of expressions
B_D = FFT(sin(3.*B_D));
cout << B_D << "\n";
DEVICE_FFT_DESTROY_PLAN(plan);
// --- You can easily perform matrix-matrix multiplications as (handle automatically created and destroyed) ...
Dmatrix<float2_> L_D(3,5);
Dmatrix<float2_> M_D(5,1);
Dmatrix<float2_> N_D(3,1);
L_D = SetOnes<float2_>(3,5);
M_D = SetOnes<float2_>(5,1);
try { N_D = MatMul(L_D,M_D); } catch(exception &e) { cout << e.what() << endl; getch(); return; }
cout << N_D << "\n";
// --- ... or (handle manually created and destroyed)
hipblasHandle_t handle = DEVICE_BLAS_CREATE_HANDLE();
N_D = MatMul(sin(L_D),cos(M_D),handle);
DEVICE_BLAS_DESTROY_HANDLE(handle);
cout << N_D << "\n";
// --- You can output expressions with cout
cout << sin(L_D) << endl;
Hmatrix<float2_> L_H(L_D);
cout << sin(L_H) << endl;
// --- Extracting Sub-Expressions (Range)
Hmatrix<float2_> N_H(N_D);
Hmatrix<float2_> M_H(M_D);
N_H.Resize(1,3);
N_H = M_H(Range(0,2));
N_D.Resize(1,3);
N_D = M_D(Range(0,2));
cout << sin(N_D)(Range(0,2)) << endl;
// --- Extracting Sub-Expressions (int-Range)
M_H.Resize(1,5);
M_D.Resize(1,5);
N_H = M_H(0,Range(0,2));
N_D = M_D(0,Range(0,2));
cout << sin(N_D)(0,Range(0,2)) << endl;
// --- Extracting Sub-Expressions (Range-Int)
Hmatrix<float2_> O_H(5,4);
Hmatrix<float2_> P_H(3,1);
O_H(0,0) = 0.; O_H(1,0) = 1.; O_H(2,0) = 2.; O_H(3,0) = 3.; O_H(4,0) = 4.;
O_H(0,1) = 5.; O_H(1,1) = 6.; O_H(2,1) = 7.; O_H(3,1) = 8.; O_H(4,1) = 9.;
O_H(0,2) = 10.; O_H(1,2) = 11.; O_H(2,2) = 12.; O_H(3,2) = 13.; O_H(4,2) = 14.;
O_H(0,3) = 15.; O_H(1,3) = 16.; O_H(2,3) = 17.; O_H(3,3) = 18.; O_H(4,3) = 19.;
P_H = O_H(Range(1,3),2);
Dmatrix<float2_> O_D(O_H);
Dmatrix<float2_> P_D(P_H);
P_D = O_D(Range(1,3),0);
cout << sin(O_D)(Range(0,2),0) << "\n";
// --- Extracting Sub-Expressions (Range-Range)
Hmatrix <float2_> Q_H(3,2);
Dmatrix <float2_> Q_D(3,2);
Q_H = O_H(Range(2,4),Range(1,2));
Q_D = O_D(Range(2,4),Range(1,2));
cout << sin(O_H)(Range(2,4),Range(1,2)) << "\n";
// --- Extracting Sub-Expressions (Span-int)
Hmatrix <float2_> R_H(5,1);
Dmatrix <float2_> R_D(5,1);
R_H = O_H(Span,2);
R_D = O_D(Span,1);
cout << sin(O_D)(Span,2) << endl;
// --- Extracting Sub-Expressions (int-Span)
Hmatrix <float2_> S_H(1,4);
Dmatrix <float2_> S_D(1,4);
S_H = O_H(1,Span);
S_D = O_D(2,Span);
cout << sin(O_D)(2,Span) << endl;
// --- Extracting Sub-Expressions (int-RangeStep)
Hmatrix <float2_> T_H(1,2);
Dmatrix <float2_> T_D(1,2);
T_H = O_H(1,RangeStep(0,2,3));
T_D = O_D(1,RangeStep(0,2,3));
cout << sin(O_H)(1,RangeStep(0,2,3)) << endl;
// --- Extracting Sub-Expressions (RangeStep-int)
T_H.Resize(2,1);
T_D.Resize(2,1);
T_H = O_H(RangeStep(1,2,4),1);
T_D = O_D(RangeStep(1,2,4),1);
cout << sin(O_D)(RangeStep(1,2,4),1) << endl;
// --- Extracting Sub-Expressions (Range-RangeStep)
Q_H = O_H(Range(1,3),RangeStep(0,2,3));
Q_D = O_D(Range(1,3),RangeStep(0,2,3));
cout << cos(O_H)(Range(1,3),RangeStep(0,2,3)) << endl;
// --- Extracting Sub-Expressions (RangeStep-Range)
Q_H = O_H(RangeStep(0,2,4),Range(1,2));
Q_D = O_D(RangeStep(0,2,4),Range(1,2));
cout << cos(O_D)(RangeStep(0,2,4),Range(1,2)) << endl;
// --- Extracting Sub-Expressions (RangeStep-RangeStep)
Q_H = O_H(RangeStep(0,2,4),RangeStep(0,2,3));
Q_D = O_D(RangeStep(0,2,4),RangeStep(0,2,3));
cout << cos(O_D)(RangeStep(0,2,4),RangeStep(0,2,3)) << endl;
// --- Extracting Sub-Expressions (RangeStep-RangeStep)
Q_H = O_H(RangeStep(0,2,4),RangeStep(0,2,3));
Q_D = O_D(RangeStep(0,2,4),RangeStep(0,2,3));
cout << cos(O_D)(RangeStep(0,2,4),RangeStep(0,2,3)) << endl;
// --- Extracting Sub-Expressions (Range-Span)
Hmatrix<float2_> U_H(3,4);
Dmatrix<float2_> U_D(3,4);
U_H = O_H(Range(0,2),Span);
U_D = O_D(Range(0,2),Span);
cout << cos(O_H)(Range(0,2),Span) << endl;
// --- Extracting Sub-Expressions (Span-Range)
Hmatrix<float2_> V_H(5,3);
Dmatrix<float2_> V_D(5,3);
V_H = O_H(Span,Range(0,2));
V_D = O_D(Span,Range(0,2));
cout << sin(O_D)(Span,Range(0,2)) << endl;
// --- Extracting Sub-Expressions (Span-RangeStep)
Hmatrix<float2_> W_H(5,2);
Dmatrix<float2_> W_D(5,2);
W_H = O_H(Span,RangeStep(0,2,3));
W_D = O_D(Span,RangeStep(0,2,3));
cout << sin(O_H)(Span,RangeStep(0,2,3)) << endl;
// --- Extracting Sub-Expressions (RangeStep-Span)
Hmatrix<float2_> X_H(2,4);
Dmatrix<float2_> X_D(2,4);
X_H = O_H(RangeStep(0,2,3),Span);
X_D = O_D(RangeStep(0,2,3),Span);
cout << sin(O_H)(RangeStep(0,2,3),Span) << endl;
//Hmatrix<float2_> W_H(5,2);
//Dmatrix<float2_> W_D(5,2);
//W_H = O_H(Span,RangeStep(0,2,3));
//W_D = O_D(Span,RangeStep(0,2,3));
// --- Reduction (+) - real case - Dmatrix
Hmatrix<double> ar(1,20);
for (unsigned i=0; i<20; ++i) {
ar(i) = 1;
}
Dmatrix<double> br(ar);
double sumr = SumAll(sin(br));
double sumrCPU = SumAll(sin(ar));
cout << "CPU reduction result: " << sumrCPU << endl;
cout << "GPU reduction result: " << sumr << endl;
// --- Reduction (+) - complex case - Dmatrix
Hmatrix<double2_> ac(1,20);
for (unsigned i=0; i<20; ++i) {
ac(i).c.x = 1;
ac(i).c.y = 2;
}
Dmatrix<double2_> bc(ac);
double2_ sumc = SumAll(bc);
double2_ sumcCPU = SumAll(ac);
cout << "CPU reduction result: real part = " << sumcCPU.c.x << "; imaginary part = " << sumcCPU.c.y << endl;
cout << "GPU reduction result: real part = " << sumcCPU.c.x << "; imaginary part = " << sumcCPU.c.y << endl;
}
hipDeviceReset();
std::cout << "Going to sleep" << std::endl;
getch();
}
| 02ff30e7738966d973ccc40f60a4f9697f742769.cu | /* Bluebird Library - High performance CPUs and GPUs computing library.
*
* Copyright (C) 2012-2013 Orange Owl Solutions.
*
* This file is part of Bluebird Library.
* Bluebird Library is free software: you can redistribute it and/or modify
* it under the terms of the Lesser GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Bluebird Library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Lesser GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Bluebird Library. If not, see <http://www.gnu.org/licenses/>.
*
*
* For any request, question or bug reporting please visit http://www.orangeowlsolutions.com/
* or send an e-mail to: [email protected]
*
*
*/
using namespace std;
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <conio.h>
#include <typeinfo>
#include <iostream>
// includes CUDA
#include <cuda.h>
#include <cuda_runtime.h>
#include "ComplexTypes.cuh"
#include "ComplexTypes.cu"
using namespace BB;
// includes Expression Templates library
#include "Macros.h"
#include "HExceptions.h"
#include "HExceptions.cpp"
#include "DExceptions.cuh"
#include "Constants.h"
#include "DStreams.cuh"
#include "DStreams.cu"
#include "P2P.cu"
#include "SubIndicesAccessHandling.h"
#include "SubIndicesAccessHandling.cpp"
#include "DGlobals.cuh"
#include "CudaExpressionMacros.cuh"
////#include "CudaExpressionKernels.cuh"
#include "Expression.cuh"
//
//// includes CUDA Expression Templates library
#include "Scalar.cuh"
#include "Promotion.cuh"
#include "Hmatrix.h"
#include "Hmatrix.cpp"
#include "DMatrixExpressionKernels.cuh"
#include "Dmatrix.cuh"
#include "Dmatrix.cu"
////#include "CudaMatrixExpression.cuh"
#include "Addition.cuh"
#include "Addition.cu"
#include "Subtraction.cuh"
#include "Subtraction.cu"
#include "Multiplication.cuh"
#include "Multiplication.cu"
#include "Division.cuh"
#include "Division.cu"
#include "Functions.cuh"
#include "Functions.cu"
#include "Utility.cuh"
#include "Grid.cuh"
#include "SubMatrixExpression.h"
#include "DFFT.cuh"
#include "DBLAS.cuh"
#include "TimingCPU.h"
#include "TimingCPU.cpp"
#include "TimingGPU.cuh"
#include "TimingGPU.cu"
#include "DReduction.cuh"
#include "HReduction.h"
#include "InputOutPut.h"
#include "Utility.h"
//#pragma comment(lib, "winmm.lib")
// Input/Output
// --- Overload of << for int2_
std::ostream & operator << (std::ostream&, const int2_&);
// --- Overload of << for float2_
std::ostream & operator << (std::ostream&, const float2_&);
// --- Overload of << for double2_
std::ostream & operator << (std::ostream&, const double2_&);
// --- Overload of << for type T Hmatrix (int, float, double, int2_, float2_, double2_)
template <class T> std::ostream& operator << (std::ostream&, const Hmatrix<T>&);
// --- Overload of << for type T Dmatrix (int, float, double, int2_, float2_, double2_)
template <class T> std::ostream & operator << (std::ostream&, const Dmatrix<T>&);
/**********************************/
/* OVERLOAD OF << FOR EXPRESSIONS */
/**********************************/
template <class Q, class T>
ostream & operator << (ostream & output, const Expr<Q,T> v)
{
Hmatrix<T> a(v.GetRows(),v.GetColumns());
if (v.IsDevice()) {
Dmatrix<T> b(v.GetRows(),v.GetColumns());
b = v;
a = b; }
else {
a = v; }
output << a;
return output;
}
// Constant to CPU Hmatrix assignment
template <class OutType>
const BB::Hmatrix<OutType>& BB::Hmatrix<OutType>::operator=(const OutType c)
{
*this = BB::Expr<BB::Scalar<OutType>,OutType>(BB::Scalar<OutType>(c),Rows_,Columns_,ISHOST);
return *this;
}
// Constant to GPU Hmatrix assignment
template <class OutType>
const BB::Dmatrix<OutType>& BB::Dmatrix<OutType>::operator=(const OutType c)
{
*this = BB::Expr<BB::Scalar<OutType>,OutType>(BB::Scalar<OutType>(c),Rows_,Columns_,ISDEVICE);
return *this;
}
void main( int argc, char** argv)
{
// --- You should always initialize the streams and set the active stream before doing anything else
streams[0].InitStreams(1);
streams[0].SetStream(0);
// --- Introduction of this scoping is needed to avoid issues with class destructors when using cudaDeviceReset();
{
int NumRows = 4;
int NumColumns = 4;
int NumElements = NumRows*NumColumns;
// --- The types the library can deal with are int, float, double, int2_, float2_, double2_ (the latter three being complex types)
// --- Defining GPU matrices is simple...
Dmatrix<double2_> A_D(NumRows,NumColumns);
Dmatrix<double2_> B_D(NumRows,NumColumns);
Dmatrix<double2_> C_D(NumRows,NumColumns);
// --- Defining CPU matrices is as simple as for GPU matrices...
Hmatrix<double2_> A_H(NumRows,NumColumns);
Hmatrix<double2_> B_H(NumRows,NumColumns);
Hmatrix<double2_> C_H(NumRows,NumColumns);
// --- The default type DefaultType is set in Constant.h. You can change it. Useful to simplify the syntax. If you want to define a matrix of
// type DefaultType, then just use, for example, Dmatrix<> A_H(NumRows,NumColumns);
// --- Setting matrices to 0s or 1s is simple and uses the same syntax for CPU and GPU matrices
A_H = SetOnes(NumRows,NumColumns); // SetOnes is Matlab's ones equivalent. If SetOnes does not return the DefaultType, use for instance SetOnes<float>(NumRows,NumColumns);
B_D = SetZeros(NumRows,NumColumns); // SetZeros is Matlab's zeros equivalent. If SetZeros does not return the DefaultType, use for instance SetOnes<float>(NumRows,NumColumns);
// --- You can use cout to display the results
cout << "Displaying A_H " << endl << endl;
cout << A_H << endl;
cout << "Displaying B_H " << endl << endl;
cout << B_D << endl;
// --- Moving matrices from CPU to GPU and viceversa...
B_H = B_D;
A_D = A_H;
// --- Assigning matrices from CPU to CPU and from GPU to GPU. All the possible assignments from real to real (e.g., float = int) are defined.
// Assignments from real to complex (double2_ = float) are also defined.
C_H = B_H;
C_D = B_H;
// --- You can initialize CPU or GPU matrices on expressions or on other CPU or GPU matrices (CPU on GPU and GPU on CPU initializations are possible)
Hmatrix<float> D_H(EqSpace<float>(3,NumElements+3,NumElements),PINNED); // uses PINNED memory for D_H. This is important for async
// global memory transactions.
// EqSpace is Matlab's linspace equivalent.
Dmatrix<float> D_D(D_H);
// --- You can mimic the Matlab's ":" operator
Dmatrix<float> E_D(Colon<float>(1,4)); // Equivalent of Matlab's 1:4;
Hmatrix<float> E_H(Colon<float>(1,2,4)); // Equivalent of Matlab's 1:2:4;
// --- You can read individual elements for CPU and GPU matrices ...
cout << E_D(0) << endl;
cout << E_H(0) << endl;
// --- ... similarly, you can write individual elements for CPU matrices ...
E_H(0)=3;
// --- ... and for GPU matrices.
cout << "Before the assignment: E_D(0) = " << E_D(0) << "\n\n";
E_D(0)=3.;
cout << "After the assignment: E_D(0) = " << E_D(0) << "\n\n";
cout << "Before the assignment: A_D(0) = " << A_D(0) << "\n\n";
A_D(0)=double2_(33.,0.);
cout << "After the assignment: A_D(0) = " << A_D(0) << "\n\n";
// --- You can resize both CPU and GPU matrices, for example as
cout << "Original number of rows = " << A_D.GetRows() << "\n\n";
cout << "Original number of columns = " << A_D.GetColumns() << "\n\n";
A_D.Resize(1,NumElements);
cout << "New number of rows = " << A_D.GetRows() << "\n\n";
cout << "New number of columns = " << A_D.GetColumns() << "\n\n";
// --- You can create grids on CPU or GPU with a syntax similar to Matlab's meshgrid.
// H_D=GridX(F_D,G_D); requires both F_D and G_D to be vectors. Then it fills a matrix H_D of size length(G_D)xlength(F_D) that replicates
// the vector F_D along the rows.
Dmatrix<float> F_D(EqSpace<float>(1,4,4));
Dmatrix<float> G_D(EqSpace<float>(1,3,3));
Dmatrix<float> H_D(3,4);
H_D = GridX(F_D,G_D);
cout << H_D << "\n";
// I_D=GridY(F_D,G_D); requires both F_D and G_D to be vectors. Then it fills a matrix I_D of size length(G_D)xlength(F_D) that replicates
// the vector G_D along the columns.
Dmatrix<float> I_D(3,4);
I_D = GridY(F_D,G_D);
cout << I_D << "\n";
tie(H_D,I_D)=Grid(F_D,G_D);
cout << H_D << "\n";
cout << I_D << "\n";
// --- You can easily time your applications by CPU and GPU timers.
TimingCPU timerCPU;
TimingGPU timerGPU;
timerCPU.StartCounter();
A_H = B_H * C_H;
cout << "CPU timing = " << timerCPU.GetCounter() << " ms\n";
timerGPU.StartCounter();
A_D.Resize(NumRows,NumColumns);
try { A_D = B_D * C_D; } catch(exception &e) { cout << e.what() << endl; getch(); return; }
cout << "GPU timing = " << timerGPU.GetCounter() << " ms\n";
// --- You can perform FFTs as (FFT is currently limited only to GPU) - the FFT is executed in a proper stream
A_D = SetOnes(NumRows,NumColumns);
B_D = FFT(A_D); // in this case, the plan is calculated internally to the FFT routine
cout << B_D << "\n";
cufftHandle plan = DEVICE_FFT_2D_PLAN_Z2Z(A_D.GetRows(),A_D.GetColumns()); // in this case, you explicitly calculate the plan and possibly reuse it
B_D = FFT(A_D,plan);
cout << B_D << "\n";
// --- Inverse FFTs are also possible
B_D = (1./(NumRows*NumColumns))*IFFT(B_D,plan);
cout << B_D << "\n";
// --- You can also calculate FFTs of expressions
B_D = FFT(sin(3.*B_D));
cout << B_D << "\n";
DEVICE_FFT_DESTROY_PLAN(plan);
// --- You can easily perform matrix-matrix multiplications as (handle automatically created and destroyed) ...
Dmatrix<float2_> L_D(3,5);
Dmatrix<float2_> M_D(5,1);
Dmatrix<float2_> N_D(3,1);
L_D = SetOnes<float2_>(3,5);
M_D = SetOnes<float2_>(5,1);
try { N_D = MatMul(L_D,M_D); } catch(exception &e) { cout << e.what() << endl; getch(); return; }
cout << N_D << "\n";
// --- ... or (handle manually created and destroyed)
cublasHandle_t handle = DEVICE_BLAS_CREATE_HANDLE();
N_D = MatMul(sin(L_D),cos(M_D),handle);
DEVICE_BLAS_DESTROY_HANDLE(handle);
cout << N_D << "\n";
// --- You can output expressions with cout
cout << sin(L_D) << endl;
Hmatrix<float2_> L_H(L_D);
cout << sin(L_H) << endl;
// --- Extracting Sub-Expressions (Range)
Hmatrix<float2_> N_H(N_D);
Hmatrix<float2_> M_H(M_D);
N_H.Resize(1,3);
N_H = M_H(Range(0,2));
N_D.Resize(1,3);
N_D = M_D(Range(0,2));
cout << sin(N_D)(Range(0,2)) << endl;
// --- Extracting Sub-Expressions (int-Range)
M_H.Resize(1,5);
M_D.Resize(1,5);
N_H = M_H(0,Range(0,2));
N_D = M_D(0,Range(0,2));
cout << sin(N_D)(0,Range(0,2)) << endl;
// --- Extracting Sub-Expressions (Range-Int)
Hmatrix<float2_> O_H(5,4);
Hmatrix<float2_> P_H(3,1);
O_H(0,0) = 0.; O_H(1,0) = 1.; O_H(2,0) = 2.; O_H(3,0) = 3.; O_H(4,0) = 4.;
O_H(0,1) = 5.; O_H(1,1) = 6.; O_H(2,1) = 7.; O_H(3,1) = 8.; O_H(4,1) = 9.;
O_H(0,2) = 10.; O_H(1,2) = 11.; O_H(2,2) = 12.; O_H(3,2) = 13.; O_H(4,2) = 14.;
O_H(0,3) = 15.; O_H(1,3) = 16.; O_H(2,3) = 17.; O_H(3,3) = 18.; O_H(4,3) = 19.;
P_H = O_H(Range(1,3),2);
Dmatrix<float2_> O_D(O_H);
Dmatrix<float2_> P_D(P_H);
P_D = O_D(Range(1,3),0);
cout << sin(O_D)(Range(0,2),0) << "\n";
// --- Extracting Sub-Expressions (Range-Range)
Hmatrix <float2_> Q_H(3,2);
Dmatrix <float2_> Q_D(3,2);
Q_H = O_H(Range(2,4),Range(1,2));
Q_D = O_D(Range(2,4),Range(1,2));
cout << sin(O_H)(Range(2,4),Range(1,2)) << "\n";
// --- Extracting Sub-Expressions (Span-int)
Hmatrix <float2_> R_H(5,1);
Dmatrix <float2_> R_D(5,1);
R_H = O_H(Span,2);
R_D = O_D(Span,1);
cout << sin(O_D)(Span,2) << endl;
// --- Extracting Sub-Expressions (int-Span)
Hmatrix <float2_> S_H(1,4);
Dmatrix <float2_> S_D(1,4);
S_H = O_H(1,Span);
S_D = O_D(2,Span);
cout << sin(O_D)(2,Span) << endl;
// --- Extracting Sub-Expressions (int-RangeStep)
Hmatrix <float2_> T_H(1,2);
Dmatrix <float2_> T_D(1,2);
T_H = O_H(1,RangeStep(0,2,3));
T_D = O_D(1,RangeStep(0,2,3));
cout << sin(O_H)(1,RangeStep(0,2,3)) << endl;
// --- Extracting Sub-Expressions (RangeStep-int)
T_H.Resize(2,1);
T_D.Resize(2,1);
T_H = O_H(RangeStep(1,2,4),1);
T_D = O_D(RangeStep(1,2,4),1);
cout << sin(O_D)(RangeStep(1,2,4),1) << endl;
// --- Extracting Sub-Expressions (Range-RangeStep)
Q_H = O_H(Range(1,3),RangeStep(0,2,3));
Q_D = O_D(Range(1,3),RangeStep(0,2,3));
cout << cos(O_H)(Range(1,3),RangeStep(0,2,3)) << endl;
// --- Extracting Sub-Expressions (RangeStep-Range)
Q_H = O_H(RangeStep(0,2,4),Range(1,2));
Q_D = O_D(RangeStep(0,2,4),Range(1,2));
cout << cos(O_D)(RangeStep(0,2,4),Range(1,2)) << endl;
// --- Extracting Sub-Expressions (RangeStep-RangeStep)
Q_H = O_H(RangeStep(0,2,4),RangeStep(0,2,3));
Q_D = O_D(RangeStep(0,2,4),RangeStep(0,2,3));
cout << cos(O_D)(RangeStep(0,2,4),RangeStep(0,2,3)) << endl;
// --- Extracting Sub-Expressions (RangeStep-RangeStep)
Q_H = O_H(RangeStep(0,2,4),RangeStep(0,2,3));
Q_D = O_D(RangeStep(0,2,4),RangeStep(0,2,3));
cout << cos(O_D)(RangeStep(0,2,4),RangeStep(0,2,3)) << endl;
// --- Extracting Sub-Expressions (Range-Span)
Hmatrix<float2_> U_H(3,4);
Dmatrix<float2_> U_D(3,4);
U_H = O_H(Range(0,2),Span);
U_D = O_D(Range(0,2),Span);
cout << cos(O_H)(Range(0,2),Span) << endl;
// --- Extracting Sub-Expressions (Span-Range)
Hmatrix<float2_> V_H(5,3);
Dmatrix<float2_> V_D(5,3);
V_H = O_H(Span,Range(0,2));
V_D = O_D(Span,Range(0,2));
cout << sin(O_D)(Span,Range(0,2)) << endl;
// --- Extracting Sub-Expressions (Span-RangeStep)
Hmatrix<float2_> W_H(5,2);
Dmatrix<float2_> W_D(5,2);
W_H = O_H(Span,RangeStep(0,2,3));
W_D = O_D(Span,RangeStep(0,2,3));
cout << sin(O_H)(Span,RangeStep(0,2,3)) << endl;
// --- Extracting Sub-Expressions (RangeStep-Span)
Hmatrix<float2_> X_H(2,4);
Dmatrix<float2_> X_D(2,4);
X_H = O_H(RangeStep(0,2,3),Span);
X_D = O_D(RangeStep(0,2,3),Span);
cout << sin(O_H)(RangeStep(0,2,3),Span) << endl;
//Hmatrix<float2_> W_H(5,2);
//Dmatrix<float2_> W_D(5,2);
//W_H = O_H(Span,RangeStep(0,2,3));
//W_D = O_D(Span,RangeStep(0,2,3));
// --- Reduction (+) - real case - Dmatrix
Hmatrix<double> ar(1,20);
for (unsigned i=0; i<20; ++i) {
ar(i) = 1;
}
Dmatrix<double> br(ar);
double sumr = SumAll(sin(br));
double sumrCPU = SumAll(sin(ar));
cout << "CPU reduction result: " << sumrCPU << endl;
cout << "GPU reduction result: " << sumr << endl;
// --- Reduction (+) - complex case - Dmatrix
Hmatrix<double2_> ac(1,20);
for (unsigned i=0; i<20; ++i) {
ac(i).c.x = 1;
ac(i).c.y = 2;
}
Dmatrix<double2_> bc(ac);
double2_ sumc = SumAll(bc);
double2_ sumcCPU = SumAll(ac);
cout << "CPU reduction result: real part = " << sumcCPU.c.x << "; imaginary part = " << sumcCPU.c.y << endl;
cout << "GPU reduction result: real part = " << sumcCPU.c.x << "; imaginary part = " << sumcCPU.c.y << endl;
}
cudaDeviceReset();
std::cout << "Going to sleep" << std::endl;
getch();
}
|
4c77c50565151c92e6ccfb482c5ec09cb495f0c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <cstdio>
#include <complex>
#include <ctime>
#include <cstring>
#include <fstream>
#include <complex.h>
#include <fftw3.h>
#include <hiprand/hiprand_kernel.h>
//Physical magnitudes.
#define m_e 9.10938291e-31
#define e 1.6021e-19
#define m_e 9.10938291e-31 // Electron mass
#define e 1.6021e-19 // Electron charge
#define k_b 1.3806504e-23 // Boltzmann constant
#define epsilon_0 8.854187e-12 // Electric permittivity of vacuum
#define max_SPe 10000 // (Computational) limit on electron superparticles
#define max_SPi 10000 // (Computational) limit on ion superparticles
#define J_X 4096 // Number of mesh points in X
#define J_Y 1024 // Number of mesh points in Y
int le=0, li=0,kt;
//Random-number distribution for the x coordinates.
__device__ double create_Velocities_X(double fmax, double vphi, double aleatorio, hiprandState_t *states) // function that generates a semi-Maxwellian velocity distribution for the particles
// (See p. 291 of Fitzpatrick, Computational Physics: Distribution functions --> Rejection Method)
{
double sigma=vphi; // sigma=vflow=vth ("spread" of the Maxwellian distribution)
double vmin= 0. ; // Minimum speed
double vmax= 4.*sigma; // Maximum speed
double v,f,f_random;
int Idx = blockIdx.x * blockDim.x + threadIdx.x;
while (true) {
v=vmin+(vmax-vmin)*aleatorio; // Draw a random value of v uniformly distributed in the range [vmin,vmax]
f =fmax*exp(-(1.0/M_PI)*pow(v/vphi,2)); // Evaluate the distribution function at v
f_random = fmax*aleatorio; // Draw a random value of f uniformly distributed in the range [0,fmax]
if (f_random > f)
aleatorio = hiprand_uniform(states + Idx);
else
return v;
}
}
// Distribution function for the y coordinates.
__device__ double create_Velocities_Y(double fmax1, double vphi1, double aleatorio, hiprandState_t *states) // function that generates a semi-Maxwellian velocity distribution for the particles
// (See p. 291 of Fitzpatrick, Computational Physics: Distribution functions --> Rejection Method)
{
double sigma=vphi1; // sigma=vflow=vth ("spread" of the Maxwellian distribution)
double vmin= -3.*sigma; // Minimum speed
double vmax= 3.*sigma; // Maximum speed
double v,f,f_random;
int Idx = blockIdx.x * blockDim.x + threadIdx.x;
while (true) {
v=vmin+(vmax-vmin)*aleatorio; // Draw a random value of v uniformly distributed in the range [vmin,vmax]
f =fmax1*exp(-(1.0/M_PI)*pow(v/vphi1,2)); // Evaluate the distribution function at v
f_random = fmax1*aleatorio; // Draw a random value of f uniformly distributed in the range [0,fmax]
if (f_random > f)
aleatorio = hiprand_uniform(states + Idx);
else
return v;
}
}
__global__ void distribucionVelocidadX(double *vel, double fmax, double vphi, hiprandState_t *states, int seed){
int Idx = blockIdx.x * blockDim.x + threadIdx.x;
seed = (unsigned int) (clock() * Idx);
hiprand_init(seed, 0, 0, states + Idx);
if (Idx < max_SPe) {
vel[Idx] = create_Velocities_X(fmax, vphi, hiprand_uniform(states + Idx), states); // X-velocity distribution
}
}
__global__ void distribucionVelocidadY(double *vel1, double fmax1, double vphi1, hiprandState_t *states, int seed){
int Idx = blockIdx.x * blockDim.x + threadIdx.x;
seed = (unsigned int) (clock() * Idx);
hiprand_init(seed, 0, 0, states + Idx);
if (Idx < max_SPe) {
vel1[Idx] = create_Velocities_Y(fmax1, vphi1, hiprand_uniform(states + Idx), states); // Y-velocity distribution
//vel_1[Idx] = distrib_vel_X(fmax, vphi, hiprand_uniform(states + Idx), states, N); // Y-velocity distribution (old version)
}
}
using namespace std;
int main(void) {
double razon_masas = 1.98e5; //m_i/m_e (silver)
double vphi_i_0; // ion thermal velocity (X component)
double vphi_i_1; // ion thermal velocity (Y component)
double vphi_e_0; // electron thermal velocity (X component)
double vphi_e_1; // electron thermal velocity (Y component)
double fi_Maxwell_0; //
double fi_Maxwell_1;
double fe_Maxwell_0;
double fe_Maxwell_1;
double vflux_i_0 = 1e3;
double vflux_i_1 = 1e3;
double vflux_e_0 =(sqrt(razon_masas)*vflux_i_0);
double vflux_e_1 =(sqrt(razon_masas)*vflux_i_1);
double vflux_i_magnitud=sqrt(vflux_i_0*vflux_i_0+vflux_i_1*vflux_i_1); // Ion flow velocity (m/s) = sqrt(2*k_b*Te/(M_PI*m_i))
double vflux_e_magnitud=sqrt(vflux_e_0*vflux_e_0+vflux_e_1*vflux_e_1);
vphi_i_0=vflux_i_0/vflux_i_magnitud; // Ion thermal velocity (X)
vphi_e_0=vflux_e_0/vflux_i_magnitud; // Electron thermal velocity (X)
vphi_i_1=vflux_i_1/vflux_i_magnitud; // Ion thermal velocity (Y)
vphi_e_1=vflux_e_1/vflux_i_magnitud; // Electron thermal velocity (Y)
fi_Maxwell_0= (2./(M_PI*vphi_i_0)); // Maximum value of the ion semi-Maxwellian distribution function (X)
fe_Maxwell_0= (2./(M_PI*vphi_e_0)); // Maximum value of the electron semi-Maxwellian distribution function
fi_Maxwell_1= (1./(M_PI*vphi_i_0)); // Maximum value of the ion semi-Maxwellian distribution function
fe_Maxwell_1= (1./(M_PI*vphi_e_0)); // Maximum value of the electron semi-Maxwellian distribution function
int NTSPe, NTSPI, max_SPe_dt, max_SPi_dt;
int NTe = 1e5, NTI = 1e5;
int Factor_carga_e=10, Factor_carga_i=10;
NTSPe=NTe/Factor_carga_e;
NTSPI=NTI/Factor_carga_i;
int Kemision=10;
double dt=1.e-5;
int dt_emision=Kemision*dt;
max_SPe_dt= round((double)NTSPe*dt_emision);
max_SPi_dt=max_SPe_dt;
//////////////////////////////////////////////////////
int size = max_SPe* sizeof(double);
//Declaration of the host variables.
double *vel_e_0;
double *vel_e_1;
double *vel_i_0;
double *vel_i_1;
vel_e_0 = (double *) malloc(size);
vel_e_1 = (double *) malloc(size);
vel_i_0 = (double *) malloc(size);
vel_i_1 = (double *) malloc(size);
//Declaration of the device variables.
double *vel_e_0_d;
double *vel_e_1_d;
double *vel_i_0_d;
double *vel_i_1_d;
hipMalloc((void **) &vel_e_0_d, size);
hipMalloc((void **) &vel_e_1_d, size);
hipMalloc((void **) &vel_i_0_d, size);
hipMalloc((void **) &vel_i_1_d, size);
// create the seed used to generate the random numbers
hiprandState_t *devStates;
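// one generator state per superparticle slot (max_SPe states in total)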
hipMalloc((void **) &devStates, max_SPe * sizeof(hiprandState_t));
int seed = time(NULL);
//Number of threads to run in each block.
float blockSize = 1024;
dim3 dimBlock(ceil(max_SPe/ blockSize), 1, 1);
dim3 dimGrid(blockSize, 1, 1);
///////////////////////////////////////////////////////////////////////////////////////////////////////
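// Launch the four sampling kernels (electron/ion velocities, X and Y components), synchronizing after each.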
hipLaunchKernelGGL(( distribucionVelocidadX), dim3(blockSize), dim3(dimBlock), 0, 0, vel_e_0_d, fe_Maxwell_0, vphi_e_0, devStates, seed);
hipDeviceSynchronize();
hipLaunchKernelGGL(( distribucionVelocidadX), dim3(blockSize), dim3(dimBlock), 0, 0, vel_i_0_d, fi_Maxwell_0, vphi_i_0, devStates, seed);
hipDeviceSynchronize();
hipLaunchKernelGGL(( distribucionVelocidadY), dim3(blockSize), dim3(dimBlock), 0, 0, vel_e_1_d, fe_Maxwell_1, vphi_e_1, devStates, seed);
hipDeviceSynchronize();
hipLaunchKernelGGL(( distribucionVelocidadY), dim3(blockSize), dim3(dimBlock), 0, 0, vel_i_1_d, fi_Maxwell_1, vphi_i_1, devStates, seed);
hipDeviceSynchronize();
hipMemcpy(vel_e_0, vel_e_0_d, size, hipMemcpyDeviceToHost);
hipMemcpy(vel_i_0, vel_i_0_d, size, hipMemcpyDeviceToHost);
hipMemcpy(vel_e_1, vel_e_1_d, size, hipMemcpyDeviceToHost);
hipMemcpy(vel_i_1, vel_i_1_d, size, hipMemcpyDeviceToHost);
ofstream init;
init.open("velocidad_X");//se escribe un archivo de salida para analizar los datos. la salida corresponde al potencial electrostatico en cada celda conocido como phi.
for (int i = 0; i < max_SPe; i++){
init<<vel_e_0[i]<<" "<<vel_i_0[i]<<" "<<vel_e_1[i]<<" "<<vel_i_1[i]<<"\n";
}
init<<endl;
init.close();
printf("hola");
return 0;
}
| 4c77c50565151c92e6ccfb482c5ec09cb495f0c3.cu | #include <iostream>
#include <cstdlib>
#include <cmath>
#include <cstdio>
#include <complex>
#include <ctime>
#include <cstring>
#include <fstream>
#include <complex.h>
#include <fftw3.h>
#include <curand_kernel.h>
//Physical magnitudes.
#define m_e 9.10938291e-31
#define e 1.6021e-19
#define m_e 9.10938291e-31 // Electron mass
#define e 1.6021e-19 // Electron charge
#define k_b 1.3806504e-23 // Boltzmann constant
#define epsilon_0 8.854187e-12 // Electric permittivity of vacuum
#define max_SPe 10000 // (Computational) limit on electron superparticles
#define max_SPi 10000 // (Computational) limit on ion superparticles
#define J_X 4096 // Number of mesh points in X
#define J_Y 1024 // Number of mesh points in Y
int le=0, li=0,kt;
//Random-number distribution for the x coordinates.
__device__ double create_Velocities_X(double fmax, double vphi, double aleatorio, curandState *states) // function that generates a semi-Maxwellian velocity distribution for the particles
// (See p. 291 of Fitzpatrick, Computational Physics: Distribution functions --> Rejection Method)
{
double sigma=vphi; // sigma=vflow=vth ("spread" of the Maxwellian distribution)
double vmin= 0. ; // Minimum speed
double vmax= 4.*sigma; // Maximum speed
double v,f,f_random;
int Idx = blockIdx.x * blockDim.x + threadIdx.x;
while (true) {
v=vmin+(vmax-vmin)*aleatorio; // Draw a random value of v uniformly distributed in the range [vmin,vmax]
f =fmax*exp(-(1.0/M_PI)*pow(v/vphi,2)); // Evaluate the distribution function at v
f_random = fmax*aleatorio; // Draw a random value of f uniformly distributed in the range [0,fmax]
if (f_random > f)
aleatorio = curand_uniform(states + Idx);
else
return v;
}
}
// Distribution function for the y coordinates.
__device__ double create_Velocities_Y(double fmax1, double vphi1, double aleatorio, curandState *states) // function that generates a semi-Maxwellian velocity distribution for the particles
// (See p. 291 of Fitzpatrick, Computational Physics: Distribution functions --> Rejection Method)
{
double sigma=vphi1; // sigma=vflow=vth ("spread" of the Maxwellian distribution)
double vmin= -3.*sigma; // Minimum speed
double vmax= 3.*sigma; // Maximum speed
double v,f,f_random;
int Idx = blockIdx.x * blockDim.x + threadIdx.x;
while (true) {
v=vmin+(vmax-vmin)*aleatorio; // Draw a random value of v uniformly distributed in the range [vmin,vmax]
f =fmax1*exp(-(1.0/M_PI)*pow(v/vphi1,2)); // Evaluate the distribution function at v
f_random = fmax1*aleatorio; // Draw a random value of f uniformly distributed in the range [0,fmax]
if (f_random > f)
aleatorio = curand_uniform(states + Idx);
else
return v;
}
}
__global__ void distribucionVelocidadX(double *vel, double fmax, double vphi, curandState *states, int seed){
int Idx = blockIdx.x * blockDim.x + threadIdx.x;
seed = (unsigned int) (clock() * Idx);
curand_init(seed, 0, 0, states + Idx);
if (Idx < max_SPe) {
vel[Idx] = create_Velocities_X(fmax, vphi, curand_uniform(states + Idx), states); // X-velocity distribution
}
}
__global__ void distribucionVelocidadY(double *vel1, double fmax1, double vphi1, curandState *states, int seed){
int Idx = blockIdx.x * blockDim.x + threadIdx.x;
seed = (unsigned int) (clock() * Idx);
curand_init(seed, 0, 0, states + Idx);
if (Idx < max_SPe) {
vel1[Idx] = create_Velocities_Y(fmax1, vphi1, curand_uniform(states + Idx), states); // Y-velocity distribution
//vel_1[Idx] = distrib_vel_X(fmax, vphi, curand_uniform(states + Idx), states, N); // Y-velocity distribution (old version)
}
}
using namespace std;
int main(void) {
double razon_masas = 1.98e5; //m_i/m_e (silver)
double vphi_i_0; // ion thermal velocity (X component)
double vphi_i_1; // ion thermal velocity (Y component)
double vphi_e_0; // electron thermal velocity (X component)
double vphi_e_1; // electron thermal velocity (Y component)
double fi_Maxwell_0; //
double fi_Maxwell_1;
double fe_Maxwell_0;
double fe_Maxwell_1;
double vflux_i_0 = 1e3;
double vflux_i_1 = 1e3;
double vflux_e_0 =(sqrt(razon_masas)*vflux_i_0);
double vflux_e_1 =(sqrt(razon_masas)*vflux_i_1);
double vflux_i_magnitud=sqrt(vflux_i_0*vflux_i_0+vflux_i_1*vflux_i_1); // Ion flow velocity (m/s) = sqrt(2*k_b*Te/(M_PI*m_i))
double vflux_e_magnitud=sqrt(vflux_e_0*vflux_e_0+vflux_e_1*vflux_e_1);
vphi_i_0=vflux_i_0/vflux_i_magnitud; // Ion thermal velocity (X)
vphi_e_0=vflux_e_0/vflux_i_magnitud; // Electron thermal velocity (X)
vphi_i_1=vflux_i_1/vflux_i_magnitud; // Ion thermal velocity (Y)
vphi_e_1=vflux_e_1/vflux_i_magnitud; // Electron thermal velocity (Y)
fi_Maxwell_0= (2./(M_PI*vphi_i_0)); // Maximum value of the ion semi-Maxwellian distribution function (X)
fe_Maxwell_0= (2./(M_PI*vphi_e_0)); // Maximum value of the electron semi-Maxwellian distribution function
fi_Maxwell_1= (1./(M_PI*vphi_i_0)); // Maximum value of the ion semi-Maxwellian distribution function
fe_Maxwell_1= (1./(M_PI*vphi_e_0)); // Maximum value of the electron semi-Maxwellian distribution function
int NTSPe, NTSPI, max_SPe_dt, max_SPi_dt;
int NTe = 1e5, NTI = 1e5;
int Factor_carga_e=10, Factor_carga_i=10;
NTSPe=NTe/Factor_carga_e;
NTSPI=NTI/Factor_carga_i;
int Kemision=10;
double dt=1.e-5;
int dt_emision=Kemision*dt;
max_SPe_dt= round((double)NTSPe*dt_emision);
max_SPi_dt=max_SPe_dt;
//////////////////////////////////////////////////////
int size = max_SPe* sizeof(double);
//Declaration of the host variables.
double *vel_e_0;
double *vel_e_1;
double *vel_i_0;
double *vel_i_1;
vel_e_0 = (double *) malloc(size);
vel_e_1 = (double *) malloc(size);
vel_i_0 = (double *) malloc(size);
vel_i_1 = (double *) malloc(size);
//Declaration of the device variables.
double *vel_e_0_d;
double *vel_e_1_d;
double *vel_i_0_d;
double *vel_i_1_d;
cudaMalloc((void **) &vel_e_0_d, size);
cudaMalloc((void **) &vel_e_1_d, size);
cudaMalloc((void **) &vel_i_0_d, size);
cudaMalloc((void **) &vel_i_1_d, size);
// create the seed used to generate the random numbers
curandState *devStates;
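// one generator state per superparticle slot (max_SPe states in total)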
cudaMalloc((void **) &devStates, max_SPe * sizeof(curandState));
int seed = time(NULL);
//Number of threads to run in each block.
float blockSize = 1024;
dim3 dimBlock(ceil(max_SPe/ blockSize), 1, 1);
dim3 dimGrid(blockSize, 1, 1);
///////////////////////////////////////////////////////////////////////////////////////////////////////
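// Launch the four sampling kernels (electron/ion velocities, X and Y components), synchronizing after each.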
distribucionVelocidadX<<<blockSize, dimBlock>>>(vel_e_0_d, fe_Maxwell_0, vphi_e_0, devStates, seed);
cudaDeviceSynchronize();
distribucionVelocidadX<<<blockSize, dimBlock>>>(vel_i_0_d, fi_Maxwell_0, vphi_i_0, devStates, seed);
cudaDeviceSynchronize();
distribucionVelocidadY<<<blockSize, dimBlock>>>(vel_e_1_d, fe_Maxwell_1, vphi_e_1, devStates, seed);
cudaDeviceSynchronize();
distribucionVelocidadY<<<blockSize, dimBlock>>>(vel_i_1_d, fi_Maxwell_1, vphi_i_1, devStates, seed);
cudaDeviceSynchronize();
cudaMemcpy(vel_e_0, vel_e_0_d, size, cudaMemcpyDeviceToHost);
cudaMemcpy(vel_i_0, vel_i_0_d, size, cudaMemcpyDeviceToHost);
cudaMemcpy(vel_e_1, vel_e_1_d, size, cudaMemcpyDeviceToHost);
cudaMemcpy(vel_i_1, vel_i_1_d, size, cudaMemcpyDeviceToHost);
ofstream init;
init.open("velocidad_X");//se escribe un archivo de salida para analizar los datos. la salida corresponde al potencial electrostatico en cada celda conocido como phi.
for (int i = 0; i < max_SPe; i++){
init<<vel_e_0[i]<<" "<<vel_i_0[i]<<" "<<vel_e_1[i]<<" "<<vel_i_1[i]<<"\n";
}
init<<endl;
init.close();
printf("hola");
return 0;
}
|
c179ed352b461af117cab0d8af4db9d547f92c6d.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2018 Adam Sierakowski and Daniel Willen,
* The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include "cuda_physalis.h"
#include "cuda_particle.h"
#include <helper_cuda.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
__constant__ real _A1;
__constant__ real _A2;
__constant__ real _A3;
__constant__ real _B;
__constant__ int _nn[NCOEFFS];
__constant__ int _mm[NCOEFFS];
__constant__ real _node_t[NNODES];
__constant__ real _node_p[NNODES];
real *_int_Yp_re;
real *_int_Yp_im;
real *_int_rDYu_re;
real *_int_rDYu_im;
real *_int_xXDYu_re;
real *_int_xXDYu_im;
real *_sum_send_e;
real *_sum_send_w;
real *_sum_send_n;
real *_sum_send_s;
real *_sum_send_t;
real *_sum_send_b;
real *_sum_recv_e;
real *_sum_recv_w;
real *_sum_recv_n;
real *_sum_recv_s;
real *_sum_recv_t;
real *_sum_recv_b;
extern "C"
void cuda_init_physalis(void)
{
if (NPARTS > 0) {
/* set up coefficient table */
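// nn/mm hold the (n, m) degree/order pairs of the NCOEFFS Lamb coefficients (n = 0..4, m = 0..n)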
int nn[NCOEFFS] = {0,
1, 1,
2, 2, 2,
3, 3, 3, 3,
4, 4, 4, 4, 4};
int mm[NCOEFFS] = {0,
0, 1,
0, 1, 2,
0, 1, 2, 3,
0, 1, 2, 3, 4};
/* set up quadrature nodes for 7th-order Lebedev quadrature */
// NOTE: Higher order quadratures exist as comments in bluebottle, in
// cuda_quadrature.cu:cuda_Lamb()
real PI14 = 0.25 * PI;
real PI12 = 0.5 * PI;
real PI34 = 0.75 * PI;
real PI54 = 1.25 * PI;
real PI32 = 1.5 * PI;
real PI74 = 1.75 * PI;
real alph1 = 0.955316618124509;
real alph2 = 2.186276035465284;
/* weights */
real A1 = 0.598398600683775;
real A2 = 0.478718880547015;
real A3 = 0.403919055461543;
real B = 0.;
/* nodes */
// Find a more elegant way of fixing the divide by sin(0)
real a1_t[6] = {PI12, PI12, PI12, PI12, 0.+DIV_ST, PI-DIV_ST};
real a1_p[6] = {0., PI12, PI, PI32, 0., 0.};
real a2_t[12] = {PI12, PI12, PI12, PI12,
PI14, PI14, PI14, PI14,
PI34, PI34, PI34, PI34};
real a2_p[12] = {PI14, PI34, PI54, PI74,
0., PI12, PI, PI32,
0., PI12, PI, PI32};
real a3_t[8] = {alph1, alph1, alph1, alph1,
alph2, alph2, alph2, alph2};
real a3_p[8] = {PI14, PI34, PI54, PI74,
PI14, PI34, PI54, PI74};
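// 6 + 12 + 8 = 26 quadrature nodes in total (= NNODES)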
/* put all quadrature nodes together for interpolation */
real node_t[NNODES];
real node_p[NNODES];
for (int i = 0; i < 6; i++) {
node_t[i] = a1_t[i];
node_p[i] = a1_p[i];
}
for (int i = 0; i < 12; i++) {
node_t[6+i] = a2_t[i];
node_p[6+i] = a2_p[i];
}
for (int i = 0; i < 8; i++) {
node_t[18+i] = a3_t[i];
node_p[18+i] = a3_p[i];
}
/* Bind to cuda device constant memory */
checkCudaErrors(hipMemcpyToSymbol(_nn, &nn, NCOEFFS * sizeof(int)));
checkCudaErrors(hipMemcpyToSymbol(_mm, &mm, NCOEFFS * sizeof(int)));
checkCudaErrors(hipMemcpyToSymbol(_A1, &A1, sizeof(real)));
checkCudaErrors(hipMemcpyToSymbol(_A2, &A2, sizeof(real)));
checkCudaErrors(hipMemcpyToSymbol(_A3, &A3, sizeof(real)));
checkCudaErrors(hipMemcpyToSymbol(_B, &B, sizeof(real)));
checkCudaErrors(hipMemcpyToSymbol(_node_t, &node_t, NNODES * sizeof(real)));
checkCudaErrors(hipMemcpyToSymbol(_node_p, &node_p, NNODES * sizeof(real)));
}
}
extern "C"
void cuda_lamb(void)
{
/* CUDA exec config */
dim3 num_parts(nparts); // nparts blocks with nnodes threads each
dim3 dim_nodes(NNODES);
dim3 num_partcoeff(nparts, ncoeffs_max);
dim3 dim_coeff(ncoeffs_max);
//printf("N%d >> Determining Lamb's coefficients (nparts = %d)\n", rank, nparts);
if (nparts > 0) {
/* Temp storage for field variables at quadrature nodes */
real *_pp; // pressure
real *_ur; // radial velocity
real *_ut; // theta velocity
real *_up; // phi velocity
checkCudaErrors(hipMalloc(&_pp, NNODES * nparts * sizeof(real)));
checkCudaErrors(hipMalloc(&_ur, NNODES * nparts * sizeof(real)));
checkCudaErrors(hipMalloc(&_ut, NNODES * nparts * sizeof(real)));
checkCudaErrors(hipMalloc(&_up, NNODES * nparts * sizeof(real)));
/* Interpolate field variables to quadrature nodes */
hipLaunchKernelGGL(( check_nodes), dim3(num_parts), dim3(dim_nodes), 0, 0, nparts, _parts, _bc, _DOM);
hipLaunchKernelGGL(( interpolate_nodes), dim3(num_parts), dim3(dim_nodes), 0, 0, _p, _u, _v, _w, rho_f, nu,
gradP, _parts, _pp, _ur, _ut, _up, _bc);
/* Create scalar product storage using max particle coefficient size */
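// (one partial-sum entry per quadrature node, per coefficient, per particle)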
int sp_size = nparts * NNODES * ncoeffs_max;
checkCudaErrors(hipMalloc(&_int_Yp_re, sp_size * sizeof(real)));
checkCudaErrors(hipMalloc(&_int_Yp_im, sp_size * sizeof(real)));
checkCudaErrors(hipMalloc(&_int_rDYu_re, sp_size * sizeof(real)));
checkCudaErrors(hipMalloc(&_int_rDYu_im, sp_size * sizeof(real)));
checkCudaErrors(hipMalloc(&_int_xXDYu_re, sp_size * sizeof(real)));
checkCudaErrors(hipMalloc(&_int_xXDYu_im, sp_size * sizeof(real)));
/* Perform partial sums of lebedev quadrature */
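// One thread block per (particle, coefficient) pair; one thread per quadrature node.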
hipLaunchKernelGGL(( lebedev_quadrature), dim3(num_partcoeff), dim3(dim_nodes), 0, 0, _parts, ncoeffs_max,
_pp, _ur, _ut, _up,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
checkCudaErrors(hipFree(_pp));
checkCudaErrors(hipFree(_ur));
checkCudaErrors(hipFree(_ut));
checkCudaErrors(hipFree(_up));
}
/* Accumulate partial sums (all procs need to be involved) */
cuda_partial_sum_i(); // 2a) Calculate partial sums over x face
cuda_partial_sum_j(); // 2b) Calculate partial sums over y face
cuda_partial_sum_k(); // 2c) Calculate partial sums over z face
if (nparts > 0) {
/* Compute lambs coefficients from partial sums */
hipLaunchKernelGGL(( compute_lambs_coeffs), dim3(num_parts), dim3(dim_coeff), 0, 0, _parts, lamb_relax, mu, nu,
ncoeffs_max, nparts,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
/* Calculate hydrodynamic forces */
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
hipLaunchKernelGGL(( calc_forces), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, gradP.x, gradP.y,
gradP.z, rho_f, mu, nu);
/* Free */
checkCudaErrors(hipFree(_int_Yp_re));
checkCudaErrors(hipFree(_int_Yp_im));
checkCudaErrors(hipFree(_int_rDYu_re));
checkCudaErrors(hipFree(_int_rDYu_im));
checkCudaErrors(hipFree(_int_xXDYu_re));
checkCudaErrors(hipFree(_int_xXDYu_im));
}
}
extern "C"
void cuda_partial_sum_i(void)
{
//printf("N%d >> Communicating partial sums in i (nparts %d)\n", rank, nparts);
/* Outline of communication of partial sums for Lebedev integration
* 1) Finish local Lebedev integration in lebedev_quad<<<>>>. For a given
* scalar product, the partial sum for the jth coefficient of the nth
* particle is stored in: _int_someint[0 + NNODES*j + nparts*NNODES*n]
* 2) All particles at the outermost two bin planes need their sums
* accumulated (e.g., (j,k) planes at _bins.Gcc.{_isb->_is,_ie->_ieb})
* 3) Bin the particles using i indexing (find _bin_{start,end,count})
* 4) Reduce _bin_count at _isb:_is, _ie:_ieb to find nparts_send_{e,w}
* 5) Communicate nparts_send_{e,w} with adjacent subdomains to find
* nparts_recv_{w,e}
* 6) Excl. prefix scan _bin_count over the _isb:_is, _ie:_ieb planes to find
* destination index for particle data packed into sending aray
* 7) Allocate send array, int_send_{e,w} * 6 * sizeof(real). 6 comes from
* the number of integrals
* 8) Allocate recv array, int_recv_{e,w} * 6 * sizeof(real).
* 9) Communicate int_send_{e,w} to int_recv_{e,w}
* 10) Excl. prefix scan _bin_count over _isb:_is, _ie:_ieb planes to find unpacking
* indices - this already exists from earlier
* 11) Unpack and accumulate
* 12) Repeat for j, k
*/
/* Initialize execution config */
// Thread over east/west faces
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
dim3 bin_num_inb(by, bz);
dim3 bin_dim_inb(ty, tz);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b, s2b; // custom strides
int offset;
/* Allocate */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_e;
int *_offset_w;
checkCudaErrors(hipMalloc(&_offset_e, 2 * bins.Gcc.s2b_i * sizeof(int)));
checkCudaErrors(hipMalloc(&_offset_w, 2 * bins.Gcc.s2b_i * sizeof(int)));
thrust::device_ptr<int> t_offset_e(_offset_e);
thrust::device_ptr<int> t_offset_w(_offset_w);
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
if (nparts > 0) {
/* Find each particle's bin */
hipLaunchKernelGGL(( bin_fill_i), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
hipLaunchKernelGGL(( count_bin_parts_i), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _bin_start, _bin_end,
_bin_count);
/* Find number of particles to send and packing offsets */
s1b = bins.Gcc.jnb;
s2b = s1b * bins.Gcc.knb;
// East: _ie and _ieb planes
if (dom[rank].e != MPI_PROC_NULL) {
// _bin_count is indexed with i varying slowest -- can do a reduction
// directly from _bin_count, given the offset of the start of the _ie plane
offset = GFX_LOC(bins.Gcc._ie, 0, 0, s1b, s2b);
nparts_send[EAST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_i,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[EAST] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_i, t_offset_e);
} else {
hipMemset(_offset_e, 0., 2 * bins.Gcc.s2b_i * sizeof(int));
}
} else { // no parts to send
nparts_send[EAST] = 0;
hipMemset(_offset_e, 0., 2 * bins.Gcc.s2b_i * sizeof(int));
}
// West: _isb and _is planes
if (dom[rank].w != MPI_PROC_NULL) {
offset = GFX_LOC(bins.Gcc._isb, 0, 0, s1b, s2b);
nparts_send[WEST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_i,
0., thrust::plus<int>());
if (nparts_send[WEST] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_i, t_offset_w);
} else {
hipMemset(_offset_w, 0., 2 * bins.Gcc.s2b_i * sizeof(int));
}
} else {
nparts_send[WEST] = 0;
hipMemset(_offset_w, 0., 2 * bins.Gcc.s2b_i * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[EAST] = 0;
nparts_send[WEST] = 0;
hipMemset(_offset_e, 0., 2 * bins.Gcc.s2b_i * sizeof(int));
hipMemset(_offset_w, 0., 2 * bins.Gcc.s2b_i * sizeof(int));
}
// Sending and receiving is the same since the outer two bin planes are shared
nparts_recv[EAST] = nparts_send[EAST];
nparts_recv[WEST] = nparts_send[WEST];
/* Send number of parts to east/west */
// origin target
// nparts_send[WEST] -> nparts_recv[EAST]
// nparts_recv[WEST] <- nparts_send[EAST]
//nparts_recv[WEST] = 0; // init
//nparts_recv[EAST] = 0;
//mpi_send_nparts_i();
/* Allocate memory for send and recv partial sums */
int npsums = NSP * ncoeffs_max; // 6 scalar products * ncoeffs
// Indexing is, for example:
// _sum_send_e[coeff + ncoeffs_max*sp + ncoeffs_max*nsp*part_id]
// where
// part_id = [0, nparts) and sp = [0, 6)
// 0: Yp_re 1: Yp_im
// 2: rDYu_re 3: rDYu_im
// 4: xXDYu_re 5: xXDYu_im
// See accompanying note at the same location in cuda_transfer_parts_i
int send_alloc_e = nparts_send[EAST]*(nparts_send[EAST] > 0) + (nparts_send[EAST] == 0);
int send_alloc_w = nparts_send[WEST]*(nparts_send[WEST] > 0) + (nparts_send[WEST] == 0);
int recv_alloc_e = nparts_recv[EAST]*(nparts_recv[EAST] > 0) + (nparts_recv[EAST] == 0);
int recv_alloc_w = nparts_recv[WEST]*(nparts_recv[WEST] > 0) + (nparts_recv[WEST] == 0);
checkCudaErrors(hipMalloc(&_sum_send_e, send_alloc_e*npsums*sizeof(real)));
checkCudaErrors(hipMalloc(&_sum_send_w, send_alloc_w*npsums*sizeof(real)));
checkCudaErrors(hipMalloc(&_sum_recv_e, recv_alloc_e*npsums*sizeof(real)));
checkCudaErrors(hipMalloc(&_sum_recv_w, recv_alloc_w*npsums*sizeof(real)));
/* Pack partial sums */
if (nparts_send[EAST] > 0) {
hipLaunchKernelGGL(( pack_sums_e), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _sum_send_e, _offset_e,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
} else {
//hipMemset(_sum_send_e, 0., send_alloc_e * npsums * sizeof(real));
}
if (nparts_send[WEST] > 0) {
hipLaunchKernelGGL(( pack_sums_w), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _sum_send_w, _offset_w,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
} else {
//hipMemset(_sum_send_w, 0., send_alloc_w * npsums * sizeof(real));
}
hipDeviceSynchronize(); // ensure packing is complete
/* Communicate partial sums with MPI */
mpi_send_psums_i();
// Offsets are the same since they're over both ghost bins and edge bins
/* Unpack and complete partial sums */
if (nparts_recv[EAST] > 0) {
hipLaunchKernelGGL(( unpack_sums_e), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _sum_recv_e, _offset_e,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
}
if (nparts_recv[WEST] > 0) {
hipLaunchKernelGGL(( unpack_sums_w), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _sum_recv_w, _offset_w,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
}
hipDeviceSynchronize(); // ensure packing is complete
/* Free */
hipFree(_sum_send_e);
hipFree(_sum_send_w);
hipFree(_sum_recv_e);
hipFree(_sum_recv_w);
hipFree(_part_ind);
hipFree(_part_bin);
hipFree(_offset_e);
hipFree(_offset_w);
}
extern "C"
void cuda_partial_sum_j(void)
{
//printf("N%d >> Communicating partial sums in j\n", rank);
/* Initialize execution config */
// Thread over north/south faces
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
dim3 bin_num_jnb(bz, bx);
dim3 bin_dim_jnb(tz, tx);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b, s2b; // custom strides
int offset;
/* Allocate */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_n;
int *_offset_s;
checkCudaErrors(hipMalloc(&_offset_n, 2 * bins.Gcc.s2b_j * sizeof(int)));
checkCudaErrors(hipMalloc(&_offset_s, 2 * bins.Gcc.s2b_j * sizeof(int)));
thrust::device_ptr<int> t_offset_n(_offset_n);
thrust::device_ptr<int> t_offset_s(_offset_s);
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
if (nparts > 0) {
/* Find each particle's bin */
hipLaunchKernelGGL(( bin_fill_j), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
hipLaunchKernelGGL(( count_bin_parts_j), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _bin_start, _bin_end,
_bin_count);
/* Find number of particles to send and packing offsets */
s1b = bins.Gcc.knb;
s2b = s1b * bins.Gcc.inb;
// North: _je and _jeb planes
if (dom[rank].n != MPI_PROC_NULL) {
// _bin_count is indexed with j varying slowest -- can do a reduction
// directly from _bin_count, given the offset of the start of the _je plane
offset = GFY_LOC(0, bins.Gcc._je, 0, s1b, s2b);
nparts_send[NORTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_j,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[NORTH] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_j, t_offset_n);
} else {
hipMemset(_offset_n, 0., 2 * bins.Gcc.s2b_j * sizeof(int));
}
} else { // no parts to send
nparts_send[NORTH] = 0;
hipMemset(_offset_n, 0., 2 * bins.Gcc.s2b_j * sizeof(int));
}
// South: _jsb and _js planes
if (dom[rank].s != MPI_PROC_NULL) {
offset = GFY_LOC(0, bins.Gcc._jsb, 0, s1b, s2b);
nparts_send[SOUTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_j,
0., thrust::plus<int>());
if (nparts_send[SOUTH] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_j, t_offset_s);
} else {
hipMemset(_offset_s, 0., 2 * bins.Gcc.s2b_j * sizeof(int));
}
} else {
nparts_send[SOUTH] = 0;
hipMemset(_offset_s, 0., 2 * bins.Gcc.s2b_j * sizeof(int));
}
} else { // nparts == 0
checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[NORTH] = 0;
nparts_send[SOUTH] = 0;
hipMemset(_offset_n, 0., 2 * bins.Gcc.s2b_j * sizeof(int));
hipMemset(_offset_s, 0., 2 * bins.Gcc.s2b_j * sizeof(int));
}
// Sending and receiving is the same since the outer two bin planes are shared
nparts_recv[NORTH] = nparts_send[NORTH];
nparts_recv[SOUTH] = nparts_send[SOUTH];
/* Send number of parts to north/south */
// origin target
// nparts_send[SOUTH] -> nparts_recv[NORTH]
// nparts_recv[SOUTH] <- nparts_send[NORTH]
//nparts_recv[SOUTH] = 0; // init
//nparts_recv[NORTH] = 0;
//mpi_send_nparts_j();
/* Allocate memory for send and recv partial sums */
int npsums = NSP * ncoeffs_max; // 6 scalar products * ncoeffs
// Indexing is, for example:
// _sum_send_n[coeff + ncoeffs_max*sp + ncoeffs_max*nsp*part_id]
// where
// part_id = [0, nparts) and sp = [0, 6)
// 0: Yp_re 1: Yp_im
// 2: rDYu_re 3: rDYu_im
// 4: xXDYu_re 5: xXDYu_im
// See accompanying note at the same location in cuda_transfer_parts_i
int send_alloc_n = nparts_send[NORTH]*(nparts_send[NORTH] > 0) + (nparts_send[NORTH] == 0);
int send_alloc_s = nparts_send[SOUTH]*(nparts_send[SOUTH] > 0) + (nparts_send[SOUTH] == 0);
int recv_alloc_n = nparts_recv[NORTH]*(nparts_recv[NORTH] > 0) + (nparts_recv[NORTH] == 0);
int recv_alloc_s = nparts_recv[SOUTH]*(nparts_recv[SOUTH] > 0) + (nparts_recv[SOUTH] == 0);
checkCudaErrors(hipMalloc(&_sum_send_n, send_alloc_n*npsums*sizeof(real)));
checkCudaErrors(hipMalloc(&_sum_send_s, send_alloc_s*npsums*sizeof(real)));
checkCudaErrors(hipMalloc(&_sum_recv_n, recv_alloc_n*npsums*sizeof(real)));
checkCudaErrors(hipMalloc(&_sum_recv_s, recv_alloc_s*npsums*sizeof(real)));
/* Pack partial sums */
if (nparts_send[NORTH] > 0) {
hipLaunchKernelGGL(( pack_sums_n), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _sum_send_n, _offset_n,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
} else {
//hipMemset(_sum_send_n, 0., send_alloc_n * npsums * sizeof(real));
}
if (nparts_send[SOUTH] > 0) {
hipLaunchKernelGGL(( pack_sums_s), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _sum_send_s, _offset_s,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
} else {
//hipMemset(_sum_send_s, 0., send_alloc_s * npsums * sizeof(real));
}
hipDeviceSynchronize(); // ensure packing is complete
/* Communicate partial sums with MPI */
mpi_send_psums_j();
// Offsets are the same since they're over both ghost bins and edge bins
/* Unpack and complete partial sums */
if (nparts_recv[NORTH] > 0) {
hipLaunchKernelGGL(( unpack_sums_n), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _sum_recv_n, _offset_n,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
}
if (nparts_recv[SOUTH] > 0) {
hipLaunchKernelGGL(( unpack_sums_s), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _sum_recv_s, _offset_s,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
}
  hipDeviceSynchronize(); // ensure unpacking is complete
/* Free */
hipFree(_sum_send_n);
hipFree(_sum_send_s);
hipFree(_sum_recv_n);
hipFree(_sum_recv_s);
hipFree(_part_ind);
hipFree(_part_bin);
hipFree(_offset_n);
hipFree(_offset_s);
}
extern "C"
void cuda_partial_sum_k(void)
{
//printf("N%d >> Communicating partial sums in k\n", rank);
/* Initialize execution config */
// Thread over top/bottom faces
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
dim3 bin_num_knb(bx, by);
dim3 bin_dim_knb(tx, ty);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
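  // i.e. t_nparts = min(nparts, MAX_THREADS_1D), written branch-free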
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b, s2b; // custom strides
int offset;
/* Allocate */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_t;
int *_offset_b;
checkCudaErrors(hipMalloc(&_offset_t, 2 * bins.Gcc.s2b_k * sizeof(int)));
checkCudaErrors(hipMalloc(&_offset_b, 2 * bins.Gcc.s2b_k * sizeof(int)));
thrust::device_ptr<int> t_offset_t(_offset_t);
thrust::device_ptr<int> t_offset_b(_offset_b);
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
if (nparts > 0) {
/* Find each particle's bin */
hipLaunchKernelGGL(( bin_fill_k), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
hipLaunchKernelGGL(( count_bin_parts_k), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _bin_start, _bin_end,
_bin_count);
/* Find number of particles to send and packing offsets */
s1b = bins.Gcc.inb;
s2b = s1b * bins.Gcc.jnb;
    // Top: _ke and _keb planes
if (dom[rank].t != MPI_PROC_NULL) {
// _bin_count is indexed with k varying slowest -- can do a reduction
// directly from _bin_count, given the offset of the start of the _ke plane
offset = GFZ_LOC(0, 0, bins.Gcc._ke, s1b, s2b);
nparts_send[TOP] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_k,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[TOP] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_k, t_offset_t);
} else {
hipMemset(_offset_t, 0., 2 * bins.Gcc.s2b_k * sizeof(int));
}
} else { // no parts to send
nparts_send[TOP] = 0;
hipMemset(_offset_t, 0., 2 * bins.Gcc.s2b_k * sizeof(int));
}
    // Bottom: _ksb and _ks planes
if (dom[rank].b != MPI_PROC_NULL) {
offset = GFZ_LOC(0, 0, bins.Gcc._ksb, s1b, s2b);
nparts_send[BOTTOM] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_k,
0., thrust::plus<int>());
if (nparts_send[BOTTOM] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_k, t_offset_b);
} else {
hipMemset(_offset_b, 0., 2 * bins.Gcc.s2b_k * sizeof(int));
}
} else {
nparts_send[BOTTOM] = 0;
hipMemset(_offset_b, 0., 2 * bins.Gcc.s2b_k * sizeof(int));
}
} else { // nparts = 0
checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[TOP] = 0;
nparts_send[BOTTOM] = 0;
hipMemset(_offset_t, 0., 2 * bins.Gcc.s2b_k * sizeof(int));
hipMemset(_offset_b, 0., 2 * bins.Gcc.s2b_k * sizeof(int));
}
// Sending and receiving is the same since the outer two bin planes are shared
nparts_recv[TOP] = nparts_send[TOP];
nparts_recv[BOTTOM] = nparts_send[BOTTOM];
/* Send number of parts to top/bottom */
// origin target
// nparts_send[BOTTOM] -> nparts_recv[TOP]
// nparts_recv[BOTTOM] <- nparts_send[TOP]
//nparts_recv[BOTTOM] = 0; // init
//nparts_recv[TOP] = 0;
//mpi_send_nparts_k();
/* Allocate memory for send and recv partial sums */
int npsums = NSP * ncoeffs_max; // 6 scalar products * ncoeffs
// Indexing is, for example:
// _sum_send_t[coeff + ncoeffs_max*sp + ncoeffs_max*nsp*part_id]
// where
// part_id = [0, nparts) and sp = [0, 6)
// 0: Yp_re 1: Yp_im
// 2: rDYu_re 3: rDYu_im
// 4: xXDYu_re 5: xXDYu_im
int send_alloc_t = nparts_send[TOP]*(nparts_send[TOP] > 0) + (nparts_send[TOP] == 0);
int send_alloc_b = nparts_send[BOTTOM]*(nparts_send[BOTTOM] > 0) + (nparts_send[BOTTOM] == 0);
int recv_alloc_t = nparts_recv[TOP]*(nparts_recv[TOP] > 0) + (nparts_recv[TOP] == 0);
int recv_alloc_b = nparts_recv[BOTTOM]*(nparts_recv[BOTTOM] > 0) + (nparts_recv[BOTTOM] == 0);
checkCudaErrors(hipMalloc(&_sum_send_t, send_alloc_t*npsums*sizeof(real)));
checkCudaErrors(hipMalloc(&_sum_send_b, send_alloc_b*npsums*sizeof(real)));
checkCudaErrors(hipMalloc(&_sum_recv_t, recv_alloc_t*npsums*sizeof(real)));
checkCudaErrors(hipMalloc(&_sum_recv_b, recv_alloc_b*npsums*sizeof(real)));
/* Pack partial sums */
if (nparts_send[TOP] > 0) {
hipLaunchKernelGGL(( pack_sums_t), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _sum_send_t, _offset_t,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
} else {
//hipMemset(_sum_send_t, 0., send_alloc_t * npsums * sizeof(real));
}
if (nparts_send[BOTTOM] > 0) {
hipLaunchKernelGGL(( pack_sums_b), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _sum_send_b, _offset_b,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
} else {
//hipMemset(_sum_send_b, 0., send_alloc_b * npsums * sizeof(real));
}
hipDeviceSynchronize(); // ensure packing is complete
/* Communicate partial sums with MPI */
mpi_send_psums_k();
// Offsets are the same since they're over both ghost bins and edge bins
/* Unpack and complete partial sums */
if (nparts_recv[TOP] > 0) {
hipLaunchKernelGGL(( unpack_sums_t), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _sum_recv_t, _offset_t,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
}
if (nparts_recv[BOTTOM] > 0) {
hipLaunchKernelGGL(( unpack_sums_b), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _sum_recv_b, _offset_b,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
}
  hipDeviceSynchronize(); // ensure unpacking is complete
/* Free */
hipFree(_sum_send_t);
hipFree(_sum_send_b);
hipFree(_sum_recv_t);
hipFree(_sum_recv_b);
hipFree(_part_ind);
hipFree(_part_bin);
hipFree(_offset_t);
hipFree(_offset_b);
}
extern "C"
real cuda_lamb_err(void)
{
//printf("N%d >> Determining Lamb's error\n", rank);
real error = DBL_MIN;
if (nparts > 0) {
// create a place to store sorted coefficients and errors
real *_part_errors;
hipMalloc((void**) &_part_errors, nparts*sizeof(real));
// sort the coefficients and calculate errors along the way
dim3 numBlocks(nparts);
dim3 dimBlocks(ncoeffs_max);
hipLaunchKernelGGL(( compute_error), dim3(numBlocks), dim3(dimBlocks), 0, 0, lamb_cut, ncoeffs_max, nparts,
_parts, _part_errors);
// find maximum error of all particles
thrust::device_ptr<real> t_part_errors(_part_errors);
error = thrust::reduce(t_part_errors,
t_part_errors + nparts,
0., thrust::maximum<real>());
// clean up
hipFree(_part_errors);
// store copy of coefficients for future calculation
hipLaunchKernelGGL(( store_coeffs), dim3(numBlocks), dim3(dimBlocks), 0, 0, _parts, nparts, ncoeffs_max);
}
// MPI reduce to find max error
MPI_Allreduce(MPI_IN_PLACE, &error, 1, mpi_real, MPI_MAX, MPI_COMM_WORLD);
return error;
}
| c179ed352b461af117cab0d8af4db9d547f92c6d.cu | /*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2018 Adam Sierakowski and Daniel Willen,
* The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include <cuda.h>
#include <thrust/sort.h>
#include "cuda_physalis.h"
#include "cuda_particle.h"
#include <helper_cuda.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
__constant__ real _A1;
__constant__ real _A2;
__constant__ real _A3;
__constant__ real _B;
__constant__ int _nn[NCOEFFS];
__constant__ int _mm[NCOEFFS];
__constant__ real _node_t[NNODES];
__constant__ real _node_p[NNODES];
real *_int_Yp_re;
real *_int_Yp_im;
real *_int_rDYu_re;
real *_int_rDYu_im;
real *_int_xXDYu_re;
real *_int_xXDYu_im;
real *_sum_send_e;
real *_sum_send_w;
real *_sum_send_n;
real *_sum_send_s;
real *_sum_send_t;
real *_sum_send_b;
real *_sum_recv_e;
real *_sum_recv_w;
real *_sum_recv_n;
real *_sum_recv_s;
real *_sum_recv_t;
real *_sum_recv_b;
extern "C"
void cuda_init_physalis(void)
{
if (NPARTS > 0) {
/* set up coefficient table */
int nn[NCOEFFS] = {0,
1, 1,
2, 2, 2,
3, 3, 3, 3,
4, 4, 4, 4, 4};
int mm[NCOEFFS] = {0,
0, 1,
0, 1, 2,
0, 1, 2, 3,
0, 1, 2, 3, 4};
/* set up quadrature nodes for 7th-order Lebedev quadrature */
// NOTE: Higher order quadratures exist as comments in bluebottle, in
// cuda_quadrature.cu:cuda_Lamb()
real PI14 = 0.25 * PI;
real PI12 = 0.5 * PI;
real PI34 = 0.75 * PI;
real PI54 = 1.25 * PI;
real PI32 = 1.5 * PI;
real PI74 = 1.75 * PI;
real alph1 = 0.955316618124509;
real alph2 = 2.186276035465284;
/* weights */
real A1 = 0.598398600683775;
real A2 = 0.478718880547015;
real A3 = 0.403919055461543;
real B = 0.;
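    // Check: 6*A1 + 12*A2 + 8*A3 adds up to 4*pi, so sum_i w_i * f(theta_i, phi_i)
    // approximates the (unnormalized) integral of f over the unit sphere.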
/* nodes */
// Find a more elegant way of fixing the divide by sin(0)
real a1_t[6] = {PI12, PI12, PI12, PI12, 0.+DIV_ST, PI-DIV_ST};
real a1_p[6] = {0., PI12, PI, PI32, 0., 0.};
real a2_t[12] = {PI12, PI12, PI12, PI12,
PI14, PI14, PI14, PI14,
PI34, PI34, PI34, PI34};
real a2_p[12] = {PI14, PI34, PI54, PI74,
0., PI12, PI, PI32,
0., PI12, PI, PI32};
real a3_t[8] = {alph1, alph1, alph1, alph1,
alph2, alph2, alph2, alph2};
real a3_p[8] = {PI14, PI34, PI54, PI74,
PI14, PI34, PI54, PI74};
/* put all quadrature nodes together for interpolation */
real node_t[NNODES];
real node_p[NNODES];
for (int i = 0; i < 6; i++) {
node_t[i] = a1_t[i];
node_p[i] = a1_p[i];
}
for (int i = 0; i < 12; i++) {
node_t[6+i] = a2_t[i];
node_p[6+i] = a2_p[i];
}
for (int i = 0; i < 8; i++) {
node_t[18+i] = a3_t[i];
node_p[18+i] = a3_p[i];
}
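    // node_t/node_p now hold the full set of 6 + 12 + 8 = 26 quadrature directions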
/* Bind to cuda device constant memory */
checkCudaErrors(cudaMemcpyToSymbol(_nn, &nn, NCOEFFS * sizeof(int)));
checkCudaErrors(cudaMemcpyToSymbol(_mm, &mm, NCOEFFS * sizeof(int)));
checkCudaErrors(cudaMemcpyToSymbol(_A1, &A1, sizeof(real)));
checkCudaErrors(cudaMemcpyToSymbol(_A2, &A2, sizeof(real)));
checkCudaErrors(cudaMemcpyToSymbol(_A3, &A3, sizeof(real)));
checkCudaErrors(cudaMemcpyToSymbol(_B, &B, sizeof(real)));
checkCudaErrors(cudaMemcpyToSymbol(_node_t, &node_t, NNODES * sizeof(real)));
checkCudaErrors(cudaMemcpyToSymbol(_node_p, &node_p, NNODES * sizeof(real)));
}
}
extern "C"
void cuda_lamb(void)
{
/* CUDA exec config */
dim3 num_parts(nparts); // nparts blocks with nnodes threads each
dim3 dim_nodes(NNODES);
dim3 num_partcoeff(nparts, ncoeffs_max);
dim3 dim_coeff(ncoeffs_max);
//printf("N%d >> Determining Lamb's coefficients (nparts = %d)\n", rank, nparts);
if (nparts > 0) {
/* Temp storage for field variables at quadrature nodes */
real *_pp; // pressure
real *_ur; // radial velocity
real *_ut; // theta velocity
real *_up; // phi velocity
checkCudaErrors(cudaMalloc(&_pp, NNODES * nparts * sizeof(real)));
checkCudaErrors(cudaMalloc(&_ur, NNODES * nparts * sizeof(real)));
checkCudaErrors(cudaMalloc(&_ut, NNODES * nparts * sizeof(real)));
checkCudaErrors(cudaMalloc(&_up, NNODES * nparts * sizeof(real)));
/* Interpolate field varaibles to quadrature nodes */
check_nodes<<<num_parts, dim_nodes>>>(nparts, _parts, _bc, _DOM);
interpolate_nodes<<<num_parts, dim_nodes>>>(_p, _u, _v, _w, rho_f, nu,
gradP, _parts, _pp, _ur, _ut, _up, _bc);
/* Create scalar product storage using max particle coefficient size */
int sp_size = nparts * NNODES * ncoeffs_max;
checkCudaErrors(cudaMalloc(&_int_Yp_re, sp_size * sizeof(real)));
checkCudaErrors(cudaMalloc(&_int_Yp_im, sp_size * sizeof(real)));
checkCudaErrors(cudaMalloc(&_int_rDYu_re, sp_size * sizeof(real)));
checkCudaErrors(cudaMalloc(&_int_rDYu_im, sp_size * sizeof(real)));
checkCudaErrors(cudaMalloc(&_int_xXDYu_re, sp_size * sizeof(real)));
checkCudaErrors(cudaMalloc(&_int_xXDYu_im, sp_size * sizeof(real)));
/* Perform partial sums of lebedev quadrature */
lebedev_quadrature<<<num_partcoeff, dim_nodes>>>(_parts, ncoeffs_max,
_pp, _ur, _ut, _up,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
checkCudaErrors(cudaFree(_pp));
checkCudaErrors(cudaFree(_ur));
checkCudaErrors(cudaFree(_ut));
checkCudaErrors(cudaFree(_up));
}
/* Accumulate partial sums (all procs need to be involved) */
cuda_partial_sum_i(); // 2a) Calculate partial sums over x face
cuda_partial_sum_j(); // 2b) Calculate partial sums over y face
cuda_partial_sum_k(); // 2c) Calculate partial sums over z face
if (nparts > 0) {
/* Compute lambs coefficients from partial sums */
compute_lambs_coeffs<<<num_parts, dim_coeff>>>(_parts, lamb_relax, mu, nu,
ncoeffs_max, nparts,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
/* Calculate hydrodynamic forces */
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
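    // i.e. t_nparts = min(nparts, MAX_THREADS_1D), written branch-free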
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
calc_forces<<<num_nparts, dim_nparts>>>(_parts, nparts, gradP.x, gradP.y,
gradP.z, rho_f, mu, nu);
/* Free */
checkCudaErrors(cudaFree(_int_Yp_re));
checkCudaErrors(cudaFree(_int_Yp_im));
checkCudaErrors(cudaFree(_int_rDYu_re));
checkCudaErrors(cudaFree(_int_rDYu_im));
checkCudaErrors(cudaFree(_int_xXDYu_re));
checkCudaErrors(cudaFree(_int_xXDYu_im));
}
}
extern "C"
void cuda_partial_sum_i(void)
{
//printf("N%d >> Communicating partial sums in i (nparts %d)\n", rank, nparts);
/* Outline of communication of partial sums for Lebedev integration
* 1) Finish local Lebedev integration in lebedev_quad<<<>>>. For a given
* scalar product, the partial sum for the jth coefficient of the nth
* particle is stored in: _int_someint[0 + NNODES*j + nparts*NNODES*n]
* 2) All particles at the outermost two bin planes need their sums
* accumulated (e.g., (j,k) planes at _bins.Gcc.{_isb->_is,_ie->_ieb})
* 3) Bin the particles using i indexing (find _bin_{start,end,count})
* 4) Reduce _bin_count at _isb:_is, _ie:_ieb to find nparts_send_{e,w}
* 5) Communicate nparts_send_{e,w} with adjacent subdomains to find
* nparts_recv_{w,e}
* 6) Excl. prefix scan _bin_count over the _isb:_is, _ie:_ieb planes to find
   *    destination index for particle data packed into sending array
* 7) Allocate send array, int_send_{e,w} * 6 * sizeof(real). 6 comes from
* the number of integrals
* 8) Allocate recv array, int_recv_{e,w} * 6 * sizeof(real).
* 9) Communicate int_send_{e,w} to int_recv_{e,w}
* 10) Excl. prefix scan _bin_count over _isb:_is, _ie:_ieb planes to find unpacking
   *    indices - this already exists from earlier
* 11) Unpack and accumulate
* 12) Repeat for j, k
*/
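  /* A small worked example of steps 4) and 6): if the two send planes hold
   * bin counts {2, 0, 3, 1}, the reduction gives nparts_send = 6 and the
   * exclusive prefix scan gives packing offsets {0, 2, 2, 5}, i.e. the first
   * slot each bin's particles occupy in the packed send array. */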
/* Initialize execution config */
// Thread over east/west faces
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
dim3 bin_num_inb(by, bz);
dim3 bin_dim_inb(ty, tz);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b, s2b; // custom strides
int offset;
/* Allocate */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_e;
int *_offset_w;
checkCudaErrors(cudaMalloc(&_offset_e, 2 * bins.Gcc.s2b_i * sizeof(int)));
checkCudaErrors(cudaMalloc(&_offset_w, 2 * bins.Gcc.s2b_i * sizeof(int)));
thrust::device_ptr<int> t_offset_e(_offset_e);
thrust::device_ptr<int> t_offset_w(_offset_w);
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
if (nparts > 0) {
/* Find each particle's bin */
bin_fill_i<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
count_bin_parts_i<<<bin_num_inb, bin_dim_inb>>>(_bin_start, _bin_end,
_bin_count);
/* Find number of particles to send and packing offsets */
s1b = bins.Gcc.jnb;
s2b = s1b * bins.Gcc.knb;
// East: _ie and _ieb planes
if (dom[rank].e != MPI_PROC_NULL) {
// _bin_count is indexed with i varying slowest -- can do a reduction
// directly from _bin_count, given the offset of the start of the _ie plane
offset = GFX_LOC(bins.Gcc._ie, 0, 0, s1b, s2b);
nparts_send[EAST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_i,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[EAST] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_i, t_offset_e);
} else {
cudaMemset(_offset_e, 0., 2 * bins.Gcc.s2b_i * sizeof(int));
}
} else { // no parts to send
nparts_send[EAST] = 0;
cudaMemset(_offset_e, 0., 2 * bins.Gcc.s2b_i * sizeof(int));
}
// West: _isb and _is planes
if (dom[rank].w != MPI_PROC_NULL) {
offset = GFX_LOC(bins.Gcc._isb, 0, 0, s1b, s2b);
nparts_send[WEST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_i,
0., thrust::plus<int>());
if (nparts_send[WEST] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_i, t_offset_w);
} else {
cudaMemset(_offset_w, 0., 2 * bins.Gcc.s2b_i * sizeof(int));
}
} else {
nparts_send[WEST] = 0;
cudaMemset(_offset_w, 0., 2 * bins.Gcc.s2b_i * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[EAST] = 0;
nparts_send[WEST] = 0;
cudaMemset(_offset_e, 0., 2 * bins.Gcc.s2b_i * sizeof(int));
cudaMemset(_offset_w, 0., 2 * bins.Gcc.s2b_i * sizeof(int));
}
// Sending and receiving is the same since the outer two bin planes are shared
nparts_recv[EAST] = nparts_send[EAST];
nparts_recv[WEST] = nparts_send[WEST];
/* Send number of parts to east/west */
// origin target
// nparts_send[WEST] -> nparts_recv[EAST]
// nparts_recv[WEST] <- nparts_send[EAST]
//nparts_recv[WEST] = 0; // init
//nparts_recv[EAST] = 0;
//mpi_send_nparts_i();
/* Allocate memory for send and recv partial sums */
int npsums = NSP * ncoeffs_max; // 6 scalar products * ncoeffs
// Indexing is, for example:
// _sum_send_e[coeff + ncoeffs_max*sp + ncoeffs_max*nsp*part_id]
// where
// part_id = [0, nparts) and sp = [0, 6)
// 0: Yp_re 1: Yp_im
// 2: rDYu_re 3: rDYu_im
// 4: xXDYu_re 5: xXDYu_im
// See accompanying note at the same location in cuda_transfer_parts_i
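  //   e.g. if ncoeffs_max = 15, the rDYu_im sum (sp = 3) for coefficient 2 of
  //   particle 4 sits at flat index 2 + 15*3 + 15*6*4 = 407
  // The "== 0" terms below force an allocation of at least one element so the
  // send/recv buffers remain valid pointers even when no particles are sent.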
int send_alloc_e = nparts_send[EAST]*(nparts_send[EAST] > 0) + (nparts_send[EAST] == 0);
int send_alloc_w = nparts_send[WEST]*(nparts_send[WEST] > 0) + (nparts_send[WEST] == 0);
int recv_alloc_e = nparts_recv[EAST]*(nparts_recv[EAST] > 0) + (nparts_recv[EAST] == 0);
int recv_alloc_w = nparts_recv[WEST]*(nparts_recv[WEST] > 0) + (nparts_recv[WEST] == 0);
checkCudaErrors(cudaMalloc(&_sum_send_e, send_alloc_e*npsums*sizeof(real)));
checkCudaErrors(cudaMalloc(&_sum_send_w, send_alloc_w*npsums*sizeof(real)));
checkCudaErrors(cudaMalloc(&_sum_recv_e, recv_alloc_e*npsums*sizeof(real)));
checkCudaErrors(cudaMalloc(&_sum_recv_w, recv_alloc_w*npsums*sizeof(real)));
/* Pack partial sums */
if (nparts_send[EAST] > 0) {
pack_sums_e<<<bin_num_inb, bin_dim_inb>>>(_sum_send_e, _offset_e,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
} else {
//cudaMemset(_sum_send_e, 0., send_alloc_e * npsums * sizeof(real));
}
if (nparts_send[WEST] > 0) {
pack_sums_w<<<bin_num_inb, bin_dim_inb>>>(_sum_send_w, _offset_w,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
} else {
//cudaMemset(_sum_send_w, 0., send_alloc_w * npsums * sizeof(real));
}
cudaDeviceSynchronize(); // ensure packing is complete
/* Communicate partial sums with MPI */
mpi_send_psums_i();
// Offsets are the same since they're over both ghost bins and edge bins
/* Unpack and complete partial sums */
if (nparts_recv[EAST] > 0) {
unpack_sums_e<<<bin_num_inb, bin_dim_inb>>>(_sum_recv_e, _offset_e,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
}
if (nparts_recv[WEST] > 0) {
unpack_sums_w<<<bin_num_inb, bin_dim_inb>>>(_sum_recv_w, _offset_w,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
}
  cudaDeviceSynchronize(); // ensure unpacking is complete
/* Free */
cudaFree(_sum_send_e);
cudaFree(_sum_send_w);
cudaFree(_sum_recv_e);
cudaFree(_sum_recv_w);
cudaFree(_part_ind);
cudaFree(_part_bin);
cudaFree(_offset_e);
cudaFree(_offset_w);
}
extern "C"
void cuda_partial_sum_j(void)
{
//printf("N%d >> Communicating partial sums in j\n", rank);
/* Initialize execution config */
// Thread over north/south faces
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
dim3 bin_num_jnb(bz, bx);
dim3 bin_dim_jnb(tz, tx);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b, s2b; // custom strides
int offset;
/* Allocate */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_n;
int *_offset_s;
checkCudaErrors(cudaMalloc(&_offset_n, 2 * bins.Gcc.s2b_j * sizeof(int)));
checkCudaErrors(cudaMalloc(&_offset_s, 2 * bins.Gcc.s2b_j * sizeof(int)));
thrust::device_ptr<int> t_offset_n(_offset_n);
thrust::device_ptr<int> t_offset_s(_offset_s);
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
if (nparts > 0) {
/* Find each particle's bin */
bin_fill_j<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
count_bin_parts_j<<<bin_num_jnb, bin_dim_jnb>>>(_bin_start, _bin_end,
_bin_count);
/* Find number of particles to send and packing offsets */
s1b = bins.Gcc.knb;
s2b = s1b * bins.Gcc.inb;
// North: _je and _jeb planes
if (dom[rank].n != MPI_PROC_NULL) {
      // _bin_count is indexed with j varying slowest -- can do a reduction
// directly from _bin_count, given the offset of the start of the _je plane
offset = GFY_LOC(0, bins.Gcc._je, 0, s1b, s2b);
nparts_send[NORTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_j,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[NORTH] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_j, t_offset_n);
} else {
cudaMemset(_offset_n, 0., 2 * bins.Gcc.s2b_j * sizeof(int));
}
} else { // no parts to send
nparts_send[NORTH] = 0;
cudaMemset(_offset_n, 0., 2 * bins.Gcc.s2b_j * sizeof(int));
}
// South: _jsb and _js planes
if (dom[rank].s != MPI_PROC_NULL) {
offset = GFY_LOC(0, bins.Gcc._jsb, 0, s1b, s2b);
nparts_send[SOUTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_j,
0., thrust::plus<int>());
if (nparts_send[SOUTH] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_j, t_offset_s);
} else {
cudaMemset(_offset_s, 0., 2 * bins.Gcc.s2b_j * sizeof(int));
}
} else {
nparts_send[SOUTH] = 0;
cudaMemset(_offset_s, 0., 2 * bins.Gcc.s2b_j * sizeof(int));
}
} else { // nparts == 0
checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[NORTH] = 0;
nparts_send[SOUTH] = 0;
cudaMemset(_offset_n, 0., 2 * bins.Gcc.s2b_j * sizeof(int));
cudaMemset(_offset_s, 0., 2 * bins.Gcc.s2b_j * sizeof(int));
}
// Sending and receiving is the same since the outer two bin planes are shared
nparts_recv[NORTH] = nparts_send[NORTH];
nparts_recv[SOUTH] = nparts_send[SOUTH];
/* Send number of parts to north/south */
// origin target
// nparts_send[SOUTH] -> nparts_recv[NORTH]
// nparts_recv[SOUTH] <- nparts_send[NORTH]
//nparts_recv[SOUTH] = 0; // init
//nparts_recv[NORTH] = 0;
//mpi_send_nparts_j();
/* Allocate memory for send and recv partial sums */
int npsums = NSP * ncoeffs_max; // 6 scalar products * ncoeffs
// Indexing is, for example:
// _sum_send_n[coeff + ncoeffs_max*sp + ncoeffs_max*nsp*part_id]
// where
// part_id = [0, nparts) and sp = [0, 6)
// 0: Yp_re 1: Yp_im
// 2: rDYu_re 3: rDYu_im
// 4: xXDYu_re 5: xXDYu_im
// See accompanying note at the same location in cuda_transfer_parts_i
int send_alloc_n = nparts_send[NORTH]*(nparts_send[NORTH] > 0) + (nparts_send[NORTH] == 0);
int send_alloc_s = nparts_send[SOUTH]*(nparts_send[SOUTH] > 0) + (nparts_send[SOUTH] == 0);
int recv_alloc_n = nparts_recv[NORTH]*(nparts_recv[NORTH] > 0) + (nparts_recv[NORTH] == 0);
int recv_alloc_s = nparts_recv[SOUTH]*(nparts_recv[SOUTH] > 0) + (nparts_recv[SOUTH] == 0);
checkCudaErrors(cudaMalloc(&_sum_send_n, send_alloc_n*npsums*sizeof(real)));
checkCudaErrors(cudaMalloc(&_sum_send_s, send_alloc_s*npsums*sizeof(real)));
checkCudaErrors(cudaMalloc(&_sum_recv_n, recv_alloc_n*npsums*sizeof(real)));
checkCudaErrors(cudaMalloc(&_sum_recv_s, recv_alloc_s*npsums*sizeof(real)));
/* Pack partial sums */
if (nparts_send[NORTH] > 0) {
pack_sums_n<<<bin_num_jnb, bin_dim_jnb>>>(_sum_send_n, _offset_n,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
} else {
//cudaMemset(_sum_send_n, 0., send_alloc_n * npsums * sizeof(real));
}
if (nparts_send[SOUTH] > 0) {
pack_sums_s<<<bin_num_jnb, bin_dim_jnb>>>(_sum_send_s, _offset_s,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
} else {
//cudaMemset(_sum_send_s, 0., send_alloc_s * npsums * sizeof(real));
}
cudaDeviceSynchronize(); // ensure packing is complete
/* Communicate partial sums with MPI */
mpi_send_psums_j();
// Offsets are the same since they're over both ghost bins and edge bins
/* Unpack and complete partial sums */
if (nparts_recv[NORTH] > 0) {
unpack_sums_n<<<bin_num_jnb, bin_dim_jnb>>>(_sum_recv_n, _offset_n,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
}
if (nparts_recv[SOUTH] > 0) {
unpack_sums_s<<<bin_num_jnb, bin_dim_jnb>>>(_sum_recv_s, _offset_s,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
}
  cudaDeviceSynchronize(); // ensure unpacking is complete
/* Free */
cudaFree(_sum_send_n);
cudaFree(_sum_send_s);
cudaFree(_sum_recv_n);
cudaFree(_sum_recv_s);
cudaFree(_part_ind);
cudaFree(_part_bin);
cudaFree(_offset_n);
cudaFree(_offset_s);
}
extern "C"
void cuda_partial_sum_k(void)
{
//printf("N%d >> Communicating partial sums in k\n", rank);
/* Initialize execution config */
// Thread over top/bottom faces
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
dim3 bin_num_knb(bx, by);
dim3 bin_dim_knb(tx, ty);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b, s2b; // custom strides
int offset;
/* Allocate */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_t;
int *_offset_b;
checkCudaErrors(cudaMalloc(&_offset_t, 2 * bins.Gcc.s2b_k * sizeof(int)));
checkCudaErrors(cudaMalloc(&_offset_b, 2 * bins.Gcc.s2b_k * sizeof(int)));
thrust::device_ptr<int> t_offset_t(_offset_t);
thrust::device_ptr<int> t_offset_b(_offset_b);
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
if (nparts > 0) {
/* Find each particle's bin */
bin_fill_k<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
count_bin_parts_k<<<bin_num_knb, bin_dim_knb>>>(_bin_start, _bin_end,
_bin_count);
/* Find number of particles to send and packing offsets */
s1b = bins.Gcc.inb;
s2b = s1b * bins.Gcc.jnb;
    // Top: _ke and _keb planes
if (dom[rank].t != MPI_PROC_NULL) {
// _bin_count is indexed with k varying slowest -- can do a reduction
// directly from _bin_count, given the offset of the start of the _ke plane
offset = GFZ_LOC(0, 0, bins.Gcc._ke, s1b, s2b);
nparts_send[TOP] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_k,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[TOP] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_k, t_offset_t);
} else {
cudaMemset(_offset_t, 0., 2 * bins.Gcc.s2b_k * sizeof(int));
}
} else { // no parts to send
nparts_send[TOP] = 0;
cudaMemset(_offset_t, 0., 2 * bins.Gcc.s2b_k * sizeof(int));
}
    // Bottom: _ksb and _ks planes
if (dom[rank].b != MPI_PROC_NULL) {
offset = GFZ_LOC(0, 0, bins.Gcc._ksb, s1b, s2b);
nparts_send[BOTTOM] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_k,
0., thrust::plus<int>());
if (nparts_send[BOTTOM] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + 2 * bins.Gcc.s2b_k, t_offset_b);
} else {
cudaMemset(_offset_b, 0., 2 * bins.Gcc.s2b_k * sizeof(int));
}
} else {
nparts_send[BOTTOM] = 0;
cudaMemset(_offset_b, 0., 2 * bins.Gcc.s2b_k * sizeof(int));
}
} else { // nparts = 0
checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[TOP] = 0;
nparts_send[BOTTOM] = 0;
cudaMemset(_offset_t, 0., 2 * bins.Gcc.s2b_k * sizeof(int));
cudaMemset(_offset_b, 0., 2 * bins.Gcc.s2b_k * sizeof(int));
}
// Sending and receiving is the same since the outer two bin planes are shared
nparts_recv[TOP] = nparts_send[TOP];
nparts_recv[BOTTOM] = nparts_send[BOTTOM];
/* Send number of parts to top/bottom */
// origin target
// nparts_send[BOTTOM] -> nparts_recv[TOP]
// nparts_recv[BOTTOM] <- nparts_send[TOP]
//nparts_recv[BOTTOM] = 0; // init
//nparts_recv[TOP] = 0;
//mpi_send_nparts_k();
/* Allocate memory for send and recv partial sums */
int npsums = NSP * ncoeffs_max; // 6 scalar products * ncoeffs
// Indexing is, for example:
// _sum_send_t[coeff + ncoeffs_max*sp + ncoeffs_max*nsp*part_id]
// where
// part_id = [0, nparts) and sp = [0, 6)
// 0: Yp_re 1: Yp_im
// 2: rDYu_re 3: rDYu_im
// 4: xXDYu_re 5: xXDYu_im
int send_alloc_t = nparts_send[TOP]*(nparts_send[TOP] > 0) + (nparts_send[TOP] == 0);
int send_alloc_b = nparts_send[BOTTOM]*(nparts_send[BOTTOM] > 0) + (nparts_send[BOTTOM] == 0);
int recv_alloc_t = nparts_recv[TOP]*(nparts_recv[TOP] > 0) + (nparts_recv[TOP] == 0);
int recv_alloc_b = nparts_recv[BOTTOM]*(nparts_recv[BOTTOM] > 0) + (nparts_recv[BOTTOM] == 0);
checkCudaErrors(cudaMalloc(&_sum_send_t, send_alloc_t*npsums*sizeof(real)));
checkCudaErrors(cudaMalloc(&_sum_send_b, send_alloc_b*npsums*sizeof(real)));
checkCudaErrors(cudaMalloc(&_sum_recv_t, recv_alloc_t*npsums*sizeof(real)));
checkCudaErrors(cudaMalloc(&_sum_recv_b, recv_alloc_b*npsums*sizeof(real)));
/* Pack partial sums */
if (nparts_send[TOP] > 0) {
pack_sums_t<<<bin_num_knb, bin_dim_knb>>>(_sum_send_t, _offset_t,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
} else {
//cudaMemset(_sum_send_t, 0., send_alloc_t * npsums * sizeof(real));
}
if (nparts_send[BOTTOM] > 0) {
pack_sums_b<<<bin_num_knb, bin_dim_knb>>>(_sum_send_b, _offset_b,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
} else {
//cudaMemset(_sum_send_b, 0., send_alloc_b * npsums * sizeof(real));
}
cudaDeviceSynchronize(); // ensure packing is complete
/* Communicate partial sums with MPI */
mpi_send_psums_k();
// Offsets are the same since they're over both ghost bins and edge bins
/* Unpack and complete partial sums */
if (nparts_recv[TOP] > 0) {
unpack_sums_t<<<bin_num_knb, bin_dim_knb>>>(_sum_recv_t, _offset_t,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
}
if (nparts_recv[BOTTOM] > 0) {
unpack_sums_b<<<bin_num_knb, bin_dim_knb>>>(_sum_recv_b, _offset_b,
_bin_start, _bin_count, _part_ind, ncoeffs_max,
_int_Yp_re, _int_Yp_im,
_int_rDYu_re, _int_rDYu_im,
_int_xXDYu_re, _int_xXDYu_im);
}
  cudaDeviceSynchronize(); // ensure unpacking is complete
/* Free */
cudaFree(_sum_send_t);
cudaFree(_sum_send_b);
cudaFree(_sum_recv_t);
cudaFree(_sum_recv_b);
cudaFree(_part_ind);
cudaFree(_part_bin);
cudaFree(_offset_t);
cudaFree(_offset_b);
}
extern "C"
real cuda_lamb_err(void)
{
//printf("N%d >> Determining Lamb's error\n", rank);
real error = DBL_MIN;
if (nparts > 0) {
// create a place to store sorted coefficients and errors
real *_part_errors;
cudaMalloc((void**) &_part_errors, nparts*sizeof(real));
// sort the coefficients and calculate errors along the way
dim3 numBlocks(nparts);
dim3 dimBlocks(ncoeffs_max);
compute_error<<<numBlocks, dimBlocks>>>(lamb_cut, ncoeffs_max, nparts,
_parts, _part_errors);
// find maximum error of all particles
thrust::device_ptr<real> t_part_errors(_part_errors);
error = thrust::reduce(t_part_errors,
t_part_errors + nparts,
0., thrust::maximum<real>());
// clean up
cudaFree(_part_errors);
// store copy of coefficients for future calculation
store_coeffs<<<numBlocks, dimBlocks>>>(_parts, nparts, ncoeffs_max);
}
// MPI reduce to find max error
MPI_Allreduce(MPI_IN_PLACE, &error, 1, mpi_real, MPI_MAX, MPI_COMM_WORLD);
return error;
}
|
b7a031cc605e07b20e40ec1412743a64391d619c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//example where there is heavy computation done
//using very little data; in this example the GPU outperforms the CPU by at least 100x
#include <cstdlib>
#include <ctime>
#include <iostream>
#define TSZ 1024
#define BSZ 1024
#define N (BSZ * TSZ)
#define M 100000
#define TT float
using namespace std;
template <typename T>
__global__ void o2_cuda(T* a){
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
a[i] = (T)i / (T)N;
for (size_t j = 0; j < M; ++j)
        a[i] = a[i] * a[i] - 0.25F; // float literal, matching the CPU reference in o2()
}
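// Note: main() launches BSZ blocks of TSZ threads, so there are exactly
// N = BSZ * TSZ threads, one per element, and no bounds check is needed.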
template <typename T>
clock_t o2(T* a){
for (size_t i = 0; i < N; ++i){
a[i] = (T)i / (T)N;
for (int j = 0; j < M; ++j)
a[i] = a[i] * a[i] - 0.25F;
}
return clock();
}
int main(){
TT* a = new TT[N], *b = new TT[N];
TT* db;
hipMalloc(&db, N * sizeof(TT));
clock_t timing_start = clock();
hipLaunchKernelGGL(( o2_cuda), dim3(BSZ), dim3(TSZ), 0, 0, db);
hipMemcpy(b, db, sizeof(TT) * N, hipMemcpyDeviceToHost);
cout << "CUDA time: " << (clock() - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl;
hipFree(db);
timing_start = clock();
clock_t timing_end = o2(a);
cout << "CPU time: " << (timing_end - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl;
bool is_same = true;
for (size_t i = 0; i < N; ++i)
if (a[i] != b[i]){
cout << "Index " << i << " is different" << endl;
is_same = false;
break;
}
if (is_same) cout << "Answer match" << endl;
}
| b7a031cc605e07b20e40ec1412743a64391d619c.cu | //example where there is heavy computation done
//using very little data; in this example the GPU outperforms the CPU by at least 100x
#include <cstdlib>
#include <ctime>
#include <iostream>
#define TSZ 1024
#define BSZ 1024
#define N (BSZ * TSZ)
#define M 100000
#define TT float
using namespace std;
template <typename T>
__global__ void o2_cuda(T* a){
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
a[i] = (T)i / (T)N;
for (size_t j = 0; j < M; ++j)
        a[i] = a[i] * a[i] - 0.25F; // float literal, matching the CPU reference in o2()
}
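// Note: main() launches BSZ blocks of TSZ threads, so there are exactly
// N = BSZ * TSZ threads, one per element, and no bounds check is needed.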
template <typename T>
clock_t o2(T* a){
for (size_t i = 0; i < N; ++i){
a[i] = (T)i / (T)N;
for (int j = 0; j < M; ++j)
a[i] = a[i] * a[i] - 0.25F;
}
return clock();
}
int main(){
TT* a = new TT[N], *b = new TT[N];
TT* db;
cudaMalloc(&db, N * sizeof(TT));
clock_t timing_start = clock();
o2_cuda<<<BSZ, TSZ>>>(db);
cudaMemcpy(b, db, sizeof(TT) * N, cudaMemcpyDeviceToHost);
cout << "CUDA time: " << (clock() - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl;
cudaFree(db);
timing_start = clock();
clock_t timing_end = o2(a);
cout << "CPU time: " << (timing_end - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl;
bool is_same = true;
for (size_t i = 0; i < N; ++i)
if (a[i] != b[i]){
cout << "Index " << i << " is different" << endl;
is_same = false;
break;
}
if (is_same) cout << "Answer match" << endl;
}
|